def process_once(config):
    count = 0
    n_errors = 0
    while True:
        with model.session_scope() as session:
            record = (
                session.query(model.Submission, model.Survey.modified)
                .join(model.Survey)
                .filter(
                    (model.Submission.modified < model.Survey.modified) |
                    ((model.Submission.modified == None) &
                     (model.Survey.modified != None)))
                .first())
            if record is None:
                break

            sub, htime = record
            log.info(
                "Processing %s, %s < %s", sub, sub and sub.modified, htime)
            if count == 0:
                log.info("Starting new job")

            calculator = Calculator.scoring(sub)
            calculator.mark_entire_survey_dirty(sub.survey)
            calculator.execute()

            if sub.error:
                n_errors += 1
            count += 1
            sub.modified = sub.survey.modified
            session.commit()

    log.info("Successfully recalculated scores for %d submissions.", count)
    log.info("Of those, %d contain user errors.", n_errors)
    return count, n_errors
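# `process_once` drains the backlog of submissions that are older than their
# survey and returns when there is nothing left to do. A hypothetical driver
# loop, just to illustrate how it might be scheduled; the `run_forever` name
# and `interval` parameter are illustrative, not part of this module:
import time

def run_forever(config, interval=60):
    while True:
        count, n_errors = process_once(config)
        if count:
            log.info("Pass complete: %d recalculated, %d with errors",
                     count, n_errors)
        time.sleep(interval)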
def create_submission(self, survey, user):
    session = object_session(survey)
    program = survey.program
    submission = model.Submission(
        program=program,
        organisation=user.organisation,
        survey=survey,
        title="First submission",
        approval='draft')
    session.add(submission)

    for m in program.measures:
        # Preload response type to avoid autoflush
        response_type = m.response_type
        qnode_measure = m.get_qnode_measure(survey)
        if not qnode_measure:
            continue
        response = model.Response(
            qnode_measure=qnode_measure,
            submission=submission,
            user=user)
        response.attachments = []
        response.not_relevant = False
        response.modified = datetime.datetime.utcnow()
        response.approval = 'final'
        response.comment = "Response for %s" % m.title
        session.add(response)
        if response_type.name == 'Yes / No':
            response.response_parts = [{'index': 1, 'note': "Yes"}]
        elif response_type.name in {
                'Numerical', 'External Numerical', 'Planned', 'Actual'}:
            response.response_parts = [{'value': 1}]
        else:
            raise ValueError("Unknown response type")
        response.attachments.append(
            model.Attachment(
                file_name="File %s 1" % m.title,
                url="Bar",
                storage='external',
                organisation=user.organisation))
        response.attachments.append(
            model.Attachment(
                file_name="File %s 2" % m.title,
                url="Baz",
                storage='external',
                organisation=user.organisation))
        response.attachments.append(
            model.Attachment(
                file_name="File %s 3" % m.title,
                blob=b'A blob',
                storage='external',
                organisation=user.organisation))

    session.flush()
    calculator = Calculator.scoring(submission)
    calculator.mark_entire_survey_dirty(submission.survey)
    calculator.execute()
    submission.approval = 'final'
    session.flush()
    return submission
def create_submission(self):
    # Respond to a survey
    with model.session_scope() as session:
        program = session.query(model.Program).one()
        user = (
            session.query(model.AppUser)
            .filter_by(email='clerk')
            .one())
        organisation = (
            session.query(model.Organisation)
            .filter_by(name='Utility')
            .one())
        survey = (
            session.query(model.Survey)
            .filter_by(title='Survey 1')
            .one())
        submission = model.Submission(
            program_id=program.id,
            organisation_id=organisation.id,
            survey_id=survey.id,
            title="Submission",
            approval='draft')
        session.add(submission)

        for m in program.measures:
            # Preload response type to avoid autoflush
            response_type = m.response_type
            qnode_measure = m.get_qnode_measure(survey)
            if not qnode_measure:
                continue
            response = model.Response(
                submission=submission,
                qnode_measure=qnode_measure,
                user=user)
            response.attachments = []
            response.not_relevant = False
            response.modified = sa.func.now()
            response.approval = 'final'
            response.comment = "Response for %s" % m.title
            session.add(response)
            if response_type.name == 'Yes / No':
                response.response_parts = [{'index': 1, 'note': "Yes"}]
            else:
                response.response_parts = [{'value': 1}]

        calculator = Calculator.scoring(submission)
        calculator.mark_entire_survey_dirty(submission.survey)
        calculator.execute()

        functions = list(submission.rnodes)
        self.assertAlmostEqual(functions[0].score, 33)
        self.assertAlmostEqual(functions[1].score, 0)
        self.assertAlmostEqual(functions[0].qnode.total_weight, 33)
        self.assertAlmostEqual(functions[1].qnode.total_weight, 0)
        return submission.id
def set_approval(self, session, rnode, approval, user_session):
    promote = self.get_arguments('promote')
    missing = self.get_argument('missing', '')
    submission = rnode.submission

    promoted = demoted = created = 0
    calculator = Calculator.scoring(submission)
    for response, is_new in self.walk_responses(
            session, rnode, missing, user_session.user):
        if is_new:
            response.not_relevant = True
            response.approval = approval
            response.comment = (
                "*Marked Not Relevant by bulk approval process "
                "(was previously empty)*")
            created += 1
        else:
            i1 = APPROVAL_STATES.index(response.approval)
            i2 = APPROVAL_STATES.index(approval)
            if i1 < i2 and 'PROMOTE' in promote:
                response.approval = approval
                response.modified = func.now()
                promoted += 1
            elif i1 > i2 and 'DEMOTE' in promote:
                response.approval = approval
                response.modified = func.now()
                demoted += 1
        calculator.mark_measure_dirty(response.qnode_measure)
    calculator.execute()

    if created:
        self.reason("Created %d (NA)" % created)
    if demoted:
        self.reason("Demoted %d" % demoted)
    if promoted:
        self.reason("Promoted %d" % promoted)
    if created == promoted == demoted == 0:
        self.reason("No changes to approval status")
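# The promote/demote logic above ranks approval levels by their position in
# APPROVAL_STATES, so that list must be ordered from least to most approved.
# A minimal sketch, assuming the conventional four-state workflow; the exact
# members are an assumption, not confirmed by this section:
#
#     APPROVAL_STATES = ['draft', 'final', 'reviewed', 'approved']
#
# With that ordering, moving a response from 'draft' to 'reviewed' satisfies
# i1 < i2 and so counts as a promotion, while the reverse is a demotion.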
def put(self, submission_id):
    '''Update existing.'''
    if submission_id == '':
        raise errors.MethodError("Submission ID required")

    approval = self.get_argument('approval', '')

    with model.session_scope() as session:
        user_session = self.get_user_session(session)

        submission = session.query(model.Submission).get(submission_id)
        if not submission:
            raise errors.MissingDocError("No such submission")

        policy = user_session.policy.derive({
            'org': submission.organisation,
            'approval': approval,
            'surveygroups': submission.surveygroups,
        })
        policy.verify('surveygroup_interact')
        policy.verify('submission_edit')

        current_level = 0
        verbs = []
        if approval:
            self.check_approval_down_one(session, submission, approval)
            current_level = APPROVAL_STATES.index(submission.approval)
            if approval != submission.approval:
                verbs.append('state')
                submission.approval = approval

        self._update(submission, self.request_son)
        if session.is_modified(submission):
            verbs.append('update')

        # Also update the approval state of the responses. When an edited
        # submission is saved without an approval argument, skip this step.
        if approval != '':
            approval_level = APPROVAL_STATES.index(approval)
            if 'state' in verbs and approval_level > current_level:
                approval_level = approval_level - 1
            responses = (
                session.query(model.Response)
                .filter(
                    model.Response.submission_id == submission.id,
                    model.Response.approval ==
                        APPROVAL_STATES[approval_level]))
            for response in responses:
                # update = updater(response, error_factory=errors.ModelError)
                # update('title', son)
                response.approval = approval
                try:
                    calculator = Calculator.scoring(submission)
                    calculator.mark_measure_dirty(response.qnode_measure)
                    calculator.execute()
                except ResponseTypeError as e:
                    raise errors.ModelError(str(e))

                act = Activities(session)
                act.record(user_session.user, response, verbs)
                act.ensure_subscription(
                    user_session.user, response, response.submission,
                    self.reason)

        if submission.deleted:
            submission.deleted = False
            verbs.append('undelete')

        act = Activities(session)
        act.record(user_session.user, submission, verbs)
        act.ensure_subscription(
            user_session.user, submission, submission.organisation,
            self.reason)

    self.get(submission_id)
def duplicate(self, submission, s_submission, session):
    measure_ids = {
        str(qm.measure_id)
        for qm in submission.survey.ordered_qnode_measures}
    qnode_ids = {str(q.id) for q in submission.survey.ordered_qnodes}

    s_rnodes = (
        session.query(model.ResponseNode)
        .filter_by(submission_id=s_submission.id)
        .filter(model.ResponseNode.qnode_id.in_(qnode_ids))
        .all())

    for rnode in s_rnodes:
        if str(rnode.qnode_id) not in qnode_ids:
            continue

        # Duplicate
        session.expunge(rnode)
        make_transient(rnode)

        # Customise
        rnode.program = submission.program
        rnode.submission = submission

        session.add(rnode)
        session.flush()
        # No need to flush again; the rnode has no dependencies.

    for response in s_submission.responses:
        if str(response.measure_id) not in measure_ids:
            continue

        attachments = list(response.attachments)

        # Fetch lazy-loaded fields before detaching
        response.comment

        # Duplicate
        session.expunge(response)
        make_transient(response)

        # Customise
        response.submission_id = submission.id
        response.program_id = submission.program_id
        response.survey_id = submission.survey_id
        response.approval = 'draft'

        session.add(response)
        session.flush()

        # Same thing for attachments
        for attachment in attachments:
            # Fetch lazy-loaded fields before detaching
            attachment.blob

            # Duplicate
            session.expunge(attachment)
            make_transient(attachment)
            attachment.id = None

            # Customise
            attachment.response = response

            session.add(attachment)

    session.flush()

    calculator = Calculator.scoring(submission)
    calculator.mark_entire_survey_dirty(submission.survey)
    calculator.execute()
def process_submission_file(self, all_rows, session, submission, user):
    program_qnodes = (
        session.query(model.QuestionNode)
        .filter_by(program_id=submission.program.id))

    try:
        order = title = ''
        for row_num in range(0, len(all_rows) - 1):
            order, title = self.parse_order_title(all_rows, row_num, "A")
            function = program_qnodes.filter_by(
                parent_id=None, title=title).one()
            log.debug("function: %s", function)
            function_order = order

            order, title = self.parse_order_title(all_rows, row_num, "B")
            process = program_qnodes.filter_by(
                parent_id=function.id, title=title).one()
            log.debug("process: %s", process)

            order, title = self.parse_order_title(all_rows, row_num, "C")
            subprocess = program_qnodes.filter_by(
                parent_id=process.id, title=title).one()
            log.debug("subprocess: %s", subprocess)

            order, title = self.parse_order_title(all_rows, row_num, "D")
            measure = [
                qm.measure for qm in subprocess.qnode_measures
                if qm.measure.title.split('\n')[0] == title]
            if len(measure) == 1:
                measure = measure[0]
            else:
                raise Exception(
                    "This survey does not match the target survey.")
            log.debug("measure: %s", measure)
            log.debug(
                "measure response_type: %s", measure.response_type.name)

            response = model.Response()
            response.program_id = submission.program.id
            response.survey_id = submission.survey.id
            response.measure_id = measure.id
            response.submission_id = submission.id
            response.user_id = user.id
            response.comment = all_rows[row_num][col2num("K")]
            # FIXME: Hard-coded; should be read from file
            response.not_relevant = False
            response.modified = datetime.datetime.utcnow()
            response.approval = 'draft'

            response_part = []
            response_part.append(self.parse_response_type(
                all_rows, row_num, measure.response_type, "E"))
            if function_order != "7":
                response_part.append(self.parse_response_type(
                    all_rows, row_num, measure.response_type, "F"))
                response_part.append(self.parse_response_type(
                    all_rows, row_num, measure.response_type, "G"))
                response_part.append(self.parse_response_type(
                    all_rows, row_num, measure.response_type, "H"))
            response.response_parts = response_part
            response.audit_reason = "Import"
            session.add(response)

    except sqlalchemy.orm.exc.NoResultFound:
        raise errors.ModelError(
            "Survey structure does not match: Row %d: %s %s" %
            (row_num + 2, order, title))
    except ImportError as e:
        raise errors.ModelError(
            "Row %d: %s %s: %s" % (row_num + 2, order, title, str(e)))
    except Exception as e:
        raise errors.InternalModelError(
            "Row %d: %s %s: %s" % (row_num + 2, order, title, str(e)))

    calculator = Calculator.scoring(submission)
    calculator.mark_entire_survey_dirty(submission.survey)
    calculator.execute()
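# The importer above walks one spreadsheet row at a time and expects the
# columns referenced in the code to be laid out roughly as follows (columns
# A-D, E-H and K are read explicitly; the +2 offset in the error messages
# suggests a header row plus 1-based numbering; this summary is inferred,
# not confirmed):
#
#   A: function order and title        E-H: response parts
#   B: process order and title         K:   comment
#   C: subprocess order and title
#   D: measure title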
def set_relevance(self, session, rnode, relevance, user_session):
    not_relevant = relevance == 'NOT_RELEVANT'
    if not_relevant:
        missing = self.get_argument('missing', '')
    else:
        # When marking responses as not NA, just ignore missing responses.
        # It's not possible to create new ones because a non-NA response
        # must have its response parts filled in.
        missing = 'IGNORE'
    submission = rnode.submission

    changed = failed = created = 0
    calculator = Calculator.scoring(submission)
    for response, is_new in self.walk_responses(
            session, rnode, missing, user_session.user):
        if not_relevant:
            response.not_relevant = True
            if is_new:
                response.approval = submission.approval
                response.comment = (
                    "*Marked Not Relevant as a bulk action "
                    "(was previously empty)*")
                created += 1
            else:
                changed += 1
        else:
            response.not_relevant = False
            changed += 1

        policy = user_session.policy.derive({
            'org': response.submission.organisation,
            'submission': response.submission,
            'approval': response.approval,
            'index': APPROVAL_STATES.index,
            'surveygroups': response.submission.surveygroups,
        })
        policy.verify('surveygroup_interact')
        try:
            policy.verify('response_edit')
        except errors.AuthzError as e:
            err = (
                "Response %s: %s. You might need to downgrade the"
                " response's approval status. You can use the bulk"
                " approval tool for this." % (
                    response.qnode_measure.get_path(), e))
            raise errors.AuthzError(err)

        calculator.mark_measure_dirty(response.qnode_measure)
    calculator.execute()

    if created:
        self.reason("Created %d" % created)
    if changed:
        self.reason("Changed %d" % changed)
    if failed:
        self.reason(
            "%d measures could not be changed, because a relevant"
            " response must have valid data." % failed)
    if created == changed == failed == 0:
        self.reason("No changes to relevance")
def put(self, submission_id, qnode_id):
    '''Save (create or update).'''
    approval = self.get_argument('approval', '')
    relevance = self.get_argument('relevance', '')

    with model.session_scope() as session:
        user_session = self.get_user_session(session)

        submission = (
            session.query(model.Submission)
            .get(submission_id))
        if not submission:
            raise errors.MissingDocError("No such submission")

        policy = user_session.policy.derive({
            'org': submission.organisation,
            'submission': submission,
            'approval': approval,
            'index': APPROVAL_STATES.index,
            'surveygroups': submission.surveygroups,
        })
        policy.verify('surveygroup_interact')
        policy.verify('rnode_edit')

        rnode = (
            session.query(model.ResponseNode)
            .get((submission_id, qnode_id)))

        verbs = []
        if not rnode:
            qnode = (
                session.query(model.QuestionNode)
                .get((qnode_id, submission.program.id)))
            if qnode is None:
                raise errors.MissingDocError("No such question node")
            rnode = model.ResponseNode.from_qnode(
                qnode, submission, create=True)

        importance = self.request_son.get('importance')
        if importance is not None:
            if int(importance) <= 0:
                self.request_son['importance'] = None
            elif int(importance) > 5:
                self.request_son['importance'] = 5
        urgency = self.request_son.get('urgency')
        if urgency is not None:
            if int(urgency) <= 0:
                self.request_son['urgency'] = None
            elif int(urgency) > 5:
                self.request_son['urgency'] = 5

        self._update(rnode, self.request_son)
        if session.is_modified(rnode):
            verbs.append('update')

        session.flush()
        if approval:
            policy.verify('submission_response_approval')
            yield self.set_approval(
                session, rnode, approval, user_session)
            verbs.append('state')
        if relevance:
            yield self.set_relevance(
                session, rnode, relevance, user_session)
            verbs.append('update')

        try:
            calculator = Calculator.scoring(submission)
            calculator.mark_qnode_dirty(rnode.qnode)
            calculator.execute()
        except ResponseTypeError as e:
            raise errors.ModelError(str(e))

        act = Activities(session)
        act.record(user_session.user, rnode, verbs)
        act.ensure_subscription(
            user_session.user, rnode, rnode.submission, self.reason)

    self.get(submission_id, qnode_id)
def put(self, submission_id, measure_id):
    '''Save (create or update).'''
    with model.session_scope(version=True) as session:
        user_session = self.get_user_session(session)

        submission = (
            session.query(model.Submission)
            .get(submission_id))
        if not submission:
            raise errors.MissingDocError("No such submission")

        response = (
            session.query(model.Response)
            .get((submission_id, measure_id)))

        verbs = []
        if response is None:
            program_id = submission.program_id
            survey_id = submission.survey_id
            qnode_measure = (
                session.query(model.QnodeMeasure)
                .get((program_id, survey_id, measure_id)))
            if qnode_measure is None:
                raise errors.MissingDocError("No such measure")
            response = model.Response(
                qnode_measure=qnode_measure,
                submission=submission,
                approval='draft')
            session.add(response)
            verbs.append('create')
        else:
            same_user = response.user.id == user_session.user.id
            td = datetime.datetime.utcnow() - response.modified
            hours_since_update = td.total_seconds() / 60 / 60
            if same_user and hours_since_update < 8:
                response.version_on_update = False

            modified = self.request_son.get("latest_modified", 0)
            # Convert to int to avoid string conversion errors during
            # JSON marshalling.
            if int(modified) < int(response.modified.timestamp()):
                raise errors.ModelError(
                    "This response has changed since you loaded the"
                    " page. Please copy or remember your changes and"
                    " refresh the page.")
            verbs.append('update')

        if self.request_son['approval'] != response.approval:
            verbs.append('state')

        self._update(response, self.request_son, user_session.user)
        if not session.is_modified(response) and 'update' in verbs:
            verbs.remove('update')

        policy = user_session.policy.derive({
            'org': submission.organisation,
            'submission': submission,
            'approval': response.approval,
            'index': APPROVAL_STATES.index,
            'surveygroups': submission.surveygroups,
        })
        policy.verify('surveygroup_interact')
        policy.verify('response_edit')

        session.flush()

        # Prevent creating a second version during the following operations
        response.version_on_update = False

        try:
            calculator = Calculator.scoring(submission)
            calculator.mark_measure_dirty(response.qnode_measure)
            calculator.execute()
        except ResponseTypeError as e:
            raise errors.ModelError(str(e))

        act = Activities(session)
        act.record(user_session.user, response, verbs)
        act.ensure_subscription(
            user_session.user, response, response.submission, self.reason)

    self.get(submission_id, measure_id)
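# The concurrency guard above compares the client-supplied `latest_modified`
# value against the stored timestamp, so clients are expected to echo back the
# response's modification time as a Unix timestamp in seconds. A hypothetical
# request body, just to illustrate the idea (only `latest_modified` and
# `approval` are read explicitly above; the other fields are illustrative):
#
#     {
#         "latest_modified": 1700000000,
#         "approval": "final",
#         "comment": "Updated figures",
#         "response_parts": [{"value": 1}]
#     }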