class Criterion(DefaultTableMixin, UUIDMixin, ActiveMixin, WriteTrackingMixin):
    """A marking criterion that assignments attach when students compare answers."""
    __tablename__ = 'criterion'

    # table columns
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete="CASCADE"),
        nullable=False)
    name = db.Column(db.String(255), nullable=False)
    description = db.Column(db.Text)
    # public criteria are visible beyond their creating user's own assignments
    public = db.Column(db.Boolean(), default=False, nullable=False, index=True)
    default = db.Column(db.Boolean(), default=True, nullable=False, index=True)

    # relationships
    # user via User Model

    # assignment many-to-many criterion with association assignment_criteria
    user_uuid = association_proxy('user', 'uuid')
    assignment_criteria = db.relationship("AssignmentCriterion",
        back_populates="criterion", lazy='dynamic')
    comparison_criteria = db.relationship("ComparisonCriterion",
        backref="criterion", lazy='dynamic')
    answer_criteria_scores = db.relationship("AnswerCriterionScore",
        backref="criterion", lazy='dynamic')

    # hybrid and other functions
    @hybrid_property
    def compared(self):
        # True once this criterion has been used in at least one comparison
        return self.compare_count > 0

    @classmethod
    def get_by_uuid_or_404(cls, model_uuid, joinedloads=None, title=None, message=None):
        """Fetch a criterion by uuid or abort with a criterion-specific 404."""
        # fix: avoid the shared mutable default argument anti-pattern
        if joinedloads is None:
            joinedloads = []
        if not title:
            title = "Criterion Unavailable"
        if not message:
            message = "Sorry, this criterion was deleted or is no longer accessible."
        return super(cls, cls).get_by_uuid_or_404(model_uuid, joinedloads, title, message)

    @classmethod
    def get_active_by_uuid_or_404(cls, model_uuid, joinedloads=None, title=None, message=None):
        """Fetch an active (not soft-deleted) criterion by uuid or abort with a 404."""
        if joinedloads is None:
            joinedloads = []
        if not title:
            title = "Criterion Unavailable"
        if not message:
            message = "Sorry, this criterion was deleted or is no longer accessible."
        return super(cls, cls).get_active_by_uuid_or_404(model_uuid, joinedloads, title, message)

    @classmethod
    def __declare_last__(cls):
        super(cls, cls).__declare_last__()

        # deferred count of comparisons that used this criterion
        cls.compare_count = column_property(
            select([func.count(ComparisonCriterion.id)]).
            where(ComparisonCriterion.criterion_id == cls.id).
            scalar_subquery(),
            deferred=True,
            group="counts"
        )
class AnswerComment(DefaultTableMixin, UUIDMixin, AttemptMixin, ActiveMixin, WriteTrackingMixin):
    """Feedback on an answer; `comment_type` distinguishes the kinds (e.g. public vs self-evaluation)."""
    __tablename__ = 'answer_comment'

    # table columns
    answer_id = db.Column(db.Integer, db.ForeignKey('answer.id', ondelete="CASCADE"),
        nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete="CASCADE"),
        nullable=False)
    content = db.Column(db.Text)
    comment_type = db.Column(Enum(AnswerCommentType), nullable=False, index=True)
    # drafts are excluded from the count column_properties on Answer
    draft = db.Column(db.Boolean(), default=False, nullable=False, index=True)

    # relationships
    # answer via Answer Model
    # user via User Model

    # readonly

    # hybrid and other functions
    course_id = association_proxy('answer', 'course_id',
        creator=lambda course_id: import_module('compair.models.answer').Answer(course_id=course_id))
    course_uuid = association_proxy('answer', 'course_uuid')
    assignment_id = association_proxy('answer', 'assignment_id')
    assignment_uuid = association_proxy('answer', 'assignment_uuid')
    answer_uuid = association_proxy('answer', 'uuid')
    user_avatar = association_proxy('user', 'avatar')
    user_uuid = association_proxy('user', 'uuid')
    user_displayname = association_proxy('user', 'displayname')
    user_student_number = association_proxy('user', 'student_number')
    user_fullname = association_proxy('user', 'fullname')
    user_fullname_sortable = association_proxy('user', 'fullname_sortable')
    user_system_role = association_proxy('user', 'system_role')

    @classmethod
    def get_by_uuid_or_404(cls, model_uuid, joinedloads=None, title=None, message=None):
        """Fetch feedback by uuid or abort with a feedback-specific 404."""
        # fix: avoid the shared mutable default argument anti-pattern
        if joinedloads is None:
            joinedloads = []
        if not title:
            title = "Feedback Unavailable"
        if not message:
            message = "Sorry, this feedback was deleted or is no longer accessible."
        return super(cls, cls).get_by_uuid_or_404(model_uuid, joinedloads, title, message)

    @classmethod
    def get_active_by_uuid_or_404(cls, model_uuid, joinedloads=None, title=None, message=None):
        """Fetch active (not soft-deleted) feedback by uuid or abort with a 404."""
        if joinedloads is None:
            joinedloads = []
        if not title:
            title = "Feedback Unavailable"
        if not message:
            message = "Sorry, this feedback was deleted or is no longer accessible."
        return super(cls, cls).get_active_by_uuid_or_404(model_uuid, joinedloads, title, message)

    @classmethod
    def __declare_last__(cls):
        super(cls, cls).__declare_last__()
class XAPILog(DefaultTableMixin, WriteTrackingMixin): __tablename__ = 'xapi_log' # table columns statement = db.Column(db.Text) transmitted = db.Column(db.Boolean(), default=False, nullable=False, index=True) @classmethod def __declare_last__(cls): super(cls, cls).__declare_last__()
class Comparison(DefaultTableMixin, UUIDMixin, WriteTrackingMixin):
    """One pairwise comparison of two answers by a user, plus scoring helpers.

    The class also hosts the class-level workflow: pairing (which two answers a
    user sees next), and incremental/full score recalculation via the external
    pairing/scoring packages (`generate_pair`, `calculate_score_1vs1`,
    `calculate_score`).
    """
    __tablename__ = 'comparison'

    # table columns
    assignment_id = db.Column(db.Integer, db.ForeignKey('assignment.id', ondelete="CASCADE"),
        nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete="CASCADE"),
        nullable=False)
    answer1_id = db.Column(db.Integer, db.ForeignKey('answer.id', ondelete="CASCADE"),
        nullable=False)
    answer2_id = db.Column(db.Integer, db.ForeignKey('answer.id', ondelete="CASCADE"),
        nullable=False)
    # null until the user picks a winner (or declares a draw)
    winner = db.Column(EnumType(WinningAnswer, name="winner"), nullable=True)
    comparison_example_id = db.Column(db.Integer,
        db.ForeignKey('comparison_example.id', ondelete="SET NULL"), nullable=True)
    round_compared = db.Column(db.Integer, default=0, nullable=False)
    completed = db.Column(db.Boolean(name='completed'), default=False, nullable=False, index=True)
    pairing_algorithm = db.Column(EnumType(PairingAlgorithm, name="pairing_algorithm"),
        nullable=True, default=PairingAlgorithm.random)

    # relationships
    # assignment via Assignment Model
    # user via User Model
    # comparison_example via ComparisonExample Model
    comparison_criteria = db.relationship("ComparisonCriterion", backref="comparison", lazy='immediate')
    answer1 = db.relationship("Answer", foreign_keys=[answer1_id])
    answer2 = db.relationship("Answer", foreign_keys=[answer2_id])

    # hybrid and other functions
    course_id = association_proxy('assignment', 'course_id',
        creator=lambda course_id: import_module('compair.models.assignment').Assignment(course_id=course_id))
    course_uuid = association_proxy('assignment', 'course_uuid')
    assignment_uuid = association_proxy('assignment', 'uuid')
    answer1_uuid = association_proxy('answer1', 'uuid')
    answer2_uuid = association_proxy('answer2', 'uuid')
    user_avatar = association_proxy('user', 'avatar')
    user_uuid = association_proxy('user', 'uuid')
    user_displayname = association_proxy('user', 'displayname')
    user_fullname = association_proxy('user', 'fullname')
    user_fullname_sortable = association_proxy('user', 'fullname_sortable')
    user_system_role = association_proxy('user', 'system_role')

    @classmethod
    def get_by_uuid_or_404(cls, model_uuid, joinedloads=None, title=None, message=None):
        """Fetch a comparison by uuid or abort with a comparison-specific 404."""
        # fix: avoid the shared mutable default argument anti-pattern
        if joinedloads is None:
            joinedloads = []
        if not title:
            title = "Comparison Unavailable"
        if not message:
            message = "Sorry, this comparison was deleted or is no longer accessible."
        return super(cls, cls).get_by_uuid_or_404(model_uuid, joinedloads, title, message)

    @classmethod
    def __declare_last__(cls):
        super(cls, cls).__declare_last__()

    def comparison_pair_winner(self):
        """Translate this row's WinningAnswer into the pairing library's ComparisonWinner."""
        from . import WinningAnswer
        winner = None
        if self.winner == WinningAnswer.answer1:
            winner = ComparisonWinner.key1
        elif self.winner == WinningAnswer.answer2:
            winner = ComparisonWinner.key2
        elif self.winner == WinningAnswer.draw:
            winner = ComparisonWinner.draw
        return winner

    def convert_to_comparison_pair(self):
        """Represent this comparison as a ComparisonPair for the pairing/scoring packages."""
        return ComparisonPair(
            key1=self.answer1_id,
            key2=self.answer2_id,
            winner=self.comparison_pair_winner()
        )

    @classmethod
    def _get_new_comparison_pair(cls, course_id, assignment_id, user_id, pairing_algorithm, comparisons):
        """Ask the pairing package for the next pair of answers this user should compare.

        Excludes answers authored by non-students and by the current user, and
        feeds the user's existing comparisons to the algorithm.
        """
        from . import Assignment, UserCourse, CourseRole, Answer, AnswerScore, PairingAlgorithm

        # ineligible authors - eg. instructors, TAs, dropped student, current user
        non_students = UserCourse.query \
            .filter(and_(
                UserCourse.course_id == course_id,
                UserCourse.course_role != CourseRole.student
            ))
        ineligible_user_ids = [non_student.user_id for non_student in non_students]
        ineligible_user_ids.append(user_id)

        # candidate answers with their current (possibly absent) scores
        answers_with_score = Answer.query \
            .with_entities(Answer, AnswerScore.score) \
            .outerjoin(AnswerScore) \
            .filter(and_(
                Answer.user_id.notin_(ineligible_user_ids),
                Answer.assignment_id == assignment_id,
                Answer.active == True,
                Answer.practice == False,
                Answer.draft == False
            )) \
            .all()

        scored_objects = []
        for answer_with_score in answers_with_score:
            scored_objects.append(ScoredObject(
                key=answer_with_score.Answer.id,
                score=answer_with_score.score,
                rounds=answer_with_score.Answer.round,
                variable1=None, variable2=None,
                wins=None, loses=None, opponents=None
            ))

        comparison_pairs = [comparison.convert_to_comparison_pair() for comparison in comparisons]

        comparison_pair = generate_pair(
            package_name=pairing_algorithm.value,
            scored_objects=scored_objects,
            comparison_pairs=comparison_pairs,
            log=current_app.logger
        )

        return comparison_pair

    @classmethod
    def create_new_comparison(cls, assignment_id, user_id, skip_comparison_examples):
        """Create, persist and return the next comparison for a user.

        Comparison examples (if any remain uncompleted and not skipped) take
        priority; otherwise the pairing algorithm picks two real answers and
        their round counters are incremented.
        """
        # fix: Answer is used below but was missing from this deferred import
        from . import Assignment, ComparisonExample, ComparisonCriterion, Answer

        # get all comparisons for the user in this assignment
        comparisons = Comparison.query \
            .filter_by(
                user_id=user_id,
                assignment_id=assignment_id
            ) \
            .all()

        is_comparison_example_set = False
        answer1 = None
        answer2 = None
        comparison_example_id = None
        round_compared = 0

        assignment = Assignment.query.get(assignment_id)
        pairing_algorithm = assignment.pairing_algorithm
        if pairing_algorithm is None:
            pairing_algorithm = PairingAlgorithm.random

        if not skip_comparison_examples:
            # check comparison examples first
            comparison_examples = ComparisonExample.query \
                .filter_by(
                    assignment_id=assignment_id,
                    active=True
                ) \
                .all()

            # check if user has not completed all comparison examples
            for comparison_example in comparison_examples:
                comparison = next(
                    (c for c in comparisons if c.comparison_example_id == comparison_example.id),
                    None
                )
                if comparison is None:
                    is_comparison_example_set = True
                    answer1 = comparison_example.answer1
                    answer2 = comparison_example.answer2
                    comparison_example_id = comparison_example.id
                    break

        if not is_comparison_example_set:
            comparison_pair = Comparison._get_new_comparison_pair(
                assignment.course_id, assignment_id, user_id, pairing_algorithm, comparisons)
            answer1 = Answer.query.get(comparison_pair.key1)
            answer2 = Answer.query.get(comparison_pair.key2)
            round_compared = min(answer1.round + 1, answer2.round + 1)

            # update round counters
            for answer in [answer1, answer2]:
                answer.round += 1
                db.session.add(answer)

        comparison = Comparison(
            assignment_id=assignment_id,
            user_id=user_id,
            answer1_id=answer1.id,
            answer2_id=answer2.id,
            winner=None,
            round_compared=round_compared,
            comparison_example_id=comparison_example_id,
            pairing_algorithm=pairing_algorithm
        )
        db.session.add(comparison)

        # one ComparisonCriterion per active assignment criterion; attached to the
        # session via the `comparison` relationship cascade
        for criterion in assignment.criteria:
            ComparisonCriterion(
                comparison=comparison,
                criterion_id=criterion.id,
                winner=None,
                content=None,
            )
        db.session.commit()

        return comparison

    @classmethod
    def update_scores_1vs1(cls, comparison):
        """Incrementally update answer and per-criterion scores after one comparison.

        Returns the list of updated AnswerScore rows.
        """
        from . import AnswerScore, AnswerCriterionScore, \
            ComparisonCriterion, ScoringAlgorithm

        assignment_id = comparison.assignment_id
        answer1_id = comparison.answer1_id
        answer2_id = comparison.answer2_id

        # get all other comparisons for the answers not including the ones being calculated
        other_comparisons = Comparison.query \
            .options(load_only('winner', 'answer1_id', 'answer2_id')) \
            .filter(and_(
                Comparison.assignment_id == assignment_id,
                Comparison.id != comparison.id,
                or_(
                    Comparison.answer1_id.in_([answer1_id, answer2_id]),
                    Comparison.answer2_id.in_([answer1_id, answer2_id])
                )
            )) \
            .all()

        scores = AnswerScore.query \
            .filter(AnswerScore.answer_id.in_([answer1_id, answer2_id])) \
            .all()

        # get all other criterion comparisons for the answers not including the ones being calculated
        other_criterion_comparisons = ComparisonCriterion.query \
            .join("comparison") \
            .filter(and_(
                Comparison.assignment_id == assignment_id,
                # bug fix: was `~Comparison.id == comparison.id`, which negates the
                # column instead of excluding the current row; use != to match the
                # other_comparisons query above
                Comparison.id != comparison.id,
                or_(
                    Comparison.answer1_id.in_([answer1_id, answer2_id]),
                    Comparison.answer2_id.in_([answer1_id, answer2_id])
                )
            )) \
            .all()

        criteria_scores = AnswerCriterionScore.query \
            .filter(AnswerCriterionScore.answer_id.in_([answer1_id, answer2_id])) \
            .all()

        # update answer criterion scores
        updated_criteria_scores = []
        for comparison_criterion in comparison.comparison_criteria:
            criterion_id = comparison_criterion.criterion_id

            # reuse the existing score row or start a fresh one; next() always
            # yields an instance here, so no None checks are needed
            score1 = next(
                (criterion_score for criterion_score in criteria_scores
                    if criterion_score.answer_id == answer1_id
                    and criterion_score.criterion_id == criterion_id),
                AnswerCriterionScore(assignment_id=assignment_id,
                    answer_id=answer1_id, criterion_id=criterion_id)
            )
            updated_criteria_scores.append(score1)
            key1_scored_object = score1.convert_to_scored_object()

            score2 = next(
                (criterion_score for criterion_score in criteria_scores
                    if criterion_score.answer_id == answer2_id
                    and criterion_score.criterion_id == criterion_id),
                AnswerCriterionScore(assignment_id=assignment_id,
                    answer_id=answer2_id, criterion_id=criterion_id)
            )
            updated_criteria_scores.append(score2)
            key2_scored_object = score2.convert_to_scored_object()

            result_1, result_2 = calculate_score_1vs1(
                package_name=ScoringAlgorithm.elo.value,
                key1_scored_object=key1_scored_object,
                key2_scored_object=key2_scored_object,
                winner=comparison_criterion.comparison_pair_winner(),
                other_comparison_pairs=[
                    c.convert_to_comparison_pair()
                    for c in other_criterion_comparisons
                    if c.criterion_id == criterion_id
                ],
                log=current_app.logger
            )

            for score, result in [(score1, result_1), (score2, result_2)]:
                score.score = result.score
                score.variable1 = result.variable1
                score.variable2 = result.variable2
                score.rounds = result.rounds
                score.wins = result.wins
                score.loses = result.loses
                score.opponents = result.opponents

        # update overall answer scores
        updated_scores = []
        score1 = next(
            (score for score in scores if score.answer_id == answer1_id),
            AnswerScore(assignment_id=assignment_id, answer_id=answer1_id))
        updated_scores.append(score1)
        key1_scored_object = score1.convert_to_scored_object()

        score2 = next(
            (score for score in scores if score.answer_id == answer2_id),
            AnswerScore(assignment_id=assignment_id, answer_id=answer2_id))
        updated_scores.append(score2)
        key2_scored_object = score2.convert_to_scored_object()

        result_1, result_2 = calculate_score_1vs1(
            package_name=ScoringAlgorithm.elo.value,
            key1_scored_object=key1_scored_object,
            key2_scored_object=key2_scored_object,
            winner=comparison.comparison_pair_winner(),
            other_comparison_pairs=[
                c.convert_to_comparison_pair() for c in other_comparisons
            ],
            log=current_app.logger
        )

        for score, result in [(score1, result_1), (score2, result_2)]:
            score.score = result.score
            score.variable1 = result.variable1
            score.variable2 = result.variable2
            score.rounds = result.rounds
            score.wins = result.wins
            score.loses = result.loses
            score.opponents = result.opponents

        db.session.add_all(updated_criteria_scores)
        db.session.add_all(updated_scores)
        db.session.commit()

        return updated_scores

    @classmethod
    def calculate_scores(cls, assignment_id):
        """Recalculate all answer and answer-criterion scores for an assignment from scratch."""
        from . import AnswerScore, AnswerCriterionScore, \
            AssignmentCriterion, ScoringAlgorithm

        # get all comparisons for this assignment and only load the data we need
        comparisons = Comparison.query \
            .filter(Comparison.assignment_id == assignment_id) \
            .all()
        assignment_criteria = AssignmentCriterion.query \
            .with_entities(AssignmentCriterion.criterion_id) \
            .filter_by(assignment_id=assignment_id, active=True) \
            .all()

        comparison_criteria = []
        comparison_pairs = []
        answer_ids = set()
        for comparison in comparisons:
            answer_ids.add(comparison.answer1_id)
            answer_ids.add(comparison.answer2_id)
            comparison_criteria.extend(comparison.comparison_criteria)
            comparison_pairs.append(comparison.convert_to_comparison_pair())

        # calculate answer score
        comparison_results = calculate_score(
            package_name=ScoringAlgorithm.elo.value,
            comparison_pairs=comparison_pairs,
            log=current_app.logger)

        scores = AnswerScore.query \
            .filter(AnswerScore.answer_id.in_(answer_ids)) \
            .all()
        updated_answer_scores = update_answer_scores(scores, assignment_id, comparison_results)
        db.session.add_all(updated_answer_scores)

        # calculate answer criterion scores, one result set per active criterion
        criterion_comparison_results = {}
        for assignment_criterion in assignment_criteria:
            comparison_pairs = [
                comparison_criterion.convert_to_comparison_pair()
                for comparison_criterion in comparison_criteria
                if comparison_criterion.criterion_id == assignment_criterion.criterion_id
            ]
            criterion_comparison_results[assignment_criterion.criterion_id] = calculate_score(
                package_name=ScoringAlgorithm.elo.value,
                comparison_pairs=comparison_pairs,
                log=current_app.logger)

        scores = AnswerCriterionScore.query \
            .filter(AnswerCriterionScore.answer_id.in_(answer_ids)) \
            .all()
        updated_answer_criteria_scores = update_answer_criteria_scores(
            scores, assignment_id, criterion_comparison_results)
        db.session.add_all(updated_answer_criteria_scores)

        db.session.commit()
class Answer(DefaultTableMixin, UUIDMixin, AttemptMixin, ActiveMixin, WriteTrackingMixin):
    """A student's (or group's) answer to an assignment."""
    __tablename__ = 'answer'

    # table columns
    assignment_id = db.Column(db.Integer, db.ForeignKey('assignment.id', ondelete="CASCADE"),
        nullable=False)
    # nullable — presumably group answers carry group_id instead of user_id; verify against callers
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete="CASCADE"),
        nullable=True)
    group_id = db.Column(db.Integer, db.ForeignKey('group.id', ondelete="CASCADE"),
        nullable=True)
    file_id = db.Column(db.Integer, db.ForeignKey('file.id', ondelete="SET NULL"),
        nullable=True)
    content = db.Column(db.Text)
    # number of comparison rounds this answer has participated in
    round = db.Column(db.Integer, default=0, nullable=False)
    practice = db.Column(db.Boolean(), default=False, nullable=False, index=True)
    draft = db.Column(db.Boolean(), default=False, nullable=False, index=True)
    top_answer = db.Column(db.Boolean(), default=False, nullable=False, index=True)
    comparable = db.Column(db.Boolean(), default=True, nullable=False, index=True)
    submission_date = db.Column(db.DateTime(timezone=True), nullable=True)

    # relationships
    # assignment via Assignment Model
    # user via User Model
    # group via Group Model
    # file via File Model
    comments = db.relationship("AnswerComment", backref="answer")
    score = db.relationship("AnswerScore", uselist=False, backref="answer")
    criteria_scores = db.relationship("AnswerCriterionScore", backref="answer")

    # hybrid and other functions
    course_id = association_proxy('assignment', 'course_id',
        creator=lambda course_id: import_module('compair.models.assignment').Assignment(course_id=course_id))
    course_uuid = association_proxy('assignment', 'course_uuid')
    assignment_uuid = association_proxy('assignment', 'uuid')
    user_avatar = association_proxy('user', 'avatar')
    user_uuid = association_proxy('user', 'uuid')
    user_displayname = association_proxy('user', 'displayname')
    user_student_number = association_proxy('user', 'student_number')
    user_fullname = association_proxy('user', 'fullname')
    user_fullname_sortable = association_proxy('user', 'fullname_sortable')
    user_system_role = association_proxy('user', 'system_role')
    group_uuid = association_proxy('group', 'uuid')
    group_avatar = association_proxy('group', 'avatar')
    group_name = association_proxy('group', 'name')

    @hybrid_property
    def private_comment_count(self):
        return self.comment_count - self.public_comment_count

    @hybrid_property
    def group_answer(self):
        # idiomatic identity check at the instance level; the SQL expression
        # below must keep the != operator for column comparison
        return self.group_id is not None

    @group_answer.expression
    def group_answer(cls):
        return cls.group_id != None

    @classmethod
    def get_by_uuid_or_404(cls, model_uuid, joinedloads=None, title=None, message=None):
        """Fetch an answer by uuid or abort with an answer-specific 404."""
        # fix: avoid the shared mutable default argument anti-pattern
        if joinedloads is None:
            joinedloads = []
        if not title:
            title = "Answer Unavailable"
        if not message:
            message = "Sorry, this answer was deleted or is no longer accessible."
        return super(cls, cls).get_by_uuid_or_404(model_uuid, joinedloads, title, message)

    @classmethod
    def get_active_by_uuid_or_404(cls, model_uuid, joinedloads=None, title=None, message=None):
        """Fetch an active (not soft-deleted) answer by uuid or abort with a 404."""
        if joinedloads is None:
            joinedloads = []
        if not title:
            title = "Answer Unavailable"
        if not message:
            message = "Sorry, this answer was deleted or is no longer accessible."
        return super(cls, cls).get_active_by_uuid_or_404(model_uuid, joinedloads, title, message)

    @classmethod
    def __declare_last__(cls):
        super(cls, cls).__declare_last__()

        # consistency fix: siblings (Criterion, Course) call .scalar_subquery() on
        # column_property selects; added here to match and avoid the implicit coercion
        cls.comment_count = column_property(
            select([func.count(AnswerComment.id)]).
            where(and_(
                AnswerComment.answer_id == cls.id,
                AnswerComment.active == True,
                AnswerComment.draft == False
            )).
            scalar_subquery(),
            deferred=True,
            group='counts'
        )

        cls.public_comment_count = column_property(
            select([func.count(AnswerComment.id)]).
            where(and_(
                AnswerComment.answer_id == cls.id,
                AnswerComment.active == True,
                AnswerComment.draft == False,
                AnswerComment.comment_type == AnswerCommentType.public
            )).
            scalar_subquery(),
            deferred=True,
            group='counts'
        )

        cls.self_evaluation_count = column_property(
            select([func.count(AnswerComment.id)]).
            where(and_(
                AnswerComment.comment_type == AnswerCommentType.self_evaluation,
                AnswerComment.active == True,
                AnswerComment.draft == False,
                AnswerComment.answer_id == cls.id
            )).
            scalar_subquery(),
            deferred=True,
            group='counts'
        )
class Course(DefaultTableMixin, UUIDMixin, ActiveMixin, WriteTrackingMixin):
    """A course offering; owns assignments, enrolments, groups, grades and LTI links."""
    __tablename__ = 'course'

    # table columns
    name = db.Column(db.String(255), nullable=False)
    year = db.Column(db.Integer, nullable=False)
    term = db.Column(db.String(255), nullable=False)
    sandbox = db.Column(db.Boolean(), nullable=False, default=False, index=True)
    start_date = db.Column(db.DateTime(timezone=True), nullable=True)
    end_date = db.Column(db.DateTime(timezone=True), nullable=True)

    # relationships
    # user many-to-many course with association user_course
    user_courses = db.relationship("UserCourse", back_populates="course", lazy="dynamic")
    assignments = db.relationship("Assignment", backref="course", lazy="dynamic")
    grades = db.relationship("CourseGrade", backref="course", lazy='dynamic')
    groups = db.relationship("Group", backref="course", lazy='dynamic')

    # lti
    lti_contexts = db.relationship("LTIContext", backref="compair_course", lazy='dynamic')

    # hybrid and other functions
    @hybrid_property
    def lti_linked(self):
        return self.lti_context_count > 0

    @hybrid_property
    def lti_has_sis_data(self):
        return self.lti_context_sis_count > 0

    @hybrid_property
    def lti_sis_data(self):
        # map SIS course id -> list of SIS section ids, from linked LTI contexts
        sis_data = {}
        for lti_context in self.lti_contexts.all():
            sis_course_id = lti_context.lis_course_offering_sourcedid
            sis_section_id = lti_context.lis_course_section_sourcedid
            if not sis_course_id or not sis_section_id:
                continue
            sis_data.setdefault(sis_course_id, []).append(sis_section_id)
        return sis_data

    @hybrid_property
    def available(self):
        now = dateutil.parser.parse(datetime.datetime.utcnow().replace(tzinfo=pytz.utc).isoformat())

        # must be after start date if set
        if self.start_date and self.start_date.replace(tzinfo=pytz.utc) > now:
            return False

        # must be before end date if set
        if self.end_date and now >= self.end_date.replace(tzinfo=pytz.utc):
            return False

        return True

    @hybrid_property
    def start_date_order(self):
        # explicit start date, else earliest assignment answer start, else creation time
        if self.start_date:
            return self.start_date
        elif self.min_assignment_answer_start:
            return self.min_assignment_answer_start
        else:
            return self.created

    @start_date_order.expression
    def start_date_order(cls):
        return case([
            (cls.start_date != None, cls.start_date),
            (cls.min_assignment_answer_start != None, cls.min_assignment_answer_start)
        ], else_=cls.created)

    def calculate_grade(self, user):
        from . import CourseGrade
        CourseGrade.calculate_grade(self, user)

    def calculate_group_grade(self, group):
        from . import CourseGrade
        CourseGrade.calculate_group_grade(self, group)

    def calculate_grades(self):
        from . import CourseGrade
        CourseGrade.calculate_grades(self)

    def clear_lti_links(self):
        """Detach this course (and its assignments) from all linked LTI contexts."""
        for lti_context in self.lti_contexts.all():
            lti_context.compair_course_id = None
        for assignment in self.assignments.all():
            assignment.clear_lti_links()

    @classmethod
    def get_by_uuid_or_404(cls, model_uuid, joinedloads=None, title=None, message=None):
        """Fetch a course by uuid or abort with a course-specific 404."""
        # fix: avoid the shared mutable default argument anti-pattern
        if joinedloads is None:
            joinedloads = []
        if not title:
            title = "Course Unavailable"
        if not message:
            message = "Sorry, this course was deleted or is no longer accessible."
        return super(cls, cls).get_by_uuid_or_404(model_uuid, joinedloads, title, message)

    @classmethod
    def get_active_by_uuid_or_404(cls, model_uuid, joinedloads=None, title=None, message=None):
        """Fetch an active (not soft-deleted) course by uuid or abort with a 404."""
        if joinedloads is None:
            joinedloads = []
        if not title:
            title = "Course Unavailable"
        if not message:
            message = "Sorry, this course was deleted or is no longer accessible."
        return super(cls, cls).get_active_by_uuid_or_404(model_uuid, joinedloads, title, message)

    @classmethod
    def __declare_last__(cls):
        from .lti_models import LTIContext
        from . import Assignment, UserCourse, CourseRole
        super(cls, cls).__declare_last__()

        # True once any active group-answer assignment has entered (or passed) its
        # comparison phase — groups can no longer be reshuffled
        cls.groups_locked = column_property(
            exists([1]).
            where(and_(
                Assignment.course_id == cls.id,
                Assignment.active == True,
                Assignment.enable_group_answers == True,
                or_(
                    and_(Assignment.compare_start == None, Assignment.answer_end <= sql_utcnow()),
                    and_(Assignment.compare_start != None, Assignment.compare_start <= sql_utcnow())
                )
            )),
            deferred=True,
            group="group_associates"
        )

        cls.min_assignment_answer_start = column_property(
            select([func.min(Assignment.answer_start)]).
            where(and_(
                Assignment.course_id == cls.id,
                Assignment.active == True
            )).
            scalar_subquery(),
            deferred=True,
            group="min_associates"
        )

        cls.lti_context_count = column_property(
            select([func.count(LTIContext.id)]).
            where(LTIContext.compair_course_id == cls.id).
            scalar_subquery(),
            deferred=True,
            group="counts"
        )

        cls.lti_context_sis_count = column_property(
            select([func.count(LTIContext.id)]).
            where(and_(
                LTIContext.compair_course_id == cls.id,
                LTIContext.lis_course_offering_sourcedid != None,
                LTIContext.lis_course_section_sourcedid != None,
            )).
            scalar_subquery(),
            deferred=True,
            group="counts"
        )

        cls.assignment_count = column_property(
            select([func.count(Assignment.id)]).
            where(and_(
                Assignment.course_id == cls.id,
                Assignment.active == True
            )).
            scalar_subquery(),
            deferred=True,
            group="counts"
        )

        # assignments whose answer period has started (visible to students)
        cls.student_assignment_count = column_property(
            select([func.count(Assignment.id)]).
            where(and_(
                Assignment.course_id == cls.id,
                Assignment.active == True,
                Assignment.answer_start <= sql_utcnow()
            )).
            scalar_subquery(),
            deferred=True,
            group="counts"
        )

        cls.student_count = column_property(
            select([func.count(UserCourse.id)]).
            where(and_(
                UserCourse.course_id == cls.id,
                UserCourse.course_role == CourseRole.student
            )).
            scalar_subquery(),
            deferred=True,
            group="counts"
        )
def active(cls):
    # NOTE(review): looks like a declared-attr style column factory for a soft-delete
    # "active" flag (presumably belonging to an ActiveMixin-like class whose header and
    # decorator are not visible in this chunk) — confirm the enclosing context before editing.
    return db.Column(db.Boolean(), default=True, nullable=False, index=True)
class Assignment(DefaultTableMixin, UUIDMixin, ActiveMixin, WriteTrackingMixin): __tablename__ = 'assignment' # table columns user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete="CASCADE"), nullable=False) course_id = db.Column(db.Integer, db.ForeignKey('course.id', ondelete="CASCADE"), nullable=False) file_id = db.Column(db.Integer, db.ForeignKey('file.id', ondelete="SET NULL"), nullable=True) name = db.Column(db.String(255)) description = db.Column(db.Text) answer_start = db.Column(db.DateTime(timezone=True)) answer_end = db.Column(db.DateTime(timezone=True)) compare_start = db.Column(db.DateTime(timezone=True), nullable=True) compare_end = db.Column(db.DateTime(timezone=True), nullable=True) self_eval_start = db.Column(db.DateTime(timezone=True), nullable=True) self_eval_end = db.Column(db.DateTime(timezone=True), nullable=True) self_eval_instructions = db.Column(db.Text, nullable=True) number_of_comparisons = db.Column(db.Integer, nullable=False) students_can_reply = db.Column(db.Boolean(), default=False, nullable=False) enable_self_evaluation = db.Column(db.Boolean(), default=False, nullable=False) enable_group_answers = db.Column(db.Boolean(), default=False, nullable=False) scoring_algorithm = db.Column(Enum(ScoringAlgorithm), nullable=True, default=ScoringAlgorithm.elo) pairing_algorithm = db.Column(Enum(PairingAlgorithm), nullable=True, default=PairingAlgorithm.random) rank_display_limit = db.Column(db.Integer, nullable=True) educators_can_compare = db.Column(db.Boolean(), default=False, nullable=False) answer_grade_weight = db.Column(db.Integer, default=1, nullable=False) comparison_grade_weight = db.Column(db.Integer, default=1, nullable=False) self_evaluation_grade_weight = db.Column(db.Integer, default=1, nullable=False) peer_feedback_prompt = db.Column(db.Text) # relationships # user via User Model # course via Course Model # file via File Model # assignment many-to-many criterion with association assignment_criteria assignment_criteria = 
db.relationship( "AssignmentCriterion", back_populates="assignment", order_by=AssignmentCriterion.position.asc(), collection_class=ordering_list('position', count_from=0)) answers = db.relationship("Answer", backref="assignment", lazy="dynamic", order_by=Answer.submission_date.desc()) comparisons = db.relationship("Comparison", backref="assignment", lazy="dynamic") comparison_examples = db.relationship("ComparisonExample", backref="assignment", lazy="dynamic") scores = db.relationship("AnswerScore", backref="assignment", lazy="dynamic") criteria_scores = db.relationship("AnswerCriterionScore", backref="assignment", lazy="dynamic") grades = db.relationship("AssignmentGrade", backref="assignment", lazy='dynamic') # lti lti_resource_links = db.relationship("LTIResourceLink", backref="compair_assignment", lazy='dynamic') # hybrid and other functions course_uuid = association_proxy('course', 'uuid') user_avatar = association_proxy('user', 'avatar') user_uuid = association_proxy('user', 'uuid') user_displayname = association_proxy('user', 'displayname') user_student_number = association_proxy('user', 'student_number') user_fullname = association_proxy('user', 'fullname') user_fullname_sortable = association_proxy('user', 'fullname_sortable') user_system_role = association_proxy('user', 'system_role') lti_course_linked = association_proxy('course', 'lti_linked') @hybrid_property def lti_linked(self): return self.lti_resource_link_count > 0 @hybrid_property def criteria(self): criteria = [] for assignment_criterion in self.assignment_criteria: if assignment_criterion.active and assignment_criterion.criterion.active: criterion = assignment_criterion.criterion criterion.weight = assignment_criterion.weight criteria.append(criterion) return criteria @hybrid_property def compared(self): return self.all_compare_count > 0 @hybrid_property def answered(self): return self.comparable_answer_count > 0 def completed_comparison_count_for_user(self, user_id): return self.comparisons \ 
.filter_by(user_id=user_id, completed=True).count()

    def draft_comparison_count_for_user(self, user_id):
        """Return how many draft (started but unsubmitted) comparisons the user has for this assignment."""
        return self.comparisons \
            .filter_by(user_id=user_id, draft=True) \
            .count()

    def clear_lti_links(self):
        # Detach every LTI resource link pointing at this assignment;
        # the link rows themselves are kept, only the association is cleared.
        for lti_resource_link in self.lti_resource_links.all():
            lti_resource_link.compair_assignment_id = None

    @hybrid_property
    def available(self):
        """True once the answer period has started."""
        # The isoformat/parse round-trip produces a tz-aware "now" with the
        # same precision as the stored timestamps (which are naive UTC).
        now = dateutil.parser.parse(
            datetime.datetime.utcnow().replace(tzinfo=pytz.utc).isoformat())
        answer_start = self.answer_start.replace(tzinfo=pytz.utc)
        return answer_start <= now

    @hybrid_property
    def answer_period(self):
        """True while now is within [answer_start, answer_end)."""
        now = dateutil.parser.parse(
            datetime.datetime.utcnow().replace(tzinfo=pytz.utc).isoformat())
        answer_start = self.answer_start.replace(tzinfo=pytz.utc)
        answer_end = self.answer_end.replace(tzinfo=pytz.utc)
        return answer_start <= now < answer_end

    @hybrid_property
    def answer_grace(self):
        """Like answer_period, but the end is extended by a 60-second grace window."""
        now = dateutil.parser.parse(
            datetime.datetime.utcnow().replace(tzinfo=pytz.utc).isoformat())
        grace = self.answer_end.replace(tzinfo=pytz.utc) + datetime.timedelta(
            seconds=60)  # add 60 seconds
        answer_start = self.answer_start.replace(tzinfo=pytz.utc)
        return answer_start <= now < grace

    @hybrid_property
    def compare_period(self):
        """True while comparing is open.

        With no explicit compare period, comparing opens when answering ends
        and never closes; otherwise it is [compare_start, compare_end).
        """
        now = dateutil.parser.parse(
            datetime.datetime.utcnow().replace(tzinfo=pytz.utc).isoformat())
        answer_end = self.answer_end.replace(tzinfo=pytz.utc)
        if not self.compare_start:
            return now >= answer_end
        else:
            return self.compare_start.replace(
                tzinfo=pytz.utc) <= now < self.compare_end.replace(
                tzinfo=pytz.utc)

    @hybrid_property
    def compare_grace(self):
        """Like compare_period, but with a 60-second grace window after compare_end."""
        now = dateutil.parser.parse(
            datetime.datetime.utcnow().replace(tzinfo=pytz.utc).isoformat())
        if self.compare_start and self.compare_end:
            grace = self.compare_end.replace(
                tzinfo=pytz.utc) + datetime.timedelta(
                seconds=60)  # add 60 seconds
            compare_start = self.compare_start.replace(tzinfo=pytz.utc)
            return compare_start <= now < grace
        else:
            # no explicit compare period: open-ended once answering ends
            answer_end = self.answer_end.replace(tzinfo=pytz.utc)
            return now >= answer_end

    @hybrid_property
    def after_comparing(self):
        """True once the comparison phase is over (or, with no explicit compare period, once answering ended)."""
        now = dateutil.parser.parse(
            datetime.datetime.utcnow().replace(tzinfo=pytz.utc).isoformat())
        answer_end = self.answer_end.replace(tzinfo=pytz.utc)
        # compare period not set
        if not self.compare_start:
            return now >= answer_end
        # compare period is set
        else:
            return now >= self.compare_end.replace(tzinfo=pytz.utc)

    @hybrid_property
    def self_eval_period(self):
        """True while self-evaluation is open.

        Requires enable_self_evaluation. With an explicit window it is
        [self_eval_start, self_eval_end); otherwise it opens when the compare
        period starts (or when answering ends if there is no compare period)
        and never closes.
        """
        now = dateutil.parser.parse(
            datetime.datetime.utcnow().replace(tzinfo=pytz.utc).isoformat())
        if not self.enable_self_evaluation:
            return False
        elif self.self_eval_start:
            return self.self_eval_start.replace(
                tzinfo=pytz.utc) <= now < self.self_eval_end.replace(
                tzinfo=pytz.utc)
        else:
            if self.compare_start:
                return now >= self.compare_start.replace(tzinfo=pytz.utc)
            else:
                return now >= self.answer_end.replace(tzinfo=pytz.utc)

    @hybrid_property
    def self_eval_grace(self):
        """Like self_eval_period, but with a 60-second grace window after self_eval_end."""
        now = dateutil.parser.parse(
            datetime.datetime.utcnow().replace(tzinfo=pytz.utc).isoformat())
        if not self.enable_self_evaluation:
            return False
        elif self.self_eval_start:
            grace = self.self_eval_end.replace(
                tzinfo=pytz.utc) + datetime.timedelta(
                seconds=60)  # add 60 seconds
            return self.self_eval_start.replace(tzinfo=pytz.utc) <= now < grace
        else:
            if self.compare_start:
                return now >= self.compare_start.replace(tzinfo=pytz.utc)
            else:
                return now >= self.answer_end.replace(tzinfo=pytz.utc)

    @hybrid_property
    def evaluation_count(self):
        # completed comparisons plus submitted self-evaluations
        return self.compare_count + self.self_evaluation_count

    @hybrid_property
    def total_comparisons_required(self):
        # regular comparisons plus the practice (example) comparisons
        return self.number_of_comparisons + self.comparison_example_count

    @hybrid_property
    def total_steps_required(self):
        # one extra step when a self-evaluation is required
        return self.total_comparisons_required + (
            1 if self.enable_self_evaluation else 0)

    def calculate_grade(self, user):
        """Recalculate this assignment's grade for a single user."""
        from . import AssignmentGrade
        AssignmentGrade.calculate_grade(self, user)

    def calculate_group_grade(self, group):
        """Recalculate this assignment's grade for a group."""
        from . import AssignmentGrade
        AssignmentGrade.calculate_group_grade(self, group)

    def calculate_grades(self):
        """Recalculate this assignment's grades for everyone."""
        from . import AssignmentGrade
        AssignmentGrade.calculate_grades(self)

    @classmethod
    def validate_periods(cls, course_start, course_end, answer_start, answer_end,
            compare_start, compare_end, self_eval_start, self_eval_end):
        """Validate the ordering of the assignment's date periods.

        Returns a (valid, error_message) tuple; error_message is None when
        valid. All inputs are treated as UTC; course bounds and the optional
        compare / self-evaluation periods may be None.
        """
        # validate answer period
        if answer_start == None:
            return (False, "No answer period start time provided.")
        elif answer_end == None:
            return (False, "No answer period end time provided.")

        course_start = course_start.replace(
            tzinfo=pytz.utc) if course_start else None
        course_end = course_end.replace(
            tzinfo=pytz.utc) if course_end else None
        answer_start = answer_start.replace(tzinfo=pytz.utc)
        answer_end = answer_end.replace(tzinfo=pytz.utc)

        # course start <= answer start < answer end <= course end
        if course_start and course_start > answer_start:
            return (
                False,
                "Answer period start time must be after the course start time."
            )
        elif answer_start >= answer_end:
            return (
                False,
                "Answer period end time must be after the answer start time.")
        elif course_end and course_end < answer_end:
            return (
                False,
                "Answer period end time must be before the course end time.")

        # validate compare period (start and end must be provided together)
        if compare_start == None and compare_end != None:
            return (False, "No compare period start time provided.")
        elif compare_start != None and compare_end == None:
            return (False, "No compare period end time provided.")
        elif compare_start != None and compare_end != None:
            compare_start = compare_start.replace(tzinfo=pytz.utc)
            compare_end = compare_end.replace(tzinfo=pytz.utc)
            # answer start < compare start < compare end <= course end
            if answer_start > compare_start:
                return (
                    False,
                    "Compare period start time must be after the answer start time."
                )
            elif compare_start > compare_end:
                return (
                    False,
                    "Compare period end time must be after the compare start time."
                )
            elif course_end and course_end < compare_end:
                return (
                    False,
                    "Compare period end time must be before the course end time."
                )

        # validate self-eval period (start and end must be provided together)
        if self_eval_start == None and self_eval_end != None:
            return (False, "No self-evaluation start time provided.")
        elif self_eval_start != None and self_eval_end == None:
            return (False, "No self-evaluation end time provided.")
        elif self_eval_start != None and self_eval_end != None:
            self_eval_start = self_eval_start.replace(tzinfo=pytz.utc)
            self_eval_end = self_eval_end.replace(tzinfo=pytz.utc)
            # self_eval start < self_eval end <= course end
            if self_eval_start > self_eval_end:
                return (
                    False,
                    "Self-evaluation end time must be after the self-evaluation start time."
                )
            elif course_end and course_end < self_eval_end:
                return (
                    False,
                    "Self-evaluation end time must be before the course end time."
                )
            # if comparison period defined: compare start < self_eval start
            if compare_start != None and compare_start > self_eval_start:
                return (
                    False,
                    "Self-evaluation start time must be after the compare start time."
                )
            # else: answer end < self_eval start
            # elif compare_start == None and answer_end >= self_eval_start:
            #     return (False, "Self-evaluation start time must be after the answer end time.")

        return (True, None)

    @classmethod
    def get_by_uuid_or_404(cls, model_uuid, joinedloads=[], title=None, message=None):
        """Fetch by uuid or abort 404 with an assignment-specific title/message."""
        if not title:
            title = "Assignment Unavailable"
        if not message:
            message = "Sorry, this assignment was deleted or is no longer accessible."
        return super(cls, cls).get_by_uuid_or_404(
            model_uuid, joinedloads, title, message)

    @classmethod
    def get_active_by_uuid_or_404(cls, model_uuid, joinedloads=[], title=None, message=None):
        """Fetch an active row by uuid or abort 404 with an assignment-specific title/message."""
        if not title:
            title = "Assignment Unavailable"
        if not message:
            message = "Sorry, this assignment was deleted or is no longer accessible."
        return super(cls, cls).get_active_by_uuid_or_404(
            model_uuid, joinedloads, title, message)

    @classmethod
    def __declare_last__(cls):
        # Deferred correlated-count columns, grouped under "counts" so they
        # can be undeferred together when serializing.
        from . import UserCourse, CourseRole, LTIResourceLink, Group
        super(cls, cls).__declare_last__()

        # submitted, non-practice answers from non-dropped users or active groups
        cls.answer_count = column_property(
            select([func.count(Answer.id)])
            .select_from(
                join(Answer, UserCourse,
                     UserCourse.user_id == Answer.user_id, isouter=True)
                .join(Group, Group.id == Answer.group_id, isouter=True))
            .where(and_(
                Answer.assignment_id == cls.id,
                Answer.active == True,
                Answer.draft == False,
                Answer.practice == False,
                or_(
                    and_(
                        UserCourse.course_id == cls.course_id,
                        UserCourse.course_role != CourseRole.dropped,
                        UserCourse.id != None),
                    and_(Group.course_id == cls.course_id,
                         Group.active == True,
                         Group.id != None),
                ))).scalar_subquery(),
            deferred=True, group="counts")

        # same as answer_count but restricted to the student course role
        cls.student_answer_count = column_property(
            select([func.count(Answer.id)])
            .select_from(
                join(Answer, UserCourse,
                     UserCourse.user_id == Answer.user_id, isouter=True)
                .join(Group, Group.id == Answer.group_id, isouter=True))
            .where(and_(
                Answer.assignment_id == cls.id,
                Answer.active == True,
                Answer.draft == False,
                Answer.practice == False,
                or_(
                    and_(
                        UserCourse.course_id == cls.course_id,
                        UserCourse.course_role == CourseRole.student,
                        UserCourse.id != None),
                    and_(Group.course_id == cls.course_id,
                         Group.active == True,
                         Group.id != None),
                ))).scalar_subquery(),
            deferred=True, group="counts")

        # Comparable answer count
        # To be consistent with student_answer_count, we are not counting
        # answers from sys admin here
        cls.comparable_answer_count = column_property(
            select([func.count(Answer.id)])
            .select_from(
                join(Answer, UserCourse,
                     UserCourse.user_id == Answer.user_id, isouter=True)
                .join(Group, Group.id == Answer.group_id, isouter=True))
            .where(and_(
                Answer.assignment_id == cls.id,
                Answer.active == True,
                Answer.draft == False,
                Answer.practice == False,
                Answer.comparable == True,
                or_(
                    and_(
                        UserCourse.course_id == cls.course_id,
                        UserCourse.course_role != CourseRole.dropped,
                        UserCourse.id != None),
                    and_(Group.course_id == cls.course_id,
                         Group.active == True,
                         Group.id != None),
                ))).scalar_subquery(),
            deferred=True, group="counts")

        # active practice (example) comparisons attached to this assignment
        cls.comparison_example_count = column_property(
            select([func.count(ComparisonExample.id)])
            .where(and_(
                ComparisonExample.assignment_id == cls.id,
                ComparisonExample.active == True)).scalar_subquery(),
            deferred=True, group="counts")

        # every comparison row, completed or not
        cls.all_compare_count = column_property(
            select([func.count(Comparison.id)])
            .where(and_(
                Comparison.assignment_id == cls.id)).scalar_subquery(),
            deferred=True, group="counts")

        # completed comparisons only
        cls.compare_count = column_property(
            select([func.count(Comparison.id)])
            .where(and_(
                Comparison.assignment_id == cls.id,
                Comparison.completed == True)).scalar_subquery(),
            deferred=True, group="counts")

        # submitted (non-draft) self-evaluation comments on this assignment's answers
        cls.self_evaluation_count = column_property(
            select([func.count(AnswerComment.id)])
            .select_from(
                join(AnswerComment, Answer,
                     AnswerComment.answer_id == Answer.id))
            .where(and_(
                AnswerComment.comment_type == AnswerCommentType.self_evaluation,
                AnswerComment.active == True,
                AnswerComment.answer_id == Answer.id,
                AnswerComment.draft == False,
                Answer.assignment_id == cls.id)).scalar_subquery(),
            deferred=True, group="counts")

        # LTI resource links bound to this assignment
        cls.lti_resource_link_count = column_property(
            select([func.count(LTIResourceLink.id)])
            .where(
                LTIResourceLink.compair_assignment_id == cls.id).scalar_subquery(),
            deferred=True, group="counts")
class Comparison(DefaultTableMixin, UUIDMixin, AttemptMixin, WriteTrackingMixin):
    """A single pairwise comparison of two answers by one user.

    Holds the chosen overall winner, the per-criterion decisions
    (ComparisonCriterion rows), and the bookkeeping needed by the pairing
    and scoring algorithm packages.
    """
    __tablename__ = 'comparison'

    # table columns
    assignment_id = db.Column(db.Integer, db.ForeignKey('assignment.id', ondelete="CASCADE"),
        nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete="CASCADE"),
        nullable=False)
    answer1_id = db.Column(db.Integer, db.ForeignKey('answer.id', ondelete="CASCADE"),
        nullable=False)
    answer2_id = db.Column(db.Integer, db.ForeignKey('answer.id', ondelete="CASCADE"),
        nullable=False)
    # overall winner; NULL until the user submits a decision
    winner = db.Column(EnumType(WinningAnswer), nullable=True)
    comparison_example_id = db.Column(db.Integer,
        db.ForeignKey('comparison_example.id', ondelete="SET NULL"), nullable=True)
    # the round number this pair was drawn in (see create_new_comparison)
    round_compared = db.Column(db.Integer, default=0, nullable=False)
    completed = db.Column(db.Boolean(), default=False, nullable=False, index=True)
    # algorithm used to pick this pair; kept per-row since the assignment
    # setting can change over time
    pairing_algorithm = db.Column(EnumType(PairingAlgorithm), nullable=True,
        default=PairingAlgorithm.random)

    # relationships
    # assignment via Assignment Model
    # user via User Model
    # comparison_example via ComparisonExample Model
    comparison_criteria = db.relationship("ComparisonCriterion",
        backref="comparison", lazy='immediate')
    answer1 = db.relationship("Answer", foreign_keys=[answer1_id])
    answer2 = db.relationship("Answer", foreign_keys=[answer2_id])

    # hybrid and other functions
    course_id = association_proxy('assignment', 'course_id',
        creator=lambda course_id: import_module(
            'compair.models.assignment').Assignment(course_id=course_id))
    course_uuid = association_proxy('assignment', 'course_uuid')
    assignment_uuid = association_proxy('assignment', 'uuid')
    answer1_uuid = association_proxy('answer1', 'uuid')
    answer2_uuid = association_proxy('answer2', 'uuid')
    user_avatar = association_proxy('user', 'avatar')
    user_uuid = association_proxy('user', 'uuid')
    user_displayname = association_proxy('user', 'displayname')
    user_student_number = association_proxy('user', 'student_number')
    user_fullname = association_proxy('user', 'fullname')
    user_fullname_sortable = association_proxy('user', 'fullname_sortable')
    user_system_role = association_proxy('user', 'system_role')

    @hybrid_property
    def draft(self):
        # a comparison the user has touched (modified after creation) but
        # not yet completed
        return self.modified != self.created and not self.completed

    @draft.expression
    def draft(cls):
        # SQL mirror of the python-side draft property
        return and_(cls.modified != cls.created, cls.completed == False)

    @classmethod
    def get_by_uuid_or_404(cls, model_uuid, joinedloads=[], title=None, message=None):
        """Fetch by uuid or abort 404 with a comparison-specific title/message."""
        if not title:
            title = "Comparison Unavailable"
        if not message:
            message = "Sorry, this comparison was deleted or is no longer accessible."
        return super(cls, cls).get_by_uuid_or_404(
            model_uuid, joinedloads, title, message)

    @classmethod
    def __declare_last__(cls):
        super(cls, cls).__declare_last__()

    def comparison_pair_winner(self):
        """Translate this row's WinningAnswer into the algorithm package's ComparisonWinner (or None)."""
        from . import WinningAnswer
        winner = None
        if self.winner == WinningAnswer.answer1:
            winner = ComparisonWinner.key1
        elif self.winner == WinningAnswer.answer2:
            winner = ComparisonWinner.key2
        elif self.winner == WinningAnswer.draw:
            winner = ComparisonWinner.draw
        return winner

    def convert_to_comparison_pair(self):
        """Convert this row into the algorithm package's ComparisonPair."""
        return ComparisonPair(
            key1=self.answer1_id,
            key2=self.answer2_id,
            winner=self.comparison_pair_winner())

    @classmethod
    def _get_new_comparison_pair(cls, course_id, assignment_id, user_id, group_id,
            pairing_algorithm, comparisons):
        """Ask the pairing algorithm for the next pair of answer ids to compare.

        Excludes the current user's own answers and answers from dropped
        users; when group_id is set, also excludes that group's answers.
        `comparisons` is the user's full comparison history for this
        assignment. Returns the algorithm's comparison pair object.
        """
        from . import Assignment, UserCourse, CourseRole, Answer, AnswerScore, \
            PairingAlgorithm, AnswerCriterionScore, AssignmentCriterion, Group

        # exclude current user and those without a proper role.
        # note that sys admin (not enrolled in the course and thus no course
        # role) can create answers. they are considered eligible
        ineligibles = UserCourse.query \
            .with_entities(UserCourse.user_id) \
            .filter(and_(
                UserCourse.course_id == course_id,
                UserCourse.course_role == CourseRole.dropped
            )) \
            .all()
        ineligible_user_ids = [ineligible.user_id for ineligible in ineligibles]
        ineligible_user_ids.append(user_id)

        query = Answer.query \
            .with_entities(Answer, AnswerScore.score) \
            .outerjoin(AnswerScore, AnswerScore.answer_id == Answer.id) \
            .filter(and_(
                or_(
                    ~Answer.user_id.in_(ineligible_user_ids),
                    Answer.user_id == None  # don't filter out group answers
                ),
                Answer.assignment_id == assignment_id,
                Answer.active == True,
                Answer.practice == False,
                Answer.draft == False,
                Answer.comparable == True
            ))

        if group_id:
            # keep answers from other groups or with no group at all
            query = query.filter(or_(
                Answer.group_id != group_id,
                Answer.group_id == None
            ))

        answers_with_score = query.all()

        scored_objects = []
        for answer_with_score in answers_with_score:
            scored_objects.append(ScoredObject(
                key=answer_with_score.Answer.id,
                score=answer_with_score.score,
                rounds=answer_with_score.Answer.round,
                variable1=None, variable2=None,
                wins=None, loses=None, opponents=None
            ))

        comparison_pairs = [
            comparison.convert_to_comparison_pair() for comparison in comparisons
        ]

        # adaptive min delta algo requires extra criterion specific parameters
        if pairing_algorithm == PairingAlgorithm.adaptive_min_delta:
            # retrieve extra criterion score data
            answer_criterion_scores = AnswerCriterionScore.query \
                .with_entities(AnswerCriterionScore.answer_id,
                    AnswerCriterionScore.criterion_id, AnswerCriterionScore.score) \
                .join(Answer) \
                .filter(and_(
                    Answer.user_id.notin_(ineligible_user_ids),
                    Answer.assignment_id == assignment_id,
                    Answer.active == True,
                    Answer.practice == False,
                    Answer.draft == False
                )) \
                .all()

            assignment_criterion_weights = AssignmentCriterion.query \
                .with_entities(AssignmentCriterion.criterion_id,
                    AssignmentCriterion.weight) \
                .filter(and_(
                    AssignmentCriterion.assignment_id == assignment_id,
                    AssignmentCriterion.active == True
                )) \
                .all()

            # answer_id -> {criterion_id: score}
            criterion_scores = {}
            for criterion_score in answer_criterion_scores:
                scores = criterion_scores.setdefault(criterion_score.answer_id, {})
                scores[criterion_score.criterion_id] = criterion_score.score

            # criterion_id -> weight
            criterion_weights = {}
            for the_weight in assignment_criterion_weights:
                criterion_weights[the_weight.criterion_id] = the_weight.weight

            comparison_pair = generate_pair(
                package_name=pairing_algorithm.value,
                scored_objects=scored_objects,
                comparison_pairs=comparison_pairs,
                criterion_scores=criterion_scores,
                criterion_weights=criterion_weights,
                log=current_app.logger)
        else:
            comparison_pair = generate_pair(
                package_name=pairing_algorithm.value,
                scored_objects=scored_objects,
                comparison_pairs=comparison_pairs,
                log=current_app.logger)

        return comparison_pair

    @classmethod
    def create_new_comparison(cls, assignment_id, user_id, skip_comparison_examples):
        """Create and persist the user's next comparison for the assignment.

        Unfinished comparison examples are served first (unless
        skip_comparison_examples); otherwise the pairing algorithm picks a
        fresh pair and both answers' round counters are bumped. One empty
        ComparisonCriterion row is created per assignment criterion.
        Returns the committed Comparison.
        """
        from . import Assignment, ComparisonExample, ComparisonCriterion, \
            UserCourse, CourseRole

        # get all comparisons for the user
        comparisons = Comparison.query \
            .filter_by(
                user_id=user_id,
                assignment_id=assignment_id
            ) \
            .all()

        is_comparison_example_set = False
        answer1 = None
        answer2 = None
        comparison_example_id = None
        round_compared = 0

        assignment = Assignment.query.get(assignment_id)
        pairing_algorithm = assignment.pairing_algorithm
        if pairing_algorithm is None:
            pairing_algorithm = PairingAlgorithm.random

        # set user group restriction if
        # - enable_group_answers, user part of a group, and user is student
        group_id = None
        if assignment.enable_group_answers:
            user_course = UserCourse.query \
                .filter_by(
                    user_id=user_id,
                    course_id=assignment.course_id,
                    course_role=CourseRole.student
                ) \
                .first()
            if user_course and user_course.group_id:
                group_id = user_course.group_id

        if not skip_comparison_examples:
            # check comparison examples first
            comparison_examples = ComparisonExample.query \
                .filter_by(
                    assignment_id=assignment_id,
                    active=True
                ) \
                .all()

            # check if user has not completed all comparison examples
            for comparison_example in comparison_examples:
                comparison = next(
                    (c for c in comparisons
                     if c.comparison_example_id == comparison_example.id),
                    None)
                if comparison is None:
                    is_comparison_example_set = True
                    answer1 = comparison_example.answer1
                    answer2 = comparison_example.answer2
                    comparison_example_id = comparison_example.id
                    break

        if not is_comparison_example_set:
            comparison_pair = Comparison._get_new_comparison_pair(
                assignment.course_id, assignment_id, user_id, group_id,
                pairing_algorithm, comparisons)
            answer1 = Answer.query.get(comparison_pair.key1)
            answer2 = Answer.query.get(comparison_pair.key2)
            round_compared = min(answer1.round + 1, answer2.round + 1)

            # update round counters without touching the answers' write tracking
            answers = [answer1, answer2]
            for answer in answers:
                answer._write_tracking_enabled = False
                answer.round += 1
                db.session.add(answer)

        comparison = Comparison(
            assignment_id=assignment_id,
            user_id=user_id,
            answer1_id=answer1.id,
            answer2_id=answer2.id,
            winner=None,
            round_compared=round_compared,
            comparison_example_id=comparison_example_id,
            pairing_algorithm=pairing_algorithm)
        db.session.add(comparison)

        for criterion in assignment.criteria:
            comparison_criterion = ComparisonCriterion(
                comparison=comparison,
                criterion_id=criterion.id,
                winner=None,
                content=None,
            )
            # fix: the original re-added `comparison` here; add the
            # per-criterion row that was just built instead
            db.session.add(comparison_criterion)
        db.session.commit()

        return comparison

    @classmethod
    def update_scores_1vs1(cls, comparison):
        """Incrementally rescore the two answers of one submitted comparison.

        Recomputes the overall AnswerScore and the per-criterion
        AnswerCriterionScore rows for both answers, creating missing rows as
        needed, then commits. Returns the list of updated AnswerScore rows.
        """
        from . import AnswerScore, AnswerCriterionScore, \
            ComparisonCriterion, ScoringAlgorithm

        assignment = comparison.assignment
        answer1_id = comparison.answer1_id
        answer2_id = comparison.answer2_id

        # get all other comparisons involving either answer, excluding the
        # one being calculated
        other_comparisons = Comparison.query \
            .options(load_only('winner', 'answer1_id', 'answer2_id')) \
            .filter(and_(
                Comparison.assignment_id == assignment.id,
                Comparison.id != comparison.id,
                or_(
                    Comparison.answer1_id.in_([answer1_id, answer2_id]),
                    Comparison.answer2_id.in_([answer1_id, answer2_id])
                )
            )) \
            .all()

        scores = AnswerScore.query \
            .filter(AnswerScore.answer_id.in_([answer1_id, answer2_id])) \
            .all()

        # same, but for per-criterion decisions
        other_criterion_comparisons = ComparisonCriterion.query \
            .join("comparison") \
            .filter(and_(
                Comparison.assignment_id == assignment.id,
                Comparison.id != comparison.id,
                or_(
                    Comparison.answer1_id.in_([answer1_id, answer2_id]),
                    Comparison.answer2_id.in_([answer1_id, answer2_id])
                )
            )) \
            .all()

        criteria_scores = AnswerCriterionScore.query \
            .filter(and_(
                AnswerCriterionScore.assignment_id == assignment.id,
                AnswerCriterionScore.answer_id.in_([answer1_id, answer2_id])
            )) \
            .all()

        # update answer criterion scores
        updated_criteria_scores = []
        for comparison_criterion in comparison.comparison_criteria:
            criterion_id = comparison_criterion.criterion_id

            criterion_score1 = next(
                (criterion_score for criterion_score in criteria_scores
                 if criterion_score.answer_id == answer1_id
                 and criterion_score.criterion_id == criterion_id),
                AnswerCriterionScore(assignment_id=assignment.id,
                    answer_id=answer1_id, criterion_id=criterion_id))
            updated_criteria_scores.append(criterion_score1)
            key1_scored_object = criterion_score1.convert_to_scored_object()

            criterion_score2 = next(
                (criterion_score for criterion_score in criteria_scores
                 if criterion_score.answer_id == answer2_id
                 and criterion_score.criterion_id == criterion_id),
                AnswerCriterionScore(assignment_id=assignment.id,
                    answer_id=answer2_id, criterion_id=criterion_id))
            updated_criteria_scores.append(criterion_score2)
            key2_scored_object = criterion_score2.convert_to_scored_object()

            criterion_result_1, criterion_result_2 = calculate_score_1vs1(
                package_name=assignment.scoring_algorithm.value,
                key1_scored_object=key1_scored_object,
                key2_scored_object=key2_scored_object,
                winner=comparison_criterion.comparison_pair_winner(),
                other_comparison_pairs=[
                    cc.convert_to_comparison_pair()
                    for cc in other_criterion_comparisons
                    if cc.criterion_id == criterion_id
                ],
                log=current_app.logger)

            for score, result in [(criterion_score1, criterion_result_1),
                    (criterion_score2, criterion_result_2)]:
                score.score = result.score
                score.variable1 = result.variable1
                score.variable2 = result.variable2
                score.rounds = result.rounds
                score.wins = result.wins
                score.loses = result.loses
                score.opponents = result.opponents

        # update overall answer scores
        updated_scores = []
        score1 = next(
            (score for score in scores if score.answer_id == answer1_id),
            AnswerScore(assignment_id=assignment.id, answer_id=answer1_id))
        updated_scores.append(score1)
        key1_scored_object = score1.convert_to_scored_object()

        score2 = next(
            (score for score in scores if score.answer_id == answer2_id),
            AnswerScore(assignment_id=assignment.id, answer_id=answer2_id))
        updated_scores.append(score2)
        key2_scored_object = score2.convert_to_scored_object()

        result_1, result_2 = calculate_score_1vs1(
            package_name=assignment.scoring_algorithm.value,
            key1_scored_object=key1_scored_object,
            key2_scored_object=key2_scored_object,
            winner=comparison.comparison_pair_winner(),
            other_comparison_pairs=[
                c.convert_to_comparison_pair() for c in other_comparisons
            ],
            log=current_app.logger)

        for score, result in [(score1, result_1), (score2, result_2)]:
            score.score = result.score
            score.variable1 = result.variable1
            score.variable2 = result.variable2
            score.rounds = result.rounds
            score.wins = result.wins
            score.loses = result.loses
            score.opponents = result.opponents

        db.session.add_all(updated_criteria_scores)
        db.session.add_all(updated_scores)
        db.session.commit()

        return updated_scores

    @classmethod
    def calculate_scores(cls, assignment_id):
        """Recompute every answer score for an assignment from scratch.

        Rebuilds both the overall AnswerScore rows and the per-criterion
        AnswerCriterionScore rows from the assignment's full comparison
        history, then commits.
        """
        from . import AnswerScore, AnswerCriterionScore, \
            AssignmentCriterion, ScoringAlgorithm

        # NOTE(review): relies on a model-level get() helper — confirm
        # DefaultTableMixin (or a sibling mixin) provides it
        assignment = Assignment.get(assignment_id)

        # get all comparisons for this assignment and only load the data we need
        comparisons = Comparison.query \
            .filter(Comparison.assignment_id == assignment_id) \
            .all()
        assignment_criteria = AssignmentCriterion.query \
            .with_entities(AssignmentCriterion.criterion_id) \
            .filter_by(assignment_id=assignment_id, active=True) \
            .all()

        comparison_criteria = []
        comparison_pairs = []
        answer_ids = set()
        for comparison in comparisons:
            answer_ids.add(comparison.answer1_id)
            answer_ids.add(comparison.answer2_id)
            comparison_criteria.extend(comparison.comparison_criteria)
            comparison_pairs.append(comparison.convert_to_comparison_pair())

        # calculate answer score
        comparison_results = calculate_score(
            package_name=assignment.scoring_algorithm.value,
            comparison_pairs=comparison_pairs,
            log=current_app.logger)

        scores = AnswerScore.query \
            .filter(AnswerScore.answer_id.in_(answer_ids)) \
            .all()
        updated_answer_scores = update_answer_scores(
            scores, assignment_id, comparison_results)
        db.session.add_all(updated_answer_scores)

        # calculate answer criterion scores
        criterion_comparison_results = {}
        for assignment_criterion in assignment_criteria:
            comparison_pairs = []
            for comparison_criterion in comparison_criteria:
                if comparison_criterion.criterion_id != assignment_criterion.criterion_id:
                    continue
                comparison_pairs.append(
                    comparison_criterion.convert_to_comparison_pair())

            criterion_comparison_results[
                assignment_criterion.criterion_id] = calculate_score(
                    package_name=assignment.scoring_algorithm.value,
                    comparison_pairs=comparison_pairs,
                    log=current_app.logger)

        scores = AnswerCriterionScore.query \
            .filter(AnswerCriterionScore.answer_id.in_(answer_ids)) \
            .all()
        updated_answer_criteria_scores = update_answer_criteria_scores(
            scores, assignment_id, criterion_comparison_results)
        db.session.add_all(updated_answer_criteria_scores)

        db.session.commit()