def get_dict_for_old_evaluations_for_submission_part(subm_part):
    """Collect the evaluation history for one submission part.

    Walks the reversion history of ``subm_part`` and returns a list of
    dicts (feedback, assigned_points, user nickname, eval state string,
    creation date) — one per past evaluation whose state is not
    EVALUATION_STATE_NO_EVALUATION.

    Returns an empty list when the part has no versions or no
    evaluation history.
    """
    available_versions = Version.objects.get_for_object(subm_part)
    version_evals = []
    if available_versions.count() > 0:
        # Ask for the evaluation history as of the newest version.
        eval_part_versions, eval_versions = \
            subm_part.get_all_evaluations_for_version(available_versions.count() - 1)
        if eval_versions is not None:
            # NOTE(review): the upper bound count()-1 deliberately skips the
            # newest entry — presumably the *current* evaluation, which is
            # shown elsewhere (see the commented-out hint in get_version).
            # Confirm before "fixing" this as an off-by-one.
            for j in range(0, eval_versions.count() - 1):
                state = eval_versions[j].field_dict['evaluation_state']
                if state != Evaluation.EVALUATION_STATE_NO_EVALUATION:
                    version_eval_dict = {}
                    version_eval_dict['feedback'] = eval_part_versions[j].field_dict['feedback']
                    version_eval_dict['assigned_points'] = eval_part_versions[j].field_dict['assigned_points']
                    # field_dict['user'] holds the FK id, not the object.
                    version_eval_dict['user'] = CustomUser.objects.get(
                        id=eval_versions[j].field_dict['user']).nickname
                    version_eval_dict['eval_state'] = Evaluation.get_eval_state_string(state)
                    version_eval_dict['creation_date'] = eval_versions[j].field_dict['creation_date']
                    version_evals.append(version_eval_dict)
    return version_evals
def get_json_from_subms(submissions, elab_count_start, elab_count_end,
                        tag_select_type, selected_tags, order):
    """Build the JSON-serializable dict for a page of submissions.

    Filters ``submissions`` by tags (``tag_select_type`` 'all' keeps only
    submissions carrying every selected tag; 'none' excludes submissions
    carrying any of them), orders by ``order``, slices the page
    ``[elab_count_start:elab_count_end]`` and assembles, per submission:
    lock info, tags, state, version dates, submitter info (with a
    robohash avatar URL), evaluation data and all elaboration parts
    (including uploads and per-part evaluation history).

    Returns a dict with keys 'tag_statistics', 'full_result_count' and
    'elab_dict'.
    """
    if tag_select_type == 'all':
        for tag in selected_tags:
            submissions = submissions.filter(
                submissiontagrelation__submission_tag__name=tag)
    elif tag_select_type == 'none':
        for tag in selected_tags:
            submissions = submissions.exclude(
                submissiontagrelation__submission_tag__name=tag)
    # Count before slicing so the client can paginate.
    full_result_count = submissions.count()
    # How many of the (filtered) submissions carry each known tag.
    tag_statistics = {}
    for st in SubmissionTag.objects.all():
        tag_statistics[st.name] = SubmissionTagRelation.objects.filter(
            submission__in=submissions, submission_tag=st).count()
    submissions = submissions.order_by(order)
    submissions = submissions[elab_count_start:elab_count_end]
    elab_count = 0
    eval_dict = {}
    elab_dict = {}
    for subm in submissions:
        # Check for an active (not yet released) lock on this submission.
        try:
            lock = SubmissionLock.objects.get(
                submission=subm,
                release_date__gt=timezone.localtime(timezone.now()))
            subm_lock_dict = {'nickname': lock.user.nickname,
                              'timestamp': lock.lock_date}
        except ObjectDoesNotExist:
            subm_lock_dict = {}
        elab = subm.elaboration
        # Tags attached to this submission.
        subm_tag_rels = SubmissionTagRelation.objects.select_related(
            depth=1).filter(submission_id=subm.id)
        tags = [relation.submission_tag.name for relation in subm_tag_rels]
        # Translate the state code into its display string.
        subm_state = subm.submission_state
        for states in Submission.SUBMISSION_STATES:
            if states[0] == subm_state:
                subm_state = states[1]
        # Collect the dates of *older* versions of this submission.
        # The current database row is not itself a reversion version yet:
        # if a stored version's date equals the current submission_date
        # there was no change; otherwise it is a genuine old version.
        subm_versions = Version.objects.get_for_object(subm)
        version_dates = []
        for version in subm_versions:
            version_date = version.field_dict['submission_date']
            if version_date != subm.submission_date:
                version_dates.append(version_date)
        # TODO for each user
        email = elab.user.email
        size = 40
        gravatar_url = ("http://robohash.org/"
                        + hashlib.md5(email.lower()).hexdigest()
                        + "?gravatar=hashed")
        # BUGFIX: a '&' separator is required before the extra query
        # parameters; without it the URL read '?gravatar=hashedd=...'.
        gravatar_url += '&' + urllib.urlencode({'d': 'identicon',
                                                's': str(size)})
        this_elab = {}
        this_elab['title'] = elab.main_challenge.title
        this_elab['elaboration_id'] = elab.id
        this_elab['submission_id'] = subm.id
        this_elab['submission_tags'] = tags
        this_elab['submission_state'] = subm_state
        this_elab['submission_date'] = subm.submission_date
        this_elab['submission_version_dates'] = version_dates
        this_elab['submission_lock'] = subm_lock_dict
        # TODO adapt to multiple submitters
        user_dict = {}
        user_dict['nickname'] = elab.user.nickname
        user_dict['fullname'] = elab.user.first_name + ' ' + elab.user.last_name
        user_dict['gravatar_hash'] = gravatar_url
        this_elab['user'] = user_dict
        elaboration_parts = ElaborationPart.objects.select_related().filter(
            elaboration=elab)
        this_elab['elaboration_parts_count'] = elaboration_parts.count()
        try:
            evaluation = Evaluation.objects.select_related().get(submission=subm)
            this_elab['evaluation_date'] = evaluation.creation_date
            this_elab['evaluation_state'] = Evaluation.get_eval_state_string(
                evaluation.evaluation_state)
            this_elab['evaluation_author'] = evaluation.user.nickname
        except ObjectDoesNotExist:
            evaluation = None
            this_elab['evaluation_date'] = 'none'
        elab_parts_dict = {}
        i = 0
        for elab_part in elaboration_parts:
            elab_part_dict = {}
            elab_part_dict['subtitle'] = elab_part.sub_challenge.title
            elab_part_dict['elaboration_part_id'] = elab_part.id
            # BUGFIX: subm_part must be reset each iteration — previously a
            # miss left it unbound (NameError on the first part) or stale
            # (previous part's value on later ones).
            subm_part = None
            try:
                subm_part = SubmissionPart.objects.get(
                    elaboration_part=elab_part, submission=subm)
                eval_part = EvaluationPart.objects.get(
                    evaluation=evaluation, submission_part=subm_part)
                feedback = eval_part.feedback
                assigned_points = eval_part.assigned_points
            except ObjectDoesNotExist:
                feedback = ''
                assigned_points = ''
            if feedback is None:
                feedback = ''
            if assigned_points is None:
                assigned_points = ''
            if subm_part is not None:
                version_evals = get_dict_for_old_evaluations_for_submission_part(subm_part)
            else:
                version_evals = []
            elab_part_dict['eval_versions' + str(i)] = version_evals
            elab_part_dict['feedback'] = feedback
            elab_part_dict['assigned_points'] = assigned_points
            elab_part_dict['expected_points'] = elab_part.elaboration_points
            elab_part_dict['elaboration_text'] = elab_part.elaboration_text
            # Uploaded files (images etc.) attached to this part.
            uploads = Upload.objects.filter(elaboration_part=elab_part.id)
            if uploads:
                elab_parts_uploads = []
                for upload in uploads:
                    upload_dict = {}
                    # NOTE(review): assumes a 3-character file extension.
                    upload_dict['type'] = upload.docfile.url[-3:]
                    upload_dict['path'] = urllib.unquote(upload.docfile.url)
                    elab_parts_uploads.append(upload_dict)
                elab_part_dict['uploads'] = elab_parts_uploads
            elab_parts_dict['elaboration_part' + str(i)] = elab_part_dict
            i += 1
        this_elab['elaboration_parts'] = elab_parts_dict
        elab_dict['elab' + str(elab_count)] = this_elab
        elab_count += 1
    eval_dict['tag_statistics'] = tag_statistics
    eval_dict['full_result_count'] = full_result_count
    eval_dict['elab_dict'] = elab_dict
    return eval_dict
def get_version(request, course_short_title):
    """AJAX view: return one version of a submission's elaboration parts.

    Expects POST params 'submission_id' and 'version' ('current' or a
    0-based version index into the reversion history). For each
    elaboration part the response carries the current text, the
    requested version's text, and that version's evaluation history.

    Returns a JSON HttpResponse.
    NOTE(review): a non-AJAX request falls through and returns None,
    which Django rejects — presumably the URLconf guarantees AJAX only;
    confirm before hardening.
    """
    if request.is_ajax():
        subm_id = request.POST['submission_id']
        version_nr = request.POST['version']
        if version_nr != 'current':
            version_nr = int(version_nr)
        version_elements = []
        subm = Submission.objects.get(id=subm_id)
        subm_version_count = Version.objects.get_for_object(subm).count()
        if version_nr == 'current':
            subm_date = subm.submission_date
        else:
            subm_version = Version.objects.get_for_object(subm)[version_nr]
            subm_date = subm_version.field_dict['submission_date']
        parts = ElaborationPart.objects.filter(elaboration=subm.elaboration)
        i = 0
        for part in parts:
            part_dict = {}
            text_element = part.elaboration_text
            if version_nr == 'current':
                version_text = text_element
            else:
                text_versions = Version.objects.get_for_object(part)
                version_text = text_versions[version_nr].field_dict['elaboration_text']
            part_dict['current' + str(i)] = text_element
            part_dict['part' + str(i)] = version_text
            # Load old evaluations for this part.
            version_evals = []
            subm_part = SubmissionPart.objects.get(elaboration_part=part,
                                                   submission=subm)
            if version_nr == 'current':
                version_evals = get_dict_for_old_evaluations_for_submission_part(subm_part)
            else:
                eval_part_versions, eval_versions = \
                    subm_part.get_all_evaluations_for_version(version_nr)
                # ROBUSTNESS: guard against a missing history, as the
                # sibling helper does — get_all_evaluations_for_version
                # can evidently return None.
                if eval_versions is not None:
                    # If the current version is requested, the current
                    # evaluation could be left out here:
                    # if subm_version_count - 1 == version_nr:
                    #     nr_of_versions -= 1
                    nr_of_versions = eval_versions.count()
                    for j in range(0, nr_of_versions):
                        state = eval_versions[j].field_dict['evaluation_state']
                        if state != Evaluation.EVALUATION_STATE_NO_EVALUATION:
                            version_eval_dict = {}
                            version_eval_dict['feedback'] = eval_part_versions[j].field_dict['feedback']
                            version_eval_dict['assigned_points'] = eval_part_versions[j].field_dict['assigned_points']
                            # field_dict['user'] holds the FK id.
                            version_eval_dict['user'] = CustomUser.objects.get(
                                id=eval_versions[j].field_dict['user']).nickname
                            version_eval_dict['eval_state'] = Evaluation.get_eval_state_string(state)
                            version_eval_dict['creation_date'] = eval_versions[j].field_dict['creation_date']
                            version_evals.append(version_eval_dict)
            part_dict['eval_versions' + str(i)] = version_evals
            version_elements.append(part_dict)
            i += 1
        response = simplejson.dumps(version_elements, cls=DjangoJSONEncoder)
        return HttpResponse(response, mimetype='application/json')