Example #1
    def create_evaluation(self, form_dict):
        # Build the evaluator from the demographics form and attach the
        # optional email address before saving.
        evaluator = form_dict["demographics"].save(commit=False)
        evaluator.email = form_dict["email"].cleaned_data.get("email")
        evaluator.save()

        # Timestamps the wizard recorded when the first step was opened
        # and when it was answered.
        start_data = dict(self.storage.get_step_data("start"))
        started = arrow.get(start_data["first_open"])
        first_continue = arrow.get(start_data["answered"])

        evaluation = Evaluation(platform=self.platform,
                                evaluator=evaluator,
                                started=started.datetime,
                                first_continue=first_continue.datetime)
        evaluation.save()

        # One answer per platform question; wizard step "q_<i>" holds the
        # form for question i and the time it was answered.
        for i, question in enumerate(self.platform.questions.all()):
            step = "q_%s" % i
            answer = form_dict[step].save(commit=False)
            answer.evaluation = evaluation
            answer.question = question
            answer.answer_time = arrow.get(
                dict(self.storage.get_step_data(step))["answered"]).datetime
            answer.save()

        return evaluation
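
This method reads step data through self.storage, so it presumably lives on a form-wizard view. A minimal sketch of how it might be wired up, assuming django-formtools' SessionWizardView; the class name, form classes, and URL name are hypothetical:

# A sketch only: class name, form classes, and URL name are assumptions,
# not taken from the original code.
from django.shortcuts import redirect
from formtools.wizard.views import SessionWizardView


class PlatformEvaluationWizard(SessionWizardView):
    # "start", "demographics", "email", "q_<i>"... correspond to the
    # step names used by create_evaluation above.
    form_list = [
        ("start", StartForm),                # hypothetical form classes
        ("demographics", DemographicsForm),
        ("email", EmailForm),
    ]

    def done(self, form_list, form_dict, **kwargs):
        # All steps validated; persist everything, then redirect.
        evaluation = self.create_evaluation(form_dict)
        return redirect("evaluation_done", pk=evaluation.pk)
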
Example #2
def create_evaluation(request):
    """
    Endpoint: /users/create_user/
    Method: POST
    Allowed users: All user
    Response status code: 201 created
    Description: admin can create users of a
    """

    #  if not request.user.has_perm('users.add_user'):
    #     return Response({'error': 'can not create user'}, status=status.HTTP_403_FORBIDDEN)

    evaluation_details = request.data
    lecturer = User.objects.get(id=evaluation_details['lecturer_id'])
    student = User.objects.get(id=evaluation_details['student_id'])
    # unit = Unit.objects.get(id=evaluation_details['unit_id'])

    evaluation = Evaluation(
        lecturer_id=lecturer,
        student_id=student,
        # unit_id=unit,
    )
    evaluation.save()

    # Create one Question row per submitted question.
    for que_details in evaluation_details['questions']:
        que = Question(question=que_details['question'],
                       category=que_details['category'],
                       evaluation_id=evaluation,
                       rating=que_details['rating'])
        que.save()

    return Response({'success': "Evaluation created successfully"},
                    status=status.HTTP_201_CREATED)
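
For orientation, the view implies roughly the following models. This is inferred from the field names it uses, not taken from the project; note that naming a ForeignKey "lecturer_id" is unconventional, since Django then names the database column lecturer_id_id.

# Sketch of the models the view appears to assume; all fields inferred.
from django.contrib.auth.models import User
from django.db import models


class Evaluation(models.Model):
    lecturer_id = models.ForeignKey(User, on_delete=models.CASCADE,
                                    related_name='evaluations_received')
    student_id = models.ForeignKey(User, on_delete=models.CASCADE,
                                   related_name='evaluations_given')


class Question(models.Model):
    evaluation_id = models.ForeignKey(Evaluation, on_delete=models.CASCADE)
    question = models.TextField()
    category = models.CharField(max_length=100)
    rating = models.IntegerField()
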
Example #3
def create_evaluation(request):
    """
    Endpoint: /users/create_user/
    Method: POST
    Allowed users: All user
    Response status code: 201 created
    Description: admin can create users of a
    """

    #  if not request.user.has_perm('users.add_user'):
    #     return Response({'error': 'can not create user'}, status=status.HTTP_403_FORBIDDEN)

    evaluation_details = request.data

    evaluation = Evaluation(
        lec_id=evaluation_details['lec_id'],
        student_id=evaluation_details['stud_id'],
    )
    evaluation.save()
    eval_details = {'evaluation_id': evaluation.id}

    return Response(eval_details, status=status.HTTP_201_CREATED)
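
A hedged usage sketch with DRF's test client; the URL path is an assumption, only the payload keys come from the code above:

# Hypothetical call against the view; the path is not from the project.
from rest_framework.test import APIClient

client = APIClient()
response = client.post('/evaluations/create_evaluation/',
                       {'lec_id': 1, 'stud_id': 2}, format='json')
assert response.status_code == 201
print(response.data)  # e.g. {'evaluation_id': 42}
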
Example #4
def get_dict_for_old_evaluations_for_submission_part(subm_part):
    available_versions = Version.objects.get_for_object(subm_part)
    version_evals = []

    if available_versions.count() > 0:
        eval_part_versions, eval_versions = subm_part.get_all_evaluations_for_version(available_versions.count()-1)

        if eval_versions is not None:
            # Iterate over every stored version (count() - 1 would
            # silently skip the most recent one).
            for j in range(0, eval_versions.count()):
                if eval_versions[j].field_dict['evaluation_state'] != Evaluation.EVALUATION_STATE_NO_EVALUATION:
                    version_eval_dict = {}

                    version_eval_dict['feedback'] = eval_part_versions[j].field_dict['feedback']
                    version_eval_dict['assigned_points'] = eval_part_versions[j].field_dict['assigned_points']
                    version_eval_dict['user'] = CustomUser.objects.get(id=eval_versions[j].field_dict['user']).nickname
                    version_eval_dict['eval_state'] = Evaluation.get_eval_state_string(eval_versions[j].field_dict['evaluation_state'])
                    version_eval_dict['creation_date'] = eval_versions[j].field_dict['creation_date']

                    version_evals.append(version_eval_dict)

    return version_evals
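
The version lookups here come from django-reversion. A minimal sketch of the pattern, assuming a django-reversion release where models are registered with the reversion.register decorator and saved versions expose their fields via field_dict; the model below is a placeholder, not one of the project's:

# Sketch of the django-reversion usage this code relies on.
import reversion
from django.db import models
from reversion.models import Version


@reversion.register()
class Note(models.Model):  # placeholder model
    text = models.TextField()


def print_history(note):
    # Saves made inside a revision block each produce a Version row,
    # whose field_dict maps field names to the stored values.
    for version in Version.objects.get_for_object(note):
        print(version.field_dict['text'])
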
Example #5
def evaluate(request):
    if request.method == "POST":
        ef = EvaluateForm(request.POST, request.FILES)
        if ef.is_valid():
            # user = auth.get_user(request)
            username = request.session.get("username", "anybody")
            user = User.objects.get(username=username)
            remark = ef.cleaned_data["remark"]
            testFile = ef.cleaned_data["testFile"]
            extractor = ef.cleaned_data["extractor"]
            mode = ef.cleaned_data["mode"]
            remarks = Evaluation.objects.filter(remark__exact=remark)
            if remarks:
                error = "remark has been used."
                return render_to_response("evaluation.html", {"ef": ef, "error": error})
            else:
                evaluations = Evaluation()
                evaluations.user = user
                evaluations.remark = remark
                evaluations.testFile = testFile
                evaluations.extractor = extractor
                evaluations.mode = mode
                if mode == "c2e":
                    evaluations.T_Bleu = 0.5826
                    evaluations.S_AmbiWord = 0.5
                    evaluations.S_NewWord = "None"
                    evaluations.S_Idiom = "None"
                    evaluations.S_OverLapWord = "None"
                    evaluations.S_Collocation = 1
                    evaluations.S_Noun = 0.6264
                    evaluations.S_Verb = 1
                    evaluations.S_Adj = 1
                    evaluations.S_Prep = "None"
                    evaluations.S_Adverb = "None"
                    evaluations.S_Pron = "None"
                    evaluations.S_NP = 0.4167
                    evaluations.S_VP = 0.6176
                    evaluations.S_PP = "None"
                    evaluations.S_QP = 0.7353
                    evaluations.S_ADVP = "None"
                    evaluations.S_ADJP = "None"
                    evaluations.S_Locate_Phr = 0.6
                    evaluations.S_Sub_Predi = "None"
                    evaluations.S_Predi_Obj = "None"
                    evaluations.S_Predi_Comp = "None"
                    evaluations.S_Prep_Obj = "None"
                    evaluations.S_Adv_MOD = "None"
                    evaluations.S_Adj_MOD = "None"
                    evaluations.S_Noun_MOD = "None"
                    evaluations.S_Cons_Verb = "None"
                    evaluations.S_Pivot_Phrase = "None"
                    evaluations.T_Noun = 0.6932
                    evaluations.T_Verb = 0.1667
                    evaluations.T_Adj = 0.2222
                    evaluations.T_Prep = 0.7317
                    evaluations.T_Adverb = 1
                    evaluations.T_Pron = 1
                    evaluations.T_Quantity = 0.9375
                    evaluations.T_AdjTrans = 0.75
                    evaluations.T_AdvTrans = "None"
                    evaluations.T_ModalVerb = "None"
                    evaluations.T_Article = "None"
                    evaluations.T_NP = 0.4401
                    evaluations.T_VP = 0.3475
                    evaluations.T_PP = 0.4252
                    evaluations.T_QP = 1
                    evaluations.T_ADVP = 0.1538
                    evaluations.T_ADJP = "None"
                    evaluations.T_WH_Prep_Phr = "None"
                    evaluations.T_WH_Adv_Phr = "None"
                    evaluations.T_WH_Noun_Phr = "None"
                    evaluations.T_CONJ_Phr = "None"
                    evaluations.T_Sub_Predi = 0.5349
                    evaluations.T_Predi_Obj = 0.4828
                    evaluations.T_Adv_MOD = 0.8182
                    evaluations.T_Adj_MOD = 0.4352
                    evaluations.T_Be_Predi = "None"
                    evaluations.S_BA_Sen = "None"
                    evaluations.S_BEI_Sen = "None"
                    evaluations.S_SHI_Sen = "None"
                    evaluations.S_YOU_Sen = "None"
                    evaluations.GeneralScore = 0.4298
                    evaluations.a = 1
                    evaluations.Words = 0.6611
                    evaluations.Phrases = 0.4126
                    evaluations.Sentences = "None"
                    evaluations.Source_Words = 0.7174
                    evaluations.Source_Phrases = 0.4114
                    evaluations.Source_FunctionalWords = "None"
                    evaluations.Source_SubstanWords = 0.7073
                    evaluations.Source_SpecialWords = 0.8
                    evaluations.Target_Words = 0.6598
                    evaluations.Target_Phrases = 0.4114
                    evaluations.Target_FunctionalWords = 0.7317
                    evaluations.Target_SubstanWords = 0.6598
                    evaluations.DefaultGroup = 0.4398
                if mode == "e2c":
                    evaluations.T_Bleu = 0.2286
                    # evaluations.S_AmbiWord
                    # evaluations.S_NewWord
                    # evaluations.S_Idiom
                    # evaluations.S_OverLapWord
                    evaluations.S_Collocation = 0.3333
                    evaluations.S_Noun = 0.4293
                    evaluations.S_Verb = 0.3333
                    evaluations.S_Adj = 0.3846
                    # evaluations.S_Prep
                    evaluations.S_Adverb = "None"
                    evaluations.S_Pron = "None"
                    evaluations.S_NP = 0.3153
                    evaluations.S_VP = 0.175
                    evaluations.S_PP = 0.2049
                    evaluations.S_QP = "None"
                    evaluations.S_ADVP = "None"
                    evaluations.S_ADJP = "None"
                    # evaluations.S_Locate_Phr
                    evaluations.S_Sub_Predi = "None"
                    evaluations.S_Predi_Obj = 0.0714
                    # evaluations.S_Predi_Comp
                    # evaluations.S_Prep_Obj
                    evaluations.S_Adv_MOD = "None"
                    evaluations.S_Adj_MOD = "None"
                    # evaluations.S_Noun_MOD
                    # evaluations.S_Cons_Verb
                    # evaluations.S_Pivot_Phrase
                    evaluations.T_Noun = 0.3739
                    evaluations.T_Verb = 0.1818
                    evaluations.T_Adj = "None"
                    evaluations.T_Prep = 0.8
                    evaluations.T_Adverb = 0
                    evaluations.T_Pron = 0
                    evaluations.T_Quantity = 0.0833
                    # evaluations.T_AdjTrans
                    # evaluations.T_AdvTrans
                    # evaluations.T_ModalVerb
                    # evaluations.T_Article
                    evaluations.T_NP = 0.217
                    evaluations.T_VP = 0.1076
                    evaluations.T_PP = 0.1412
                    evaluations.T_QP = 0.087
                    evaluations.T_ADVP = "None"
                    evaluations.T_ADJP = "None"
                    # evaluations.T_WH_Prep_Phr
                    # evaluations.T_WH_Adv_Phr
                    # evaluations.T_WH_Noun_Phr
                    # evaluations.T_CONJ_Phr
                    evaluations.T_Sub_Predi = 0.2438
                    evaluations.T_Predi_Obj = 0.1914
                    evaluations.T_Adv_MOD = 0.0532
                    evaluations.T_Adj_MOD = 0.4762
                    # evaluations.T_Be_Predi
                    # evaluations.S_BA_Sen
                    # evaluations.S_BEI_Sen
                    # evaluations.S_SHI_Sen
                    # evaluations.S_YOU_Sen

                    evaluations.S_DicPrep = "None"
                    evaluations.S_Acro = "None"
                    evaluations.S_MWE = "None"
                    evaluations.S_PhrVerb = "None"
                    evaluations.S_Hyperbaton = "None"
                    evaluations.S_AdjTrans = "None"
                    evaluations.S_AdvTrans = "None"
                    evaluations.S_WH_Prep_Phr = "None"
                    evaluations.S_WH_Adv_Phr = "None"
                    evaluations.S_WH_Noun_Phr = "None"
                    evaluations.S_CONJ_Phr = "None"
                    evaluations.S_Be_Predi = "None"
                    evaluations.T_Measure = 0.0667
                    evaluations.T_Locate_Phr = 0.1714
                    evaluations.T_Predi_Comp = "None"
                    evaluations.T_Prep_Obj = 0.1538
                    evaluations.T_Noun_MOD = 0.3137
                    evaluations.S_NormalPresentTense = "None"
                    evaluations.S_PostFutureTense = "None"
                    evaluations.S_FutureTense = "None"
                    evaluations.S_NormalPostTense = 0.3667
                    evaluations.S_PresentCompleteTense = "None"
                    evaluations.S_PostCompleteTense = "None"
                    evaluations.S_FutureCompleteTense = "None"
                    evaluations.S_PostFutureCompleteTense = "None"
                    evaluations.S_PresentDoingTense = "None"
                    evaluations.S_PostDoingTense = "None"
                    evaluations.S_FutureDoingTense = "None"
                    evaluations.S_PostFutureDoingTense = "None"
                    evaluations.S_PresentCompleteDoingTense = "None"
                    evaluations.S_PostCompleteDoingTense = "None"
                    evaluations.S_FutureCompleteDoingTense = "None"
                    evaluations.S_PostFutureCompleteDoingTense = "None"
                    evaluations.S_NormalPresentTense_NOT = "None"
                    evaluations.S_PostFutureTense_NOT = "None"
                    evaluations.S_FutureTense_NOT = "None"
                    evaluations.S_NormalPostTense_NOT = "None"
                    evaluations.S_PresentCompleteTense_NOT = "None"
                    evaluations.S_PostCompleteTense_NOT = "None"
                    evaluations.S_FutureCompleteTense_NOT = "None"
                    evaluations.S_PostFutureCompleteTense_NOT = "None"
                    evaluations.S_PresentDoingTense_NOT = "None"
                    evaluations.S_PostDoingTense_NOT = "None"
                    evaluations.S_FutureDoingTense_NOT = "None"
                    evaluations.S_PostFutureDoingTense_NOT = "None"
                    evaluations.S_PresentCompleteDoingTense_NOT = "None"
                    evaluations.S_PostCompleteDoingTense_NOT = "None"
                    evaluations.S_FutureCompleteDoingTense_NOT = "None"
                    evaluations.S_PostFutureCompleteDoingTense_NOT = "None"
                    evaluations.S_WhenClause = "None"
                    evaluations.S_WheneverClause = "None"
                    evaluations.S_WhileClause = "None"
                    evaluations.S_BeforeClause = "None"
                    evaluations.S_AfterClause = "None"
                    evaluations.S_UntilClause = "None"
                    evaluations.S_TillClause = "None"
                    evaluations.S_ByTheTimeClause = "None"
                    evaluations.S_AsSoonAsClause = "None"
                    evaluations.S_Hardly_WhenClause = "None"
                    evaluations.S_NoSooner_ThanClause = "None"
                    evaluations.S_TheMomentClause = "None"
                    evaluations.S_TheMinuteClause = "None"
                    evaluations.S_ImmediatelyClause = "None"
                    evaluations.S_DirectlyClause = "None"
                    evaluations.S_InstantlyClause = "None"
                    evaluations.S_BcauseClause = "None"
                    evaluations.S_SinceClause = "None"
                    evaluations.S_NotThatClause = "None"
                    evaluations.S_IfClause = "None"
                    evaluations.S_UnlessClause = "None"
                    evaluations.S_InCaseClause = "None"
                    evaluations.S_AsLongAsClause = "None"
                    evaluations.S_SoThatClause = "None"
                    evaluations.S_InOrderThatClause = "None"
                    evaluations.S_ForFearThatClause = "None"
                    evaluations.S_So_ThatClause = "None"
                    evaluations.S_Such_ThatClause = "None"
                    evaluations.S_ThanClause = "None"
                    evaluations.S_As_AsClause = "None"
                    evaluations.S_So_AsClause = "None"
                    evaluations.S_As_IfClause = "None"
                    evaluations.S_AsThoughClause = "None"
                    evaluations.S_ThoughClause = "None"
                    evaluations.S_AlthoughClause = "None"
                    evaluations.S_EvenIfClause = "None"
                    evaluations.S_EvenThough = "None"
                    evaluations.S_NoMatterWhatClause = "None"
                    evaluations.S_WhatEverClause = "None"
                    evaluations.S_NoMatterWhoClause = "None"
                    evaluations.S_WhoeverClause = "None"
                    evaluations.S_NoMatterWhichClause = "None"
                    evaluations.S_WhicheverClause = "None"
                    evaluations.S_NoMatterHowClause = "None"
                    evaluations.S_HoweverClause = "None"
                    evaluations.S_NoMatterWhenClause = "None"

                    evaluations.GeneralScore = 0.2125
                    # evaluations.a
                    evaluations.Words = 0.3382
                    evaluations.Phrases = 0.1894
                    evaluations.Sentences = 0.3667
                    evaluations.Source_Words = 0.4115
                    evaluations.Source_Phrases = 0.2365
                    evaluations.Source_FunctionalWords = "None"
                    evaluations.Source_SubstanWords = 0.4125
                    evaluations.Source_SpecialWords = 0.3333
                    evaluations.Target_Words = 0.3341
                    evaluations.Target_Phrases = 0.1884
                    evaluations.Target_FunctionalWords = 0.25
                    evaluations.Target_SubstanWords = 0.3436
                    evaluations.DefaultGroup = 0.2125

                    evaluations.b = 0.4293
                    evaluations.Result_Clauses = "None"
                    evaluations.Manner_Clauses = "None"
                    evaluations.Compare_Clauses = "None"
                    evaluations.Tense_Yes = 0.3667
                    evaluations.Tense_No = "None"
                    evaluations.Tense = 0.3667
                    evaluations.Reason_Clauses = "None"
                    evaluations.Concessive_Clauses = "None"
                    evaluations.Purpose_Clauses = "None"
                    evaluations.Clauses = "None"
                    evaluations.Condition_Clauses = "None"
                    evaluations.Time_Clauses = "None"

                evaluations.save()
                error = "successful."
                print username, remark
                return render_to_response("evaluation.html", {"ef": ef, "error": error})
    else:
        ef = EvaluateForm()
    return render_to_response("evaluation.html", {"ef": ef})
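
The two mode branches above set dozens of hard-coded metric fields one at a time. A hedged refactor sketch: keep each mode's values in a dict and assign them in a loop. SCORES_BY_MODE is my own name, and only a few entries are shown, copied from the assignments above:

# Sketch: table-driven assignment of the fixed metric values.
SCORES_BY_MODE = {
    "c2e": {"T_Bleu": 0.5826, "S_AmbiWord": 0.5, "GeneralScore": 0.4298},
    "e2c": {"T_Bleu": 0.2286, "S_Collocation": 0.3333, "GeneralScore": 0.2125},
}

for field, value in SCORES_BY_MODE.get(mode, {}).items():
    setattr(evaluations, field, value)
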
Example #6
def get_json_from_subms(submissions, elab_count_start, elab_count_end, tag_select_type, selected_tags, order):

    if tag_select_type=='all':
        for tag in selected_tags:
            submissions = submissions.filter(submissiontagrelation__submission_tag__name=tag)
    elif tag_select_type=='none':
        for tag in selected_tags:
            submissions = submissions.exclude(submissiontagrelation__submission_tag__name=tag)

    full_result_count = submissions.count()

    tag_statistics = {}

    subm_tags = SubmissionTag.objects.all()
    for st in subm_tags:
        tag_statistics[st.name] = SubmissionTagRelation.objects.filter(submission__in=submissions, submission_tag=st).count()


    #submissions = submissions.order_by('submission_date')
    submissions = submissions.order_by(order)
    #print(submissions.ordered)

    submissions = submissions[elab_count_start:elab_count_end]

    elab_count = 0

    eval_dict = {}
    elab_dict = {}

    for subm in submissions:

        #check for locks on this submission

        try:
            lock = SubmissionLock.objects.get(submission=subm, release_date__gt=timezone.localtime(timezone.now()))
            subm_lock_dict={}
            subm_lock_dict['nickname'] = lock.user.nickname
            subm_lock_dict['timestamp'] = lock.lock_date
        except ObjectDoesNotExist:
            subm_lock_dict={}

        #check for tags on this submission
        elab = subm.elaboration
        subm_tag_rels = SubmissionTagRelation.objects.select_related(depth=1).filter(submission_id=subm.id)

        tags = []

        for relation in subm_tag_rels:
            tag_name = relation.submission_tag.name
            tags.append(tag_name)

        #check for submission state
        subm_state = subm.submission_state
        for states in Submission.SUBMISSION_STATES:
            if states[0]==subm_state:
                subm_state=states[1]

        #check for versions of this submission
        subm_versions = Version.objects.get_for_object(subm)

        #print(subm_versions)

        version_dates = []
        for version in subm_versions:
            version_date = version.field_dict['submission_date']
            submission_date = subm.submission_date
            #the current entries in the database are not a version yet (in reversion)
            #if the submission_date of the current entry in the database is equal to a already versioned submission,
            #there was no change. otherwise it is a new submission, therefore a "version".
            if version_date != submission_date:
                version_dates.append(version_date)

        #TODO for each user
        email = elab.user.email
        size = 40
        gravatar_url = "http://robohash.org/" + hashlib.md5(email.lower()).hexdigest() + "?gravatar=hashed"
        gravatar_url += urllib.urlencode({'d':'identicon', 's':str(size)})



        this_elab = {}

        this_elab['title'] = elab.main_challenge.title
        this_elab['elaboration_id'] = elab.id
        this_elab['submission_id'] = subm.id
        this_elab['submission_tags'] = tags
        this_elab['submission_state'] = subm_state
        this_elab['submission_date'] = subm.submission_date
        this_elab['submission_version_dates'] = version_dates
        this_elab['submission_lock'] = subm_lock_dict

        #TODO adapt to multiple submitters
        user_dict = {}
        user_dict['nickname'] = elab.user.nickname
        user_dict['fullname'] = elab.user.first_name + ' ' + elab.user.last_name
        user_dict['gravatar_hash']=gravatar_url

        this_elab['user'] = user_dict

        elaboration_parts = ElaborationPart.objects.select_related().filter(elaboration=elab)

        this_elab['elaboration_parts_count'] = elaboration_parts.count()

        try:
            eval = Evaluation.objects.select_related().get(submission=subm)

            this_elab['evaluation_date'] = eval.creation_date
            this_elab['evaluation_state'] = Evaluation.get_eval_state_string(eval.evaluation_state)
            this_elab['evaluation_author'] = eval.user.nickname
        except ObjectDoesNotExist:
            eval = None
            this_elab['evaluation_date'] = 'none'

        elab_parts_dict = {}
        i=0

        for elab_part in elaboration_parts:
            elab_part_dict = {}
            elab_part_dict['subtitle']=elab_part.sub_challenge.title
            #elab_part_dict['nickname']=elab_part.user.nickname
            #elab_part_dict['fullname']=elab_part.user.first_name + ' ' + elab_part.user.last_name
            elab_part_dict['elaboration_part_id']=elab_part.id

            try:
                subm_part = SubmissionPart.objects.get(elaboration_part=elab_part, submission=subm)
                eval_part = EvaluationPart.objects.get(evaluation=eval, submission_part=subm_part)

                feedback = eval_part.feedback
                assigned_points = eval_part.assigned_points
            except ObjectDoesNotExist:
                feedback = ''
                assigned_points = ''

            if feedback is None:
                feedback = ''

            if assigned_points is None:
                assigned_points = ''

            version_evals = get_dict_for_old_evaluations_for_submission_part(subm_part)
            elab_part_dict['eval_versions' + str(i)] = version_evals

            elab_part_dict['feedback'] = feedback
            elab_part_dict['assigned_points'] = assigned_points
            elab_part_dict['expected_points'] = elab_part.elaboration_points
            elab_part_dict['elaboration_text'] = elab_part.elaboration_text

            #images
            uploads_dict = {}
            uploads = Upload.objects.filter(elaboration_part=elab_part.id)

            if uploads:
                elab_parts_uploads = []
                for upload in uploads:
                    upload_dict = {}
                    upload_dict['type'] = upload.docfile.url[-3:]
                    upload_dict['path'] = urllib.unquote(upload.docfile.url)
                    elab_parts_uploads.append(upload_dict)
                elab_part_dict['uploads'] = elab_parts_uploads


            elab_parts_dict['elaboration_part'+str(i)]=elab_part_dict
            i += 1

        this_elab['elaboration_parts'] = elab_parts_dict

        elab_dict['elab'+str(elab_count)] = this_elab
        elab_count += 1

    eval_dict['tag_statistics'] = tag_statistics
    eval_dict['full_result_count'] = full_result_count
    eval_dict['elab_dict'] = elab_dict

    return eval_dict
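
As an aside, the lock check at the top of the loop can avoid the try/except by using QuerySet.first(), available since Django 1.6. A sketch, behaviourally equivalent under the same assumption get() makes, namely at most one unexpired lock per submission:

# Sketch: the same lock lookup with .first() instead of get().
lock = SubmissionLock.objects.filter(
    submission=subm,
    release_date__gt=timezone.localtime(timezone.now()),
).first()

subm_lock_dict = {}
if lock is not None:
    subm_lock_dict['nickname'] = lock.user.nickname
    subm_lock_dict['timestamp'] = lock.lock_date
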
Example #7
def get_version(request, course_short_title):
    if request.is_ajax():

        subm_id = request.POST['submission_id']
        version_nr = request.POST['version']

        if version_nr != 'current':
            version_nr = int(version_nr)

        version_elements = []

        subm = Submission.objects.get(id=subm_id)

        #Version.objects.filter(content_type=ContentType.objects.get_for_model(Submission), revision__date_created__gte=)

        subm_version_count = Version.objects.get_for_object(subm).count()

        if version_nr=='current':
            subm_date = subm.submission_date
        else:
            subm_version = Version.objects.get_for_object(subm)[version_nr]
            subm_date = subm_version.field_dict['submission_date']

        parts = ElaborationPart.objects.filter(elaboration=subm.elaboration)

        i = 0
        for part in parts:
            part_dict = {}

            text_element = part.elaboration_text

            if version_nr=='current':
                version_text = text_element
            else:
                text_versions = Version.objects.get_for_object(part)
                text = text_versions[version_nr]
                version_text = text.field_dict['elaboration_text']

            part_dict['current' + str(i)] = text_element
            part_dict['part' + str(i)] = version_text

            #load old evaluations
            version_evals = []

            subm_part = SubmissionPart.objects.get(elaboration_part=part, submission=subm)

            if version_nr=='current':
                version_evals = get_dict_for_old_evaluations_for_submission_part(subm_part)
            else:
                eval_part_versions, eval_versions = subm_part.get_all_evaluations_for_version(version_nr)

                #if current version requested, leave out current evaluation
                nr_of_versions = eval_versions.count()
                #if subm_version_count-1 == version_nr:
                #    nr_of_versions -= 1

                for j in range(0, nr_of_versions):
                    if eval_versions[j].field_dict['evaluation_state'] != Evaluation.EVALUATION_STATE_NO_EVALUATION:
                        version_eval_dict = {}

                        version_eval_dict['feedback'] = eval_part_versions[j].field_dict['feedback']
                        version_eval_dict['assigned_points'] = eval_part_versions[j].field_dict['assigned_points']
                        version_eval_dict['user'] = CustomUser.objects.get(id=eval_versions[j].field_dict['user']).nickname
                        version_eval_dict['eval_state'] = Evaluation.get_eval_state_string(eval_versions[j].field_dict['evaluation_state'])
                        version_eval_dict['creation_date'] = eval_versions[j].field_dict['creation_date']

                        version_evals.append(version_eval_dict)

            part_dict['eval_versions' + str(i)] = version_evals
            version_elements.append(part_dict)
            i += 1

        #print(version_elements)
        response = simplejson.dumps(version_elements, cls=DjangoJSONEncoder)

        return HttpResponse(response, mimetype='application/json')
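
The mimetype= keyword pins these views to Django < 1.7; later versions renamed it to content_type and added JsonResponse, which uses DjangoJSONEncoder by default. A sketch of the equivalent response on newer Django (the helper name is hypothetical):

# Sketch for Django >= 1.7: JsonResponse serializes with
# DjangoJSONEncoder by default; safe=False permits a top-level list.
from django.http import JsonResponse


def render_version_elements(version_elements):  # hypothetical helper
    return JsonResponse(version_elements, safe=False)
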
Example #8
def save_eval(request, course_short_title):
    if request.is_ajax():

        #for key, value in request.POST.iteritems():
        #    print(key + " " + value)

        elab_id = request.POST['elaboration_id']
        publish = request.POST['publish']
        elab_parts_count = request.POST['elaboration_parts_count']
        elab_parts = simplejson.loads(request.POST['elab_parts'])
        user = CustomUser.objects.get(username=request.user.username)
        subm = Submission.objects.get(elaboration_id=elab_id)

        if publish == 'true':
            publish = True
        elif publish == 'false':
            publish = False

        result = {}

        with reversion.create_revision():
            try:
                eval = Evaluation.objects.get(submission=subm)
                eval_exists = True
            except ObjectDoesNotExist:
                eval = Evaluation(submission=subm, user=user)
                eval_exists = False
                eval.evaluation_state = Evaluation.EVALUATION_STATE_EVALUATION_STARTED
                eval.save()

            for i in range(0, int(elab_parts_count)):
                elab_part_dict = elab_parts[i]
                eval_text = elab_part_dict['evaluation_text']
                eval_points = elab_part_dict['evaluation_points']
                elab_part_id = elab_part_dict['elaboration_part_id']

                eval_text = strip_tags(eval_text)

                if eval_points == '':
                    eval_points = None

                elab_part = ElaborationPart.objects.get(id=elab_part_id)

                if eval_exists:
                    #eval = Evaluation.objects.get(submission=subm)
                    subm_part = SubmissionPart.objects.get(elaboration_part=elab_part, submission=subm)
                    eval_part = EvaluationPart.objects.get(evaluation=eval, submission_part=subm_part)
                    eval_part.feedback = eval_text
                    eval_part.assigned_points = eval_points
                    eval_part.save()
                    result['response'] = 'updated'
                else:
                    subm_part = SubmissionPart.objects.get(submission=subm, elaboration_part=elab_part)
                    EvaluationPart(evaluation=eval, submission_part=subm_part, feedback=eval_text, assigned_points=eval_points).save()
                    result['response'] = 'created'


            #create a version of all elaboration + submission objects
            #at the time of the first evaluation of a new submission
            if not eval_exists or eval.evaluation_state==Evaluation.EVALUATION_STATE_NO_EVALUATION:
                elab = Elaboration.objects.get(id=elab_id)

                elab.save()
                subm.save()

                for i in range(0, int(elab_parts_count)):

                    elab_part_dict = elab_parts[i]
                    elab_part_id = elab_part_dict['elaboration_part_id']

                    elab_part = ElaborationPart.objects.get(id=elab_part_id)
                    subm_part = SubmissionPart.objects.get(submission=subm, elaboration_part=elab_part)

                    elab_part.save()
                    subm_part.save()

            #save states
            if publish:
                subm.submission_state=Submission.SUBMISSION_STATE_EVALUATED
                eval.evaluation_state = Evaluation.EVALUATION_STATE_EVALUATION_PUBLISHED
            else:
                subm.submission_state=Submission.SUBMISSION_STATE_WAITING_FOR_EVALUATION
                eval.evaluation_state = Evaluation.EVALUATION_STATE_EVALUATION_STARTED

            eval.creation_date=timezone.localtime(timezone.now())
            eval.save()


        subm.save()

        lock = SubmissionLock.objects.get(submission=subm, user=user, release_date__gt=timezone.localtime(timezone.now()))
        lock.release_date = timezone.localtime(timezone.now())
        lock.save()
        result['lock'] = 'resolved'

        response = simplejson.dumps(result)

        return HttpResponse(response, mimetype='application/json')
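
django-reversion can also record who made a change and why inside a create_revision() block, via its set_user and set_comment calls. Attaching them here is a suggestion, not the original code:

# Sketch: annotate the revision that save_eval creates. The names
# evaluation, user, and publish stand in for the view's variables.
import reversion

with reversion.create_revision():
    evaluation.save()
    reversion.set_user(user)
    reversion.set_comment("published" if publish else "draft saved")
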
Example #9
def create_test_challenge(request, course_short_title):
    if request.is_ajax():
        step = int(request.POST['step'])

        if step == 0:
            #create a elab+subm
            print('create test elab+subm')

            elaboration = Elaboration(id=10000, main_challenge_id=200, portfolio_id=1, user_id=1)
            elaboration.creation_date=datetime.datetime(2013, 2, 16, 15, 24, 17, 345952, tzinfo=utc)
            elaboration.save()

            elaboration_part = ElaborationPart(elaboration=elaboration, sub_challenge_id=200,
                elaboration_text='this is some test text for subchallenge 1 version 1', elaboration_points=4)
            elaboration_part.save()
            elaboration_part2 = ElaborationPart(elaboration=elaboration, sub_challenge_id=201,
                elaboration_text='this is some test text for subchallenge 2 version 1', elaboration_points=4)
            elaboration_part2.save()
            submission = Submission(id=10000, elaboration=elaboration, submission_date=datetime.datetime(2013, 2, 18, 15, 24, 17, 345952, tzinfo=utc))
            submission.save()

            submission.submission_date = datetime.datetime(2013, 2, 18, 15, 24, 17, 345952, tzinfo=utc)
            submission.save()

            submission_part = SubmissionPart(submission=submission, elaboration_part=elaboration_part)
            submission_part.save()

            submission_part2 = SubmissionPart(submission=submission, elaboration_part=elaboration_part2)
            submission_part2.save()
        elif step == 1:
            #create a evaluation
            print('create test evaluation')
            with reversion.create_revision():

                elaboration = Elaboration.objects.get(id=10000)
                elaboration_part = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=200)
                elaboration_part.save()
                elaboration_part2 = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=201)
                elaboration_part2.save()

                submission = Submission.objects.get(id=10000)
                submission.submission_state = Submission.SUBMISSION_STATE_EVALUATED
                submission.save()

                submission_part = SubmissionPart.objects.get(submission=submission, elaboration_part = elaboration_part)
                submission_part2 = SubmissionPart.objects.get(submission=submission, elaboration_part = elaboration_part2)

                submission_part.save()
                submission_part2.save()

                evaluation = Evaluation(id=10000, submission=submission, user_id=2, creation_date=timezone.localtime(timezone.now()), evaluation_state=Evaluation.EVALUATION_STATE_EVALUATION_PUBLISHED)
                evaluation.creation_date=datetime.datetime(2013, 2, 20, 15, 24, 17, 345952, tzinfo=utc)
                evaluation.save()

                eval_part = EvaluationPart(evaluation=evaluation, submission_part=submission_part, feedback='this is version 1 of the evaluation of version 1 of the subchallenge 1', assigned_points=3)
                eval_part.save()

                eval_part2 = EvaluationPart(evaluation=evaluation, submission_part=submission_part2, feedback='this is version 1 of the evaluation of version 1 of the subchallenge 2', assigned_points=2)
                eval_part2.save()

        elif step == 2:
            #create a second evaluation
            print('create test 2nd eval')
            with reversion.create_revision():
                submission = Submission.objects.get(id=10000)
                elaboration = Elaboration.objects.get(id=10000)

                elaboration_part = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=200)
                elaboration_part2 = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=201)

                submission_part = SubmissionPart.objects.get(submission=submission, elaboration_part = elaboration_part)
                submission_part2 = SubmissionPart.objects.get(submission=submission, elaboration_part = elaboration_part2)

                evaluation = Evaluation.objects.get(id=10000)
                evaluation.creation_date=datetime.datetime(2013, 2, 22, 15, 24, 17, 345952, tzinfo=utc)
                evaluation.save()

                eval_part = EvaluationPart.objects.get(evaluation=evaluation, submission_part=submission_part)
                eval_part.feedback = 'this is version 2 of the evaluation of version 1 of the subchallenge 1'
                eval_part.assigned_points = 4
                eval_part.save()

                eval_part2 = EvaluationPart.objects.get(evaluation=evaluation, submission_part=submission_part2)
                eval_part2.feedback = 'this is version 2 of the evaluation of version 1 of the subchallenge 2'
                eval_part2.assigned_points = 4
                eval_part2.save()
        elif step == 3:
            #revise the submission
            print('revise the subm')

            submission = Submission.objects.get(id=10000)
            submission.submission_state = Submission.SUBMISSION_STATE_BEING_REVISED
            submission.save()
        elif step == 4:
            #create a second version of the elab + submit again
            print('create test 2nd elab+subm')
            elaboration = Elaboration.objects.get(id=10000)

            elaboration_part = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=200)
            elaboration_part2 = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=201)

            elaboration_part.elaboration_text = 'this is some test created elaboration for subchallenge 1, version 2'
            elaboration_part.save()

            elaboration_part2.elaboration_text = 'this is some test created elaboration for subchallenge 2, version 2'
            elaboration_part2.save()

            submission = Submission.objects.get(id=10000)
            submission_part = SubmissionPart.objects.get(submission = submission, elaboration_part = elaboration_part)
            submission_part2 = SubmissionPart.objects.get(submission=submission, elaboration_part = elaboration_part2)

            submission.submission_date=datetime.datetime(2013, 2, 24, 15, 24, 17, 345952, tzinfo=utc)
            submission.submission_state = Submission.SUBMISSION_STATE_WAITING_FOR_EVALUATION
            submission.save()
            submission_part.save()
            submission_part2.save()

            """
            evaluation = Evaluation.objects.get(id=10000)
            eval_part = EvaluationPart.objects.get(evaluation=evaluation, submission_part=submission_part)
            eval_part.delete()
            evaluation.delete()
             """

            evaluation = Evaluation.objects.get(id=10000)
            evaluation.creation_date=datetime.datetime(2013, 2, 26, 15, 24, 17, 345952, tzinfo=utc)
            evaluation.evaluation_state=Evaluation.EVALUATION_STATE_NO_EVALUATION
            evaluation.save()

            eval_part = EvaluationPart.objects.get(evaluation=evaluation, submission_part=submission_part)
            eval_part.feedback = ''
            eval_part.assigned_points = None
            eval_part.save()

            eval_part2 = EvaluationPart.objects.get(evaluation=evaluation, submission_part=submission_part2)
            eval_part2.feedback = ''
            eval_part2.assigned_points = None
            eval_part2.save()

        return HttpResponse({}, mimetype='application/json')
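
The step ladder above could also be table-driven. A hedged sketch in which each hypothetical _step_* helper wraps one of the five branches:

# Sketch: dispatch fixture steps through a dict. The _step_* helpers
# are hypothetical wrappers around the branches above.
STEP_HANDLERS = {
    0: _step_create_elab_and_submission,
    1: _step_create_first_evaluation,
    2: _step_create_second_evaluation,
    3: _step_revise_submission,
    4: _step_resubmit,
}


def create_test_challenge(request, course_short_title):
    if request.is_ajax():
        STEP_HANDLERS[int(request.POST['step'])]()
        return HttpResponse({}, mimetype='application/json')
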
Example #10
def evalresult_csv_parse(filename):
    questions = {}

    with io.open(filename, 'r', encoding='utf-8') as csvfile:
        csvreader = unicode_csv_reader(csvfile, delimiter=',', quotechar='"')
        for row in csvreader:
            print ".",
            if row[0] == "evaluation":
                # Evaluation data, should be first line
                cureval = Evaluation(title=row[1], etype="PMC")  # , date=row[2], comments=row[3])
                cureval.save()

            elif row[0] == "comment":
                # Commentaires sur la EvalItem
                for col, quest in enumerate(row[4:]):
                    if col not in questions.keys():
                        questions[col] = EvalItem(evaluation=cureval, itype="")
                    questions[col].comment = quest
                    questions[col].save()

            elif row[0] == "question":
                # Description de la EvalItem
                for col, quest in enumerate(row[4:]):
                    if col not in questions.keys():
                        questions[col] = EvalItem(evaluation=cureval, itype="")
                    questions[col].content = quest
                    questions[col].save()

            elif row[0] in ["points", "bareme"]:
                # Barème
                for col, quest in enumerate(row[4:]):
                    bareme = float(quest.replace(",", "."))
                    if col not in questions.keys():
                        questions[col] = EvalItem(evaluation=cureval, itype="")
                        questions[col].save()
                    point = EvalItemPoint(item=questions[col], point=bareme, ptype="")
                    point.save()

            elif row[0] == "refitem":
                # Elément de référence
                for col, quest in enumerate(row[4:]):
                    if quest != "":
                        refitemid = int(quest)
                        refitem = EvalRefItem.objects.get(pk=refitemid)
                    if col not in questions.keys():
                        questions[col] = EvalItem(evaluation=cureval, itype="")
                        questions[col].save()
                    ref = EvalItemEvalref(item=questions[col], evalref=refitem, title="", rtype="")
                    ref.save()

            elif row[0] == "student":
                # Etudiant
                student = EUser(first_name=row[3], last_name=row[2])
                student.username = unique_username(student)
                student.save()
                student.student.is_student = True
                student.student.save()
                paper = EvalPaper(evaluation=cureval, ufor=student, uby=student)
                paper.save()
                for col, res in enumerate(row[4:]):
                    answer = EvalPaperAnswer(paper=paper, evalitem=questions[col])
                    answer.save()
                    corr = EvalPaperEvalitem(evalitem=questions[col], answer=answer, uby=student, status="COR")
                    corr.save()
                    if quest != "":
                        points = EvalPaperEvalitemPoint(
                            pevalitem=corr, pointref=questions[col].points.all()[0],
                            result=float(quest.replace(',', '.')), status="", ptype="")
                        points.save()
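
unicode_csv_reader is not shown. A common Python 2 recipe for it, following the pattern in the csv module documentation (the author's actual helper may differ):

# Sketch of a Python 2 unicode-aware CSV reader; not the author's code.
import csv


def unicode_csv_reader(unicode_csv_data, **kwargs):
    # Python 2's csv.reader cannot consume unicode directly: encode the
    # input to UTF-8, parse, then decode each cell on the way out.
    utf8_data = (line.encode('utf-8') for line in unicode_csv_data)
    for row in csv.reader(utf8_data, **kwargs):
        yield [cell.decode('utf-8') for cell in row]
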
Example #11
    def post(self, request):
        email = request.POST.get("email", "")
        answers = request.POST.get("answers", [])

        answer_list = answers.split("#")[:-1]

        green, yellow, blue, red = 0, 0, 0, 0

        for i in range(len(answer_list)):
            xg_instance = Evaluation()
            xg_instance.exam_index = 1
            xg_instance.email = email

            xg_instance.question_index = i + 1
            xg_instance.answer_index = answer_list[i]
            xg_instance.date = datetime.datetime.now().strftime(
                '%Y-%m-%d %H:%M:%S')
            xg_instance.save()

            # Questions 1-15 map answers 1/2/3/4 to green/yellow/blue/red;
            # the remaining questions use the reverse mapping.
            if i <= 14:
                if answer_list[i] == '1':
                    green += 1
                elif answer_list[i] == '2':
                    yellow += 1
                elif answer_list[i] == '3':
                    blue += 1
                else:
                    red += 1
            else:
                if answer_list[i] == '1':
                    red += 1
                elif answer_list[i] == '2':
                    blue += 1
                elif answer_list[i] == '3':
                    yellow += 1
                else:
                    green += 1

        score_list = [green, yellow, blue, red]
        max_score = max(score_list)
        max_index = score_list.index(max_score)

        if max_score < 9:
            return render(request, "evaluation/xg-complex-result.html", {})

        else:
            # PERSONALITY rows hold (colour, tags, comment, br) in the
            # same green/yellow/blue/red order as score_list.
            personality = constants.PERSONALITY[max_index]
            result_colour = personality[0]
            result_tags = personality[1]
            result_comment = personality[2]
            result_br = personality[3]

            return render(
                request, "evaluation/xg-result.html", {
                    "result_colour": result_colour,
                    "result_tags": result_tags,
                    "result_comment": result_comment.splitlines(),
                    "result_br": result_br
                })
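
For reference, the view implies that constants.PERSONALITY is a four-row table, one row per colour, each holding at least (colour, tags, comment, br). A purely illustrative shape with placeholder strings:

# Hypothetical structure of constants.PERSONALITY; the strings are
# placeholders, not the project's real content. result_comment is
# rendered with splitlines(), so the comment field is multi-line.
PERSONALITY = [
    ("green",  "tags...", "line 1\nline 2", "br..."),
    ("yellow", "tags...", "line 1\nline 2", "br..."),
    ("blue",   "tags...", "line 1\nline 2", "br..."),
    ("red",    "tags...", "line 1\nline 2", "br..."),
]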