コード例 #1
0
    def create_evaluation(self, form_dict):
        """Persist the evaluator, the evaluation, and one answer per question.

        Called once every wizard step has validated.  ``form_dict`` maps the
        step names ("demographics", "email", "start", "q_0", "q_1", ...) to
        their bound forms.  Returns the saved ``Evaluation`` instance.
        """
        # Save the evaluator first so the evaluation can reference it.
        evaluator = form_dict["demographics"].save(commit=False)
        evaluator.email = form_dict["email"].cleaned_data.get("email", None)
        evaluator.save()

        # Fetch the "start" step data once: the original called
        # self.storage.get_step_data("start") twice to read two keys
        # of the same dict.
        start_data = dict(self.storage.get_step_data("start"))
        started = arrow.get(start_data["first_open"])
        first_continue = arrow.get(start_data["answered"])

        evaluation = Evaluation(platform=self.platform,
                                evaluator=evaluator,
                                started=started.datetime,
                                first_continue=first_continue.datetime)
        evaluation.save()

        # One answer form per question step, stamped with the time
        # recorded when that step was answered.
        for i, question in enumerate(self.platform.questions.all()):
            step = "q_%s" % i
            answer = form_dict[step].save(commit=False)
            answer.evaluation = evaluation
            answer.question = question
            answer.answer_time = arrow.get(
                dict(self.storage.get_step_data(step))["answered"]).datetime
            answer.save()

        return evaluation
コード例 #2
0
def create_evaluation(request):
    """
    Endpoint: /users/create_user/
    Method: POST
    Response status code: 201 created
    Description: create an Evaluation linking a lecturer and a student,
    then create each Question posted under 'questions' attached to it.
    """

    # Permission check intentionally disabled for now:
    #  if not request.user.has_perm('users.add_user'):
    #     return Response({'error': 'can not create user'}, status=status.HTTP_403_FORBIDDEN)

    evaluation_details = request.data
    lecturer = User.objects.get(id=evaluation_details['lecturer_id'])
    student = User.objects.get(id=evaluation_details['student_id'])
    #unit = Unit.objects.get(id=evaluation_details['unit_id'])

    # Renamed from 'eval', which shadowed the builtin.
    evaluation = Evaluation(
        lecturer_id=lecturer,
        student_id=student,
        #  unit_id=unit,
    )
    evaluation.save()
    eval_details = {'evaluation_id': evaluation.id}

    # The original re-fetched the evaluation from the database on every
    # iteration (Evaluation.objects.get(id=eval.id)) even though it never
    # changes; use the instance we just saved.
    for que_details in request.data['questions']:
        que = Question(question=que_details['question'],
                       category=que_details['category'],
                       evaluation_id=evaluation,
                       rating=que_details['rating'])
        que.save()

    return Response({'success': "Evaluation successfully"},
                    status=status.HTTP_201_CREATED)
コード例 #3
0
ファイル: views.py プロジェクト: ElvinEga/djangousersbackend
def create_evaluation(request):
    """
    Endpoint: /users/create_user/
    Method: POST
    Response status code: 201 created
    Description: create an Evaluation from the posted 'lec_id' and
    'stud_id' and return the new evaluation's id.
    """

    # Permission check intentionally disabled for now:
    #  if not request.user.has_perm('users.add_user'):
    #     return Response({'error': 'can not create user'}, status=status.HTTP_403_FORBIDDEN)

    evaluation_details = request.data

    # Renamed from 'eval', which shadowed the builtin.
    evaluation = Evaluation(
        lec_id=evaluation_details['lec_id'],
        student_id=evaluation_details['stud_id'],
    )
    evaluation.save()
    eval_details = {'evaluation_id': evaluation.id}

    # return Response({'success': "user added successfully"}, status=status.HTTP_201_CREATED)
    return Response(eval_details, status=status.HTTP_201_CREATED)
コード例 #4
0
ファイル: views.py プロジェクト: vincent101/Woodpecker
def evaluate(request):
    """Render and process the evaluation form (Python 2 view).

    GET renders an empty ``EvaluateForm``.  POST validates the form,
    rejects a previously used ``remark``, and otherwise stores a new
    ``Evaluation`` row filled with hard-coded scores selected by
    ``mode`` ("c2e" or "e2c").

    NOTE(review): the score values below are constants, not computed
    from the uploaded ``testFile`` -- presumably placeholder/demo
    output; confirm before relying on them.
    """
    if request.method == "POST":
        ef = EvaluateForm(request.POST, request.FILES)
        if ef.is_valid():
            # user = auth.get_user(request)
            # Falls back to the "anybody" account when no session username is set.
            username = request.session.get("username", "anybody")
            user = User.objects.get(username=username)
            remark = ef.cleaned_data["remark"]
            testFile = ef.cleaned_data["testFile"]
            extractor = ef.cleaned_data["extractor"]
            mode = ef.cleaned_data["mode"]
            # A remark acts as a unique tag for an evaluation run.
            remarks = Evaluation.objects.filter(remark__exact=remark)
            if remarks:
                error = "remark has been used."
                return render_to_response("evaluation.html", {"ef": ef, "error": error})
            else:
                evaluations = Evaluation()
                evaluations.user = user
                evaluations.remark = remark
                evaluations.testFile = testFile
                evaluations.extractor = extractor
                evaluations.mode = mode
                # Hard-coded scores for the c2e (presumably Chinese->English) mode.
                # The string "None" (not the None object) marks a missing score.
                if mode == "c2e":
                    evaluations.T_Bleu = 0.5826
                    evaluations.S_AmbiWord = 0.5
                    evaluations.S_NewWord = "None"
                    evaluations.S_Idiom = "None"
                    evaluations.S_OverLapWord = "None"
                    evaluations.S_Collocation = 1
                    evaluations.S_Noun = 0.6264
                    evaluations.S_Verb = 1
                    evaluations.S_Adj = 1
                    evaluations.S_Prep = "None"
                    evaluations.S_Adverb = "None"
                    evaluations.S_Pron = "None"
                    evaluations.S_NP = 0.4167
                    evaluations.S_VP = 0.6176
                    evaluations.S_PP = "None"
                    evaluations.S_QP = 0.7353
                    evaluations.S_ADVP = "None"
                    evaluations.S_ADJP = "None"
                    evaluations.S_Locate_Phr = 0.6
                    evaluations.S_Sub_Predi = "None"
                    evaluations.S_Predi_Obj = "None"
                    evaluations.S_Predi_Comp = "None"
                    evaluations.S_Prep_Obj = "None"
                    evaluations.S_Adv_MOD = "None"
                    evaluations.S_Adj_MOD = "None"
                    evaluations.S_Noun_MOD = "None"
                    evaluations.S_Cons_Verb = "None"
                    evaluations.S_Pivot_Phrase = "None"
                    evaluations.T_Noun = 0.6932
                    evaluations.T_Verb = 0.1667
                    evaluations.T_Adj = 0.2222
                    evaluations.T_Prep = 0.7317
                    evaluations.T_Adverb = 1
                    evaluations.T_Pron = 1
                    evaluations.T_Quantity = 0.9375
                    evaluations.T_AdjTrans = 0.75
                    evaluations.T_AdvTrans = "None"
                    evaluations.T_ModalVerb = "None"
                    evaluations.T_Article = "None"
                    evaluations.T_NP = 0.4401
                    evaluations.T_VP = 0.3475
                    evaluations.T_PP = 0.4252
                    evaluations.T_QP = 1
                    evaluations.T_ADVP = 0.1538
                    evaluations.T_ADJP = "None"
                    evaluations.T_WH_Prep_Phr = "None"
                    evaluations.T_WH_Adv_Phr = "None"
                    evaluations.T_WH_Noun_Phr = "None"
                    evaluations.T_CONJ_Phr = "None"
                    evaluations.T_Sub_Predi = 0.5349
                    evaluations.T_Predi_Obj = 0.4828
                    evaluations.T_Adv_MOD = 0.8182
                    evaluations.T_Adj_MOD = 0.4352
                    evaluations.T_Be_Predi = "None"
                    evaluations.S_BA_Sen = "None"
                    evaluations.S_BEI_Sen = "None"
                    evaluations.S_SHI_Sen = "None"
                    evaluations.S_YOU_Sen = "None"
                    evaluations.GeneralScore = 0.4298
                    evaluations.a = 1
                    evaluations.Words = 0.6611
                    evaluations.Phrases = 0.4126
                    evaluations.Sentences = "None"
                    evaluations.Source_Words = 0.7174
                    evaluations.Source_Phrases = 0.4114
                    evaluations.Source_FunctionalWords = "None"
                    evaluations.Source_SubstanWords = 0.7073
                    evaluations.Source_SpecialWords = 0.8
                    evaluations.Target_Words = 0.6598
                    evaluations.Target_Phrases = 0.4114
                    evaluations.Target_FunctionalWords = 0.7317
                    evaluations.Target_SubstanWords = 0.6598
                    evaluations.DefaultGroup = 0.4398
                # Hard-coded scores for the e2c mode.  Commented-out lines
                # are attributes deliberately left unset for this mode.
                if mode == "e2c":
                    evaluations.T_Bleu = 0.2286
                    # evaluations.S_AmbiWord
                    # evaluations.S_NewWord
                    # evaluations.S_Idiom
                    # evaluations.S_OverLapWord
                    evaluations.S_Collocation = 0.3333
                    evaluations.S_Noun = 0.4293
                    evaluations.S_Verb = 0.3333
                    evaluations.S_Adj = 0.3846
                    # evaluations.S_Prep
                    evaluations.S_Adverb = "None"
                    evaluations.S_Pron = "None"
                    evaluations.S_NP = 0.3153
                    evaluations.S_VP = 0.175
                    evaluations.S_PP = 0.2049
                    evaluations.S_QP = "None"
                    evaluations.S_ADVP = "None"
                    evaluations.S_ADJP = "None"
                    # evaluations.S_Locate_Phr
                    evaluations.S_Sub_Predi = "None"
                    evaluations.S_Predi_Obj = 0.0714
                    # evaluations.S_Predi_Comp
                    # evaluations.S_Prep_Obj
                    evaluations.S_Adv_MOD = "None"
                    evaluations.S_Adj_MOD = "None"
                    # evaluations.S_Noun_MOD
                    # evaluations.S_Cons_Verb
                    # evaluations.S_Pivot_Phrase
                    evaluations.T_Noun = 0.3739
                    evaluations.T_Verb = 0.1818
                    evaluations.T_Adj = "None"
                    evaluations.T_Prep = 0.8
                    evaluations.T_Adverb = 0
                    evaluations.T_Pron = 0
                    evaluations.T_Quantity = 0.0833
                    # evaluations.T_AdjTrans
                    # evaluations.T_AdvTrans
                    # evaluations.T_ModalVerb
                    # evaluations.T_Article
                    evaluations.T_NP = 0.217
                    evaluations.T_VP = 0.1076
                    evaluations.T_PP = 0.1412
                    evaluations.T_QP = 0.087
                    evaluations.T_ADVP = "None"
                    evaluations.T_ADJP = "None"
                    # evaluations.T_WH_Prep_Phr
                    # evaluations.T_WH_Adv_Phr
                    # evaluations.T_WH_Noun_Phr
                    # evaluations.T_CONJ_Phr
                    evaluations.T_Sub_Predi = 0.2438
                    evaluations.T_Predi_Obj = 0.1914
                    evaluations.T_Adv_MOD = 0.0532
                    evaluations.T_Adj_MOD = 0.4762
                    # evaluations.T_Be_Predi
                    # evaluations.S_BA_Sen
                    # evaluations.S_BEI_Sen
                    # evaluations.S_SHI_Sen
                    # evaluations.S_YOU_Sen

                    evaluations.S_DicPrep = "None"
                    evaluations.S_Acro = "None"
                    evaluations.S_MWE = "None"
                    evaluations.S_PhrVerb = "None"
                    evaluations.S_Hyperbaton = "None"
                    evaluations.S_AdjTrans = "None"
                    # NOTE(review): lowercase 's_AdvTrans' breaks the S_* naming
                    # pattern -- possible typo; confirm against the Evaluation model.
                    evaluations.s_AdvTrans = "None"
                    evaluations.S_WH_Prep_Phr = "None"
                    evaluations.S_WH_Adv_Phr = "None"
                    evaluations.S_WH_Noun_Phr = "None"
                    evaluations.S_CONJ_Phr = "None"
                    evaluations.S_Be_Predi = "None"
                    evaluations.T_Measure = 0.0667
                    evaluations.T_Locate_Phr = 0.1714
                    evaluations.T_Predi_Comp = "None"
                    evaluations.T_Prep_Obj = 0.1538
                    evaluations.T_Noun_MOD = 0.3137
                    evaluations.S_NormalPresentTense = "None"
                    evaluations.S_PostFutureTense = "None"
                    evaluations.S_FutureTense = "None"
                    evaluations.S_NormalPostTense = 0.3667
                    evaluations.S_PresentCompleteTense = "None"
                    evaluations.S_PostCompleteTense = "None"
                    evaluations.S_FutureCompleteTense = "None"
                    evaluations.S_PostFutureCompleteTense = "None"
                    evaluations.S_PresentDoingTense = "None"
                    evaluations.S_PostDoingTense = "None"
                    evaluations.S_FutureDoingTense = "None"
                    evaluations.S_PostFutureDoingTense = "None"
                    evaluations.S_PresentCompleteDoingTense = "None"
                    evaluations.S_PostCompleteDoingTense = "None"
                    evaluations.S_FutureCompleteDoingTense = "None"
                    evaluations.S_PostFutureCompleteDoingTense = "None"
                    evaluations.S_NormalPresentTense_NOT = "None"
                    evaluations.S_PostFutureTense_NOT = "None"
                    evaluations.S_FutureTense_NOT = "None"
                    evaluations.S_NormalPostTense_NOT = "None"
                    evaluations.S_PresentCompleteTense_NOT = "None"
                    evaluations.S_PostCompleteTense_NOT = "None"
                    evaluations.S_FutureCompleteTense_NOT = "None"
                    evaluations.S_PostFutureCompleteTense_NOT = "None"
                    evaluations.S_PresentDoingTense_NOT = "None"
                    # NOTE(review): the next two lines lack the _NOT suffix yet sit
                    # inside the _NOT group, re-assigning attributes already set
                    # above -- likely meant S_PostDoingTense_NOT /
                    # S_FutureDoingTense_NOT; confirm against the model.
                    evaluations.S_PostDoingTense = "None"
                    evaluations.S_FutureDoingTense = "None"
                    evaluations.S_PostFutureDoingTense_NOT = "None"
                    evaluations.S_PresentCompleteDoingTense_NOT = "None"
                    evaluations.S_PostCompleteDoingTense_NOT = "None"
                    evaluations.S_FutureCompleteDoingTense_NOT = "None"
                    evaluations.S_PostFutureCompleteDoingTense_NOT = "None"
                    evaluations.S_WhenClause = "None"
                    evaluations.S_WheneverClause = "None"
                    evaluations.S_WhileClause = "None"
                    evaluations.S_BeforeClause = "None"
                    evaluations.S_AfterClause = "None"
                    evaluations.S_UntilClause = "None"
                    evaluations.S_TillClause = "None"
                    evaluations.S_ByTheTimeClause = "None"
                    evaluations.S_AsSoonAsClause = "None"
                    evaluations.S_Hardly_WhenClause = "None"
                    evaluations.S_NoSooner_ThanClause = "None"
                    evaluations.S_TheMomentClause = "None"
                    evaluations.S_TheMinuteClause = "None"
                    evaluations.S_ImmediatelyClause = "None"
                    evaluations.S_DirectlyClause = "None"
                    evaluations.S_InstantlyClause = "None"
                    evaluations.S_BcauseClause = "None"
                    evaluations.S_SinceClause = "None"
                    evaluations.S_NotThatClause = "None"
                    evaluations.S_IfClause = "None"
                    evaluations.S_UnlessClause = "None"
                    evaluations.S_InCaseClause = "None"
                    evaluations.S_AsLongAsClause = "None"
                    evaluations.S_SoThatClause = "None"
                    evaluations.S_InOrderThatClause = "None"
                    evaluations.S_ForFearThatClause = "None"
                    evaluations.S_So_ThatClause = "None"
                    evaluations.S_Such_ThatClause = "None"
                    evaluations.S_ThanClause = "None"
                    evaluations.S_As_AsClause = "None"
                    evaluations.S_So_AsClause = "None"
                    evaluations.S_As_IfClause = "None"
                    evaluations.S_AsThoughClause = "None"
                    evaluations.S_ThoughClause = "None"
                    evaluations.S_AlthoughClause = "None"
                    evaluations.S_EvenIfClause = "None"
                    evaluations.S_EvenThough = "None"
                    evaluations.S_NoMatterWhatClause = "None"
                    evaluations.S_WhatEverClause = "None"
                    evaluations.S_NoMatterWhoClause = "None"
                    evaluations.S_WhoeverClause = "None"
                    evaluations.S_NoMatterWhichClause = "None"
                    evaluations.S_WhicheverClause = "None"
                    evaluations.S_NoMatterHowClause = "None"
                    evaluations.S_HoweverClause = "None"
                    evaluations.S_NoMatterWhenClause = "None"

                    # Aggregate/group scores.
                    evaluations.GeneralScore = 0.2125
                    # evaluations.a
                    evaluations.Words = 0.3382
                    evaluations.Phrases = 0.1894
                    evaluations.Sentences = 0.3667
                    evaluations.Source_Words = 0.4115
                    evaluations.Source_Phrases = 0.2365
                    evaluations.Source_FunctionalWords = "None"
                    evaluations.Source_SubstanWords = 0.4125
                    evaluations.Source_SpecialWords = 0.3333
                    evaluations.Target_Words = 0.3341
                    evaluations.Target_Phrases = 0.1884
                    evaluations.Target_FunctionalWords = 0.25
                    evaluations.Target_SubstanWords = 0.3436
                    evaluations.DefaultGroup = 0.2125

                    evaluations.b = 0.4293
                    evaluations.Result_Clauses = "None"
                    evaluations.Manner_Clauses = "None"
                    evaluations.Compare_Clauses = "None"
                    evaluations.Tense_Yes = 0.3667
                    evaluations.Tense_No = "None"
                    evaluations.Tense = 0.3667
                    evaluations.Reason_Clauses = "None"
                    evaluations.Concessive_Clauses = "None"
                    evaluations.Purpose_Clauses = "None"
                    evaluations.Clauses = "None"
                    evaluations.Condition_Clauses = "None"
                    evaluations.Time_Clauses = "None"

                evaluations.save()
                error = "successful."
                # Python 2 print statement -- this module predates Python 3.
                print username, remark
                return render_to_response("evaluation.html", {"ef": ef, "error": error})
    else:
        ef = EvaluateForm()
    # Also reached when a POST fails validation: re-render with the bound form.
    return render_to_response("evaluation.html", {"ef": ef})
コード例 #5
0
ファイル: views.py プロジェクト: martflu/amanaman-old
def save_eval(request, course_short_title):
    """AJAX endpoint: create or update the evaluation of a submission.

    Reads the elaboration id, publish flag and per-part evaluation
    texts/points from ``request.POST``, writes the matching ``Evaluation``
    and ``EvaluationPart`` rows inside a single reversion revision,
    updates submission/evaluation states, releases the submission lock
    held by the evaluator, and returns a small JSON status object.
    Returns None (implicitly) for non-AJAX requests.
    """
    if request.is_ajax():
        elab_id = request.POST['elaboration_id']
        publish = request.POST['publish']
        elab_parts_count = request.POST['elaboration_parts_count']
        elab_parts = simplejson.loads(request.POST['elab_parts'])
        user = CustomUser.objects.get(username=request.user.username)
        subm = Submission.objects.get(elaboration_id=elab_id)

        # The flag arrives as the JS strings 'true'/'false'; coerce to bool.
        if publish == 'true':
            publish = True
        elif publish == 'false':
            publish = False

        # Renamed from 're', which shadowed the stdlib module name.
        result = {}

        with reversion.create_revision():
            # Renamed from 'eval', which shadowed the builtin.
            try:
                evaluation = Evaluation.objects.get(submission=subm)
                eval_exists = True
            except ObjectDoesNotExist:
                evaluation = Evaluation(submission=subm, user=user)
                eval_exists = False
                evaluation.evaluation_state = Evaluation.EVALUATION_STATE_EVALUATION_STARTED
                evaluation.save()

            for i in range(0, int(elab_parts_count)):
                elab_part_dict = elab_parts[i]
                eval_text = strip_tags(elab_part_dict['evaluation_text'])
                eval_points = elab_part_dict['evaluation_points']
                elab_part_id = elab_part_dict['elaboration_part_id']

                # An empty points field means "no points assigned yet".
                if eval_points == '':
                    eval_points = None

                elab_part = ElaborationPart.objects.get(id=elab_part_id)
                # Both branches below need the same SubmissionPart; the
                # original duplicated this lookup in each branch.
                subm_part = SubmissionPart.objects.get(submission=subm, elaboration_part=elab_part)

                if eval_exists:
                    eval_part = EvaluationPart.objects.get(evaluation=evaluation, submission_part=subm_part)
                    eval_part.feedback = eval_text
                    eval_part.assigned_points = eval_points
                    eval_part.save()
                    result['response'] = 'updated'
                else:
                    EvaluationPart(evaluation=evaluation, submission_part=subm_part,
                                   feedback=eval_text, assigned_points=eval_points).save()
                    result['response'] = 'created'

            # Create a version of all elaboration + submission objects
            # at the time of the first evaluation of a new submission.
            if not eval_exists or evaluation.evaluation_state == Evaluation.EVALUATION_STATE_NO_EVALUATION:
                elab = Elaboration.objects.get(id=elab_id)

                # Bare saves register the objects with the active revision.
                elab.save()
                subm.save()

                for i in range(0, int(elab_parts_count)):
                    elab_part_dict = elab_parts[i]
                    elab_part_id = elab_part_dict['elaboration_part_id']

                    elab_part = ElaborationPart.objects.get(id=elab_part_id)
                    subm_part = SubmissionPart.objects.get(submission=subm, elaboration_part=elab_part)

                    elab_part.save()
                    subm_part.save()

            # Save states.
            if publish:
                subm.submission_state = Submission.SUBMISSION_STATE_EVALUATED
                evaluation.evaluation_state = Evaluation.EVALUATION_STATE_EVALUATION_PUBLISHED
            else:
                subm.submission_state = Submission.SUBMISSION_STATE_WAITING_FOR_EVALUATION
                evaluation.evaluation_state = Evaluation.EVALUATION_STATE_EVALUATION_STARTED

            evaluation.creation_date = timezone.localtime(timezone.now())
            evaluation.save()

        subm.save()

        # Release the still-active lock this user holds on the submission.
        lock = SubmissionLock.objects.get(submission=subm, user=user, release_date__gt=timezone.localtime(timezone.now()))
        lock.release_date = timezone.localtime(timezone.now())
        lock.save()
        result['lock'] = 'resolved'

        response = simplejson.dumps(result)

        return HttpResponse(response, mimetype='application/json')
コード例 #6
0
ファイル: views.py プロジェクト: martflu/amanaman-old
def create_test_challenge(request, course_short_title):
    """AJAX test-fixture endpoint driven by a numeric 'step' parameter.

    Each step builds on the database state left by the previous one
    (hard-coded ids 10000 etc.), so steps must be invoked in order:
      0: create an elaboration + submission with two parts
      1: create the first evaluation (inside a reversion revision)
      2: overwrite it with a second evaluation version
      3: flip the submission into the being-revised state
      4: write version 2 of the elaboration, resubmit, and blank the evaluation
    Returns an empty JSON response.
    """
    if request.is_ajax():
        step = request.POST['step']

        if int(step)==0:
            #create a elab+subm
            print('create test elab+subm')

            elaboration = Elaboration(id=10000, main_challenge_id=200, portfolio_id=1, user_id=1)
            elaboration.creation_date=datetime.datetime(2013, 2, 16, 15, 24, 17, 345952, tzinfo=utc)
            elaboration.save()

            elaboration_part = ElaborationPart(elaboration=elaboration, sub_challenge_id=200,
                elaboration_text='this is some test text for subchallenge 1 version 1', elaboration_points=4)
            elaboration_part.save()
            elaboration_part2 = ElaborationPart(elaboration=elaboration, sub_challenge_id=201,
                elaboration_text='this is some test text for subchallenge 2 version 1', elaboration_points=4)
            elaboration_part2.save()
            submission = Submission(id=10000, elaboration=elaboration, submission_date=datetime.datetime(2013, 2, 18, 15, 24, 17, 345952, tzinfo=utc))
            submission.save()

            # NOTE(review): the date was already set in the constructor above;
            # this re-assignment + save looks redundant -- confirm.
            submission.submission_date = datetime.datetime(2013, 2, 18, 15, 24, 17, 345952, tzinfo=utc)
            submission.save()

            submission_part = SubmissionPart(submission=submission, elaboration_part=elaboration_part)
            submission_part.save()

            submission_part2 = SubmissionPart(submission=submission, elaboration_part=elaboration_part2)
            submission_part2.save()
        elif int(step)==1:
            #create a evaluation
            print('create test evaluation')
            with reversion.create_revision():

                # Bare saves register the objects with the active revision.
                elaboration = Elaboration.objects.get(id=10000)
                elaboration_part = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=200)
                elaboration_part.save()
                elaboration_part2 = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=201)
                elaboration_part2.save()

                submission = Submission.objects.get(id=10000)
                submission.submission_state = Submission.SUBMISSION_STATE_EVALUATED
                submission.save()

                submission_part = SubmissionPart.objects.get(submission=submission, elaboration_part = elaboration_part)
                submission_part2 = SubmissionPart.objects.get(submission=submission, elaboration_part = elaboration_part2)

                submission_part.save()
                submission_part2.save()

                evaluation = Evaluation(id=10000, submission=submission, user_id=2, creation_date=timezone.localtime(timezone.now()), evaluation_state=Evaluation.EVALUATION_STATE_EVALUATION_PUBLISHED)
                # Overwrite the creation_date with a fixed test timestamp.
                evaluation.creation_date=datetime.datetime(2013, 2, 20, 15, 24, 17, 345952, tzinfo=utc)
                evaluation.save()

                eval_part = EvaluationPart(evaluation=evaluation, submission_part=submission_part, feedback='this is version 1 of the evaluation of version 1 of the subchallenge 1', assigned_points=3)
                eval_part.save()

                eval_part2 = EvaluationPart(evaluation=evaluation, submission_part=submission_part2, feedback='this is version 1 of the evaluation of version 1 of the subchallenge 2', assigned_points=2)
                eval_part2.save()

        elif int(step)==2:
            #create a second evaluation
            print('create test 2nd eval')
            with reversion.create_revision():
                submission = Submission.objects.get(id=10000)
                elaboration = Elaboration.objects.get(id=10000)

                elaboration_part = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=200)
                elaboration_part2 = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=201)

                submission_part = SubmissionPart.objects.get(submission=submission, elaboration_part = elaboration_part)
                submission_part2 = SubmissionPart.objects.get(submission=submission, elaboration_part = elaboration_part2)

                # Re-saving creates a second version of the evaluation.
                evaluation = Evaluation.objects.get(id=10000)
                evaluation.creation_date=datetime.datetime(2013, 2, 22, 15, 24, 17, 345952, tzinfo=utc)
                evaluation.save()

                eval_part = EvaluationPart.objects.get(evaluation=evaluation, submission_part=submission_part)
                eval_part.feedback = 'this is version 2 of the evaluation of version 1 of the subchallenge 1'
                eval_part.assigned_points = 4
                eval_part.save()

                eval_part2 = EvaluationPart.objects.get(evaluation=evaluation, submission_part=submission_part2)
                eval_part2.feedback = 'this is version 2 of the evaluation of version 1 of the subchallenge 2'
                eval_part2.assigned_points = 4
                eval_part2.save()
        elif int(step)==3:
            #revise the submission
            print('revise the subm')

            submission = Submission.objects.get(id=10000)
            submission.submission_state = Submission.SUBMISSION_STATE_BEING_REVISED
            submission.save()
        elif int(step)==4:
            #create a second version of the elab + submit again
            print('create test 2nd elab+subm')
            elaboration = Elaboration.objects.get(id=10000)

            elaboration_part = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=200)
            elaboration_part2 = ElaborationPart.objects.get(elaboration=elaboration, sub_challenge_id=201)

            elaboration_part.elaboration_text = 'this is some test created elaboration for subchallenge 1, version 2'
            elaboration_part.save()

            elaboration_part2.elaboration_text = 'this is some test created elaboration for subchallenge 2, version 2'
            elaboration_part2.save()

            submission = Submission.objects.get(id=10000)
            submission_part = SubmissionPart.objects.get(submission = submission, elaboration_part = elaboration_part)
            submission_part2 = SubmissionPart.objects.get(submission=submission, elaboration_part = elaboration_part2)

            submission.submission_date=datetime.datetime(2013, 2, 24, 15, 24, 17, 345952, tzinfo=utc)
            submission.submission_state = Submission.SUBMISSION_STATE_WAITING_FOR_EVALUATION
            submission.save()
            submission_part.save()
            submission_part2.save()

            # Earlier approach (deleting the evaluation) kept for reference:
            """
            evaluation = Evaluation.objects.get(id=10000)
            eval_part = EvaluationPart.objects.get(evaluation=evaluation, submission_part=submission_part)
            eval_part.delete()
            evaluation.delete()
             """

            # Instead of deleting, the evaluation is blanked and reset to
            # the no-evaluation state so it can be re-evaluated.
            evaluation = Evaluation.objects.get(id=10000)
            evaluation.creation_date=datetime.datetime(2013, 2, 26, 15, 24, 17, 345952, tzinfo=utc)
            evaluation.evaluation_state=Evaluation.EVALUATION_STATE_NO_EVALUATION
            evaluation.save()

            eval_part = EvaluationPart.objects.get(evaluation=evaluation, submission_part=submission_part)
            eval_part.feedback = ''
            eval_part.assigned_points = None
            eval_part.save()

            eval_part2 = EvaluationPart.objects.get(evaluation=evaluation, submission_part=submission_part2)
            eval_part2.feedback = ''
            eval_part2.assigned_points = None
            eval_part2.save()

        return HttpResponse({}, mimetype='application/json')
コード例 #7
0
def evalresult_csv_parse(filename):
    """Import an evaluation-result CSV file into the database.

    The CSV is row-tagged: column 0 names the record type and columns 4+
    carry one entry per question column.  Recognised tags:

    - ``evaluation``: creates the Evaluation all later rows attach to
      (must appear before any other tag).
    - ``comment`` / ``question``: per-question comment / wording.
    - ``points`` / ``bareme``: per-question point scale (decimal comma OK).
    - ``refitem``: per-question reference-item primary key.
    - ``student``: one student plus his per-question results.

    Side effects only (saves Django model instances); returns None.
    """
    questions = {}  # column index -> EvalItem, shared across all rows

    with io.open(filename, 'r', encoding='utf-8') as csvfile:
        csvreader = unicode_csv_reader(csvfile, delimiter=',', quotechar='"')
        for row in csvreader:
            print(".", end="")  # progress indicator, one dot per row
            if row[0] == "evaluation":
                # Evaluation data, should be the first line of the file.
                cureval = Evaluation(title=row[1], etype="PMC")  # , date=row[2], comments=row[3])
                cureval.save()

            elif row[0] == "comment":
                # Per-question comment on the EvalItem.
                for col, quest in enumerate(row[4:]):
                    if col not in questions:
                        questions[col] = EvalItem(evaluation=cureval, itype="")
                    questions[col].comment = quest
                    questions[col].save()

            elif row[0] == "question":
                # Per-question wording of the EvalItem.
                for col, quest in enumerate(row[4:]):
                    if col not in questions:
                        questions[col] = EvalItem(evaluation=cureval, itype="")
                    questions[col].content = quest
                    questions[col].save()

            elif row[0] in ["points", "bareme"]:
                # Point scale; accepts a decimal comma ("1,5" -> 1.5).
                for col, quest in enumerate(row[4:]):
                    bareme = float(quest.replace(",", "."))
                    if col not in questions:
                        questions[col] = EvalItem(evaluation=cureval, itype="")
                        questions[col].save()
                    point = EvalItemPoint(item=questions[col], point=bareme, ptype="")
                    point.save()

            elif row[0] == "refitem":
                # Reference item: empty cells reuse the previously seen
                # refitem (carry-forward).  NOTE(review): if the very first
                # cell is empty this raises NameError — confirm the CSVs
                # always fill the first refitem column.
                for col, quest in enumerate(row[4:]):
                    if quest != "":
                        refitem = EvalRefItem.objects.get(pk=int(quest))
                    if col not in questions:
                        questions[col] = EvalItem(evaluation=cureval, itype="")
                        questions[col].save()
                    ref = EvalItemEvalref(item=questions[col], evalref=refitem, title="", rtype="")
                    ref.save()

            elif row[0] == "student":
                # One student row: create the user, his paper, and one
                # answer + correction (and optional score) per question.
                student = EUser(first_name=row[3], last_name=row[2])
                student.username = unique_username(student)
                student.save()
                student.student.is_student = True
                student.student.save()
                paper = EvalPaper(evaluation=cureval, ufor=student, uby=student)
                paper.save()
                for col, res in enumerate(row[4:]):
                    answer = EvalPaperAnswer(paper=paper, evalitem=questions[col])
                    answer.save()
                    corr = EvalPaperEvalitem(evalitem=questions[col], answer=answer, uby=student, status="COR")
                    corr.save()
                    # BUG FIX: the original tested and parsed `quest`, a
                    # stale loop variable left over from a previous row,
                    # instead of this row's result `res`.
                    if res != "":
                        points = EvalPaperEvalitemPoint(
                            pevalitem=corr, pointref=questions[col].points.all()[0],
                            result=float(res.replace(',', '.')), status="", ptype="")
                        points.save()
コード例 #8
0
ファイル: views.py プロジェクト: jiadongy/ebeirui-django
    def post(request):
        """Score a colour-personality questionnaire submission.

        Reads ``email`` and a "#"-separated ``answers`` string from the
        POST body, persists one Evaluation row per answer, tallies the
        four colour scores, and renders the matching result template.
        Returns the "complex" template when no colour reaches 9 hits.
        """
        email = request.POST.get("email", "")
        # BUG FIX: default must be "" (not []) so .split below cannot
        # raise AttributeError when the field is missing.
        answers = request.POST.get("answers", "")

        # A trailing "#" produces an empty final element; drop it.
        answer_list = answers.split("#")[:-1]

        green, yellow, blue, red = 0, 0, 0, 0

        for i, answer in enumerate(answer_list):
            record = Evaluation()
            record.exam_index = 1
            record.email = email
            record.question_index = i + 1
            record.answer_index = answer
            # NOTE(review): naive local time stored as a string — confirm
            # the model field really expects this format/timezone.
            record.date = datetime.datetime.now().strftime(
                '%Y-%m-%d %H:%M:%S')
            record.save()

            # Questions 1-15 map answers 1..4 to green/yellow/blue/red;
            # the remaining questions use the reversed mapping.
            if i <= 14:
                if answer == '1':
                    green += 1
                elif answer == '2':
                    yellow += 1
                elif answer == '3':
                    blue += 1
                else:
                    red += 1
            else:
                if answer == '1':
                    red += 1
                elif answer == '2':
                    blue += 1
                elif answer == '3':
                    yellow += 1
                else:
                    green += 1

        score_list = [green, yellow, blue, red]
        max_score = max(score_list)
        max_index = score_list.index(max_score)

        # No colour dominates (fewer than 9 hits): ambiguous result page.
        if max_score < 9:
            return render(request, "evaluation/xg-complex-result.html", {})

        # One indexed lookup replaces the original four copy-pasted
        # `if max_index == k` blocks (max_index is always 0..3).
        personality = constants.PERSONALITY[max_index]
        result_colour = personality[0]
        result_tags = personality[1]
        result_comment = personality[2]
        result_br = personality[3]

        return render(
            request, "evaluation/xg-result.html", {
                "result_colour": result_colour,
                "result_tags": result_tags,
                "result_comment": result_comment.splitlines(),
                "result_br": result_br
            })