Example #1
File: api.py  Project: jbau/edx-ora2
def _log_submission(submission, student_item):
    """
    Log the creation of a submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    logger.info(
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})"
        .format(
            submission_uuid=submission["uuid"],
            course_id=student_item["course_id"],
            item_id=student_item["item_id"],
            anonymous_student_id=student_item["student_id"]
        )
    )
    tags = [
        u"course_id:{course_id}".format(course_id=student_item['course_id']),
        u"item_id:{item_id}".format(item_id=student_item['item_id']),
        u"item_type:{item_type}".format(item_type=student_item['item_type']),
    ]
    dog_stats_api.histogram('submissions.submission.size', len(submission['answer']), tags=tags)
    dog_stats_api.increment('submissions.submission.count', tags=tags)
Example #2
    def get_problem_list(self, course_id, grader_id):
        """
        Get the list of problems for a given course.

        Args:
            course_id: the id of the course whose problems we want
            grader_id: the anonymous user_id of the grader doing the grading

        Returns:
            dict with the response from the service.  (Deliberately not
            writing out the fields here--see the docs on the staff_grading view
            in the grading_controller repo)

        Raises:
            GradingServiceError: something went wrong with the connection.
        """
        params = {
            'course_id': course_id.to_deprecated_string(),
            'grader_id': grader_id
        }
        result = self.get(self.get_problem_list_url, params)
        tags = [u'course_id:{}'.format(course_id)]
        self._record_result('get_problem_list', result, tags)
        dog_stats_api.histogram(
            self._metric_name('get_problem_list.result.length'),
            len(result.get('problem_list', [])))
        return result
Example #3
    def _log_complete_workflow(self):
        """
        A logging operation called at the end of an AI workflow's life.
        Reports the total time the task took.
        """

        # Identify the type of task for reporting
        class_name = self.__class__.__name__
        data_path = 'openassessment.assessment.ai_task.' + class_name

        tags = [
            u"course_id:{course_id}".format(course_id=self.course_id),
            u"item_id:{item_id}".format(item_id=self.item_id),
        ]

        # Calculates the time taken to complete the task and reports it to datadog
        time_delta = self.completed_at - self.scheduled_at
        dog_stats_api.histogram(data_path + '.turnaround_time',
                                time_delta.total_seconds(),
                                tags=tags)

        dog_stats_api.increment(data_path + '.completed_count', tags=tags)

        logger.info((
            u"{class_name} with uuid {uuid} completed its workflow successfully "
            u"in {seconds} seconds.").format(
                class_name=class_name,
                uuid=self.uuid,
                seconds=time_delta.total_seconds()))
Example #4
def _log_score(score):
    """
    Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    logger.info("Score of ({}/{}) set for submission {}".format(
        score.points_earned, score.points_possible, score.submission.uuid))
    tags = [
        u"course_id:{course_id}".format(
            course_id=score.student_item.course_id),
        u"item_id:{item_id}".format(item_id=score.student_item.item_id),
        u"item_type:{item_type}".format(
            item_type=score.student_item.item_type),
    ]

    time_delta = score.created_at - score.submission.created_at
    dog_stats_api.histogram('submissions.score.seconds_since_submission',
                            time_delta.total_seconds(),
                            tags=tags)

    score_percentage = score.to_float()
    if score_percentage is not None:
        dog_stats_api.histogram('submissions.score.score_percentage',
                                score_percentage,
                                tags=tags)

    dog_stats_api.increment('submissions.score.count', tags=tags)
Example #5
def _log_complete_reschedule_training(course_id=None, item_id=None, seconds=-1, success=False):
    """
    Sends the total time the rescheduling of training tasks took to datadog.
    Note that this function may be invoked multiple times per call to reschedule_training_tasks,
    because the time for EACH ATTEMPT is recorded (i.e. if we fail to schedule training due to an error,
    we log the time elapsed before trying again).

    Args:
        course_id (unicode): the course_id to tag the task with
        item_id (unicode): the item_id to tag the task with
        seconds (int): the number of seconds that elapsed during the rescheduling task.
        success (bool): indicates whether or not all attempts to reschedule were successful
    """
    tags = [
        u"course_id:{}".format(course_id),
        u"item_id:{}".format(item_id),
        u"success:{}".format(success)
    ]

    # Pass tags as a keyword argument, matching the other examples in this listing.
    dog_stats_api.histogram('openassessment.assessment.ai_task.AIRescheduleTraining.turnaround_time', seconds, tags=tags)
    dog_stats_api.increment('openassessment.assessment.ai_task.AIRescheduleTraining.completed_count', tags=tags)

    msg = u"Rescheduling of incomplete training tasks for course_id={cid} and item_id={iid} completed in {s} seconds."
    if not success:
        msg += u" At least one rescheduling task failed due to internal error."
    msg = msg.format(cid=course_id, iid=item_id, s=seconds)
    logger.info(msg)
Example #6
def _log_complete_reschedule_training(course_id=None,
                                      item_id=None,
                                      seconds=-1,
                                      success=False):
    """
    Sends the total time the rescheduling of training tasks took to datadog.
    Note that this function may be invoked multiple times per call to reschedule_training_tasks,
    because the time for EACH ATTEMPT is recorded (i.e. if we fail to schedule training due to an error,
    we log the time elapsed before trying again).

    Args:
        course_id (unicode): the course_id to tag the task with
        item_id (unicode): the item_id to tag the task with
        seconds (int): the number of seconds that elapsed during the rescheduling task.
        success (bool): indicates whether or not all attempts to reschedule were successful
    """
    tags = [
        u"course_id:{}".format(course_id), u"item_id:{}".format(item_id),
        u"success:{}".format(success)
    ]

    # Pass tags as a keyword argument, matching the other examples in this listing.
    dog_stats_api.histogram(
        'openassessment.assessment.ai_task.AIRescheduleTraining.turnaround_time',
        seconds, tags=tags)
    dog_stats_api.increment(
        'openassessment.assessment.ai_task.AIRescheduleTraining.completed_count',
        tags=tags)

    msg = u"Rescheduling of incomplete training tasks for course_id={cid} and item_id={iid} completed in {s} seconds."
    if not success:
        msg += u" At least one rescheduling task failed due to internal error."
    msg = msg.format(cid=course_id, iid=item_id, s=seconds)
    logger.info(msg)
Example #7
    def get_problem_list(self, course_id, grader_id):
        """
        Get the list of problems for a given course.

        Args:
            course_id: the id of the course whose problems we want
            grader_id: the anonymous user_id of the grader doing the grading

        Returns:
            dict with the response from the service.  (Deliberately not
            writing out the fields here--see the docs on the staff_grading view
            in the grading_controller repo)

        Raises:
            GradingServiceError: something went wrong with the connection.
        """
        params = {'course_id': course_id.to_deprecated_string(), 'grader_id': grader_id}
        result = self.get(self.get_problem_list_url, params)
        tags = [u'course_id:{}'.format(course_id)]
        self._record_result('get_problem_list', result, tags)
        dog_stats_api.histogram(
            self._metric_name('get_problem_list.result.length'),
            len(result.get('problem_list', []))
        )
        return result
Example #8
def histogram(metric_name, *args, **kwargs):
    """
    Wrapper around dog_stats_api.histogram that cleans any tags used.
    """
    if "tags" in kwargs:
        kwargs["tags"] = _clean_tags(kwargs["tags"])
    dog_stats_api.histogram(metric_name, *args, **kwargs)
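The _clean_tags helper this wrapper calls is not shown in the example. Below is a minimal sketch of what such a tag-sanitizing helper might look like; the function name comes from the snippet above, but the cleaning rules (dropping empty tags and replacing characters outside a conservative allowed set) are assumptions for illustration, not the project's actual implementation.

import re

def _clean_tags(tags):
    # Hypothetical implementation: drop empty tags and replace characters
    # outside a conservative allowed set with underscores.
    cleaned = []
    for tag in tags:
        if not tag:
            continue
        cleaned.append(re.sub(r'[^\w\-.:/]', '_', u'{}'.format(tag)))
    return cleaned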
Example #9
    def send_messages(self, email_messages):

        # check settings hook for rewriting email recipient, act accordingly
        if settings.EMAIL_REWRITE_RECIPIENT:
            for message in email_messages:
                message.to = [settings.EMAIL_REWRITE_RECIPIENT]

        # send the messages
        t = time.time()
        msg_count = self._backend.send_messages(email_messages)
        elapsed = time.time() - t
        if msg_count > 0:
            logger.info('sent %s messages, elapsed: %.3fs' %
                        (msg_count, elapsed))
            # report an average timing to datadog
            dog_stats_api.histogram('notifier.send.time', elapsed / msg_count)
            dog_stats_api.increment('notifier.send.count', msg_count)
            for msg in email_messages:
                hdrs = dict(
                    (k, v) for k, v in dict(msg.message()).iteritems()
                    if k.lower() not in ('date', 'from', 'subject',
                                         'content-type', 'mime-version'))
                logger.info("sent email: {}".format(repr(hdrs)))
        if msg_count != len(email_messages):
            logger.warn(
                'send_messages() was called with %s messages but return value was %s',
                len(email_messages), msg_count)
        return msg_count
Example #10
File: ai.py  Project: louyihua/edx-ora2
    def _log_complete_workflow(self):
        """
        A logging operation called at the end of an AI workflow's life.
        Reports the total time the task took.
        """

        # Identify the type of task for reporting
        class_name = self.__class__.__name__
        data_path = 'openassessment.assessment.ai_task.' + class_name

        tags = [
            u"course_id:{course_id}".format(course_id=self.course_id),
            u"item_id:{item_id}".format(item_id=self.item_id),
        ]

        # Calculates the time taken to complete the task and reports it to datadog
        time_delta = self.completed_at - self.scheduled_at
        dog_stats_api.histogram(
            data_path + '.turnaround_time',
            time_delta.total_seconds(),
            tags=tags
        )

        dog_stats_api.increment(data_path + '.completed_count', tags=tags)

        logger.info(
            (
                u"{class_name} with uuid {uuid} completed its workflow successfully "
                u"in {seconds} seconds."
            ).format(class_name=class_name, uuid=self.uuid, seconds=time_delta.total_seconds())
        )
Example #11
    def send_messages(self, email_messages):

        # check settings hook for rewriting email recipient, act accordingly
        if settings.EMAIL_REWRITE_RECIPIENT:
            for message in email_messages:
                message.to = [settings.EMAIL_REWRITE_RECIPIENT]
        if settings.EMAIL_SEND_COPY_TO:
            for message in email_messages:
                message.bcc = [settings.EMAIL_SEND_COPY_TO]

        # send the messages
        t = time.time()
        msg_count = self._backend.send_messages(email_messages)
        elapsed = time.time() - t
        if msg_count > 0:
            logger.info('sent %s messages, elapsed: %.3fs' % (msg_count, elapsed))
            # report an average timing to datadog
            dog_stats_api.histogram('notifier.send.time', elapsed / msg_count)
            dog_stats_api.increment('notifier.send.count', msg_count)
            for msg in email_messages:
                hdrs = dict((k, v) for k, v in dict(msg.message()).iteritems()
                            if k.lower() not in ('date', 'from', 'subject', 'content-type', 'mime-version'))
                logger.info("sent email: {}".format(repr(hdrs)))
        if msg_count != len(email_messages):
            logger.warn('send_messages() was called with %s messages but return value was %s',
                len(email_messages), msg_count)
        return msg_count
Example #12
def histogram(metric_name, *args, **kwargs):
    """
    Wrapper around dog_stats_api.histogram that cleans any tags used.
    """
    if "tags" in kwargs:
        kwargs["tags"] = _clean_tags(kwargs["tags"])
    dog_stats_api.histogram(metric_name, *args, **kwargs)
Example #13
def _log_assessment(assessment, scorer_workflow):
    """
    Log the creation of a peer assessment.

    Args:
        assessment (Assessment): The assessment model that was created.
        scorer_workflow (dict): A dictionary representation of the Workflow
            belonging to the scorer of this assessment.

    Returns:
        None

    """
    logger.info(
        u"Created peer-assessment {assessment_id} for submission "
        u"{submission_uuid}, course {course_id}, item {item_id} "
        u"with rubric {rubric_content_hash}; scored by {scorer}".format(
            assessment_id=assessment.id,
            submission_uuid=assessment.submission_uuid,
            course_id=scorer_workflow.course_id,
            item_id=scorer_workflow.item_id,
            rubric_content_hash=assessment.rubric.content_hash,
            scorer=scorer_workflow.student_id,
        ))

    tags = [
        u"course_id:{course_id}".format(course_id=scorer_workflow.course_id),
        u"item_id:{item_id}".format(item_id=scorer_workflow.item_id),
        u"type:peer",
    ]

    score_percentage = assessment.to_float()
    if score_percentage is not None:
        dog_stats_api.histogram('openassessment.assessment.score_percentage',
                                score_percentage,
                                tags=tags)

    # Calculate the time spent assessing
    # This is the time from when the scorer retrieved the submission
    # (created the peer workflow item) to when they completed an assessment.
    # By this point, the assessment *should* have an associated peer workflow item,
    # but if not, we simply skip the event.
    try:
        workflow_item = assessment.peerworkflowitem_set.get()
    except (PeerWorkflowItem.DoesNotExist,
            PeerWorkflowItem.MultipleObjectsReturned, DatabaseError):
        msg = u"Could not retrieve peer workflow item for assessment: {assessment}".format(
            assessment=assessment.id)
        logger.exception(msg)
        workflow_item = None

    if workflow_item is not None:
        time_delta = assessment.scored_at - workflow_item.started_at
        dog_stats_api.histogram(
            'openassessment.assessment.seconds_spent_assessing',
            time_delta.total_seconds(),
            tags=tags)

    dog_stats_api.increment('openassessment.assessment.count', tags=tags)
Example #14
 def get_problem_list(self, course_id, grader_id):
     params = {'course_id': course_id, 'student_id': grader_id}
     result = self.get(self.get_problem_list_url, params)
     self._record_result('get_problem_list', result)
     dog_stats_api.histogram(
         self._metric_name('get_problem_list.result.length'),
         len(result.get('problem_list', [])))
     return result
Example #15
 def get_problem_list(self, course_id, grader_id):
     params = {'course_id': course_id, 'student_id': grader_id}
     result = self.get(self.get_problem_list_url, params)
     self._record_result('get_problem_list', result)
     dog_stats_api.histogram(
         self._metric_name('get_problem_list.result.length'),
         len(result.get('problem_list',[]))
     )
     return result
Example #16
    def check_for_eta(self, location):
        params = {
            'location': location,
        }
        data = self.get(self.check_eta_url, params)
        self._record_result('check_for_eta', data)
        dog_stats_api.histogram(self._metric_name('check_for_eta.eta'), data.get('eta', 0))

        return data
Example #17
    def check_for_eta(self, location):
        params = {
            'location': location,
        }
        data = self.get(self.check_eta_url, params)
        self._record_result('check_for_eta', data)
        dog_stats_api.histogram(self._metric_name('check_for_eta.eta'),
                                data.get('eta', 0))

        return data
Example #18
def _log_assessment(assessment, scorer_workflow):
    """
    Log the creation of a peer assessment.

    Args:
        assessment (Assessment): The assessment model that was created.
        scorer_workflow (dict): A dictionary representation of the Workflow
            belonging to the scorer of this assessment.

    Returns:
        None

    """
    logger.info(
        u"Created peer-assessment {assessment_id} for submission "
        u"{submission_uuid}, course {course_id}, item {item_id} "
        u"with rubric {rubric_content_hash}; scored by {scorer}".format(
            assessment_id=assessment.id,
            submission_uuid=assessment.submission_uuid,
            course_id=scorer_workflow.course_id,
            item_id=scorer_workflow.item_id,
            rubric_content_hash=assessment.rubric.content_hash,
            scorer=scorer_workflow.student_id,
        )
    )

    tags = [
        u"course_id:{course_id}".format(course_id=scorer_workflow.course_id),
        u"item_id:{item_id}".format(item_id=scorer_workflow.item_id),
        u"type:peer",
    ]

    score_percentage = assessment.to_float()
    if score_percentage is not None:
        dog_stats_api.histogram("openassessment.assessment.score_percentage", score_percentage, tags=tags)

    # Calculate the time spent assessing
    # This is the time from when the scorer retrieved the submission
    # (created the peer workflow item) to when they completed an assessment.
    # By this point, the assessment *should* have an associated peer workflow item,
    # but if not, we simply skip the event.
    try:
        workflow_item = assessment.peerworkflowitem_set.get()
    except (PeerWorkflowItem.DoesNotExist, PeerWorkflowItem.MultipleObjectsReturned, DatabaseError):
        msg = u"Could not retrieve peer workflow item for assessment: {assessment}".format(assessment=assessment.id)
        logger.exception(msg)
        workflow_item = None

    if workflow_item is not None:
        time_delta = assessment.scored_at - workflow_item.started_at
        dog_stats_api.histogram(
            "openassessment.assessment.seconds_spent_assessing", time_delta.total_seconds(), tags=tags
        )

    dog_stats_api.increment("openassessment.assessment.count", tags=tags)
Example #19
    def process_response(self, request, response):
        """ Submit timing metrics from the current request """
        if not hasattr(request, self.DD_TIMING_ATTRIBUTE):
            return response

        # Calculate request time and submit to Datadog
        request_time = time.time() - getattr(request, self.DD_TIMING_ATTRIBUTE)
        tags = self._get_metric_tags(request)
        dog_stats_api.histogram(self.timing_metric, request_time, tags=tags)

        return response
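The process_response hooks in these middleware snippets read a start time that a matching process_request hook is expected to have stored on the request; that hook is not shown in the listing. A minimal sketch of such a hook follows; the class name, attribute value, and metric name are placeholders chosen for illustration, not the actual middleware.

import time

class DatadogTimingMiddleware(object):  # hypothetical class name
    DD_TIMING_ATTRIBUTE = '_datadog_start_time'   # attribute name assumed
    timing_metric = 'app.core.request_time'       # metric name assumed

    def process_request(self, request):
        # Stamp the request with a wall-clock start time so that
        # process_response can report the elapsed time to Datadog.
        setattr(request, self.DD_TIMING_ATTRIBUTE, time.time())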
Example #20
    def process_response(self, request, response):
        """ Submit timing metrics from the current request """
        if not hasattr(request, self.DD_TIMING_ATTRIBUTE):
            return response

        # Calculate request time and submit to Datadog
        request_time = time.time() - getattr(request, self.DD_TIMING_ATTRIBUTE)
        tags = self._get_metric_tags(request)
        dog_stats_api.histogram(self.timing_metric, request_time, tags=tags)

        return response
Example #21
    def get_data_for_location(self, problem_location, student_id):
        if isinstance(problem_location, UsageKey):
            problem_location = problem_location.to_deprecated_string()
        params = {"location": problem_location, "student_id": student_id}
        result = self.get(self.get_data_for_location_url, params)
        self._record_result("get_data_for_location", result)
        for key in result.keys():
            if key in ("success", "error", "version"):
                continue

            dog_stats_api.histogram(self._metric_name("get_data_for_location.{}".format(key)), result[key])
        return result
Example #22
from contextlib import contextmanager

@contextmanager
def request_timer(request_id, method, url):
    start = time()
    yield
    end = time()
    duration = end - start
    dog_stats_api.histogram('comment_client.request.time', duration, end)
    log.info(
        "comment_client_request_log: request_id={request_id}, method={method}, "
        "url={url}, duration={duration}".format(request_id=request_id,
                                                method=method,
                                                url=url,
                                                duration=duration))
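A minimal usage sketch of the timer above, assuming it is decorated with @contextmanager as shown; the request id and URL are placeholders:

from uuid import uuid4

request_id = uuid4()
with request_timer(request_id, 'get', 'http://localhost:4567/api/v1/threads'):
    pass  # the timed HTTP request would be issued here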
Example #23
    def get_data_for_location(self, problem_location, student_id):
        params = {'location': problem_location, 'student_id': student_id}
        result = self.get(self.get_data_for_location_url, params)
        self._record_result('get_data_for_location', result)
        for key in result.keys():
            if key in ('success', 'error', 'version'):
                continue

            dog_stats_api.histogram(
                self._metric_name('get_data_for_location.{}'.format(key)),
                result[key],
            )
        return result
Example #24
    def get_problem_list(self, course_id, grader_id):
        params = {"course_id": course_id.to_deprecated_string(), "student_id": grader_id}
        result = self.get(self.get_problem_list_url, params)

        if "problem_list" in result:
            for problem in result["problem_list"]:
                problem["location"] = course_id.make_usage_key_from_deprecated_string(problem["location"])

        self._record_result("get_problem_list", result)
        dog_stats_api.histogram(
            self._metric_name("get_problem_list.result.length"), len(result.get("problem_list", []))
        )
        return result
Example #25
    def get_flagged_problem_list(self, course_id):
        params = {
            'course_id': course_id.to_deprecated_string(),
        }

        data = self.get(self.flagged_problem_list_url, params)

        tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
        self._record_result('get_flagged_problem_list', data, tags)
        dog_stats_api.histogram(
            self._metric_name('get_flagged_problem_list.length'),
            len(data.get('flagged_submissions', [])))
        return data
Example #26
    def get_data_for_location(self, problem_location, student_id):
        params = {'location': problem_location, 'student_id': student_id}
        result = self.get(self.get_data_for_location_url, params)
        self._record_result('get_data_for_location', result)
        for key in result.keys():
            if key in ('success', 'error', 'version'):
                continue

            dog_stats_api.histogram(
                self._metric_name('get_data_for_location.{}'.format(key)),
                result[key],
            )
        return result
Example #27
    def get_problem_list(self, course_id, grader_id):
        params = {'course_id': course_id.to_deprecated_string(), 'student_id': grader_id}
        result = self.get(self.get_problem_list_url, params)

        if 'problem_list' in result:
            for problem in result['problem_list']:
                problem['location'] = course_id.make_usage_key_from_deprecated_string(problem['location'])

        self._record_result('get_problem_list', result)
        dog_stats_api.histogram(
            self._metric_name('get_problem_list.result.length'),
            len(result.get('problem_list',[]))
        )
        return result
Example #28
    def get_flagged_problem_list(self, course_id):
        params = {
            'course_id': course_id.to_deprecated_string(),
        }

        data = self.get(self.flagged_problem_list_url, params)

        tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
        self._record_result('get_flagged_problem_list', data, tags)
        dog_stats_api.histogram(
            self._metric_name('get_flagged_problem_list.length'),
            len(data.get('flagged_submissions', []))
        )
        return data
Example #29
    def viewstat(self, request):
        """ Capture Django get view   """
        if not hasattr(request, self.DD_TIMING_ATTRIBUTE):
            return response
        ''' reuse the request.META json-serializer code'''
        szble = {}
        for k, v in request.META.items():
            if isinstance(v, (list, basestring, bool, int, float, long)):
                szble[k] = v
            else:
                szble[k] = str(v)

        # Compute the elapsed time from the timing attribute set at request start
        request_time = time.time() - getattr(request, self.DD_TIMING_ATTRIBUTE)
        tags = self._get_metric_tags(request)
        dog_stats_api.histogram(self.timing_metric, request_time, tags=tags)
Example #30
    def get_grading_status_list(self, course_id, student_id):
        params = {
            'student_id': student_id,
            'course_id': course_id.to_deprecated_string(),
        }

        data = self.get(self.grading_status_list_url, params)

        tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
        self._record_result('get_grading_status_list', data, tags)
        dog_stats_api.histogram(
            self._metric_name('get_grading_status_list.length'),
            len(data.get('problem_list', [])),
            tags=tags)
        return data
Example #31
from contextlib import contextmanager

@contextmanager
def request_timer(request_id, method, url):
    start = time()
    yield
    end = time()
    duration = end - start
    dog_stats_api.histogram('comment_client.request.time', duration, end)
    log.info(
        "comment_client_request_log: request_id={request_id}, method={method}, "
        "url={url}, duration={duration}".format(
            request_id=request_id,
            method=method,
            url=url,
            duration=duration
        )
    )
Example #32
    def get_grading_status_list(self, course_id, student_id):
        params = {
            'student_id': student_id,
            'course_id': course_id.to_deprecated_string(),
        }

        data = self.get(self.grading_status_list_url, params)

        tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
        self._record_result('get_grading_status_list', data, tags)
        dog_stats_api.histogram(
            self._metric_name('get_grading_status_list.length'),
            len(data.get('problem_list', [])),
            tags=tags
        )
        return data
Example #33
    def get_problem_list(self, course_id, grader_id):
        params = {
            'course_id': course_id.to_deprecated_string(),
            'student_id': grader_id
        }
        result = self.get(self.get_problem_list_url, params)

        if 'problem_list' in result:
            for problem in result['problem_list']:
                problem[
                    'location'] = course_id.make_usage_key_from_deprecated_string(
                        problem['location'])

        self._record_result('get_problem_list', result)
        dog_stats_api.histogram(
            self._metric_name('get_problem_list.result.length'),
            len(result.get('problem_list', [])))
        return result
Example #34
def _log_submission(submission, student_item):
    """
    Log the creation of a submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    logger.info(
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})"
        .format(
            submission_uuid=submission["uuid"],
            course_id=student_item["course_id"],
            item_id=student_item["item_id"],
            anonymous_student_id=student_item["student_id"]
        )
    )
    tags = [
        u"course_id:{course_id}".format(course_id=student_item['course_id']),
        u"item_id:{item_id}".format(item_id=student_item['item_id']),
        u"item_type:{item_type}".format(item_type=student_item['item_type']),
    ]
    dog_stats_api.increment('submissions.submission.count', tags=tags)

    # Submission answer is a JSON serializable, so we need to serialize it to measure its size in bytes
    try:
        answer_size = len(json.dumps(submission['answer']))
    except (ValueError, TypeError):
        msg = u"Could not serialize submission answer to calculate its length: {}".format(submission['answer'])
        logger.exception(msg)
    else:
        dog_stats_api.histogram('submissions.submission.size', answer_size, tags=tags)
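A quick illustration of why the answer is serialized before its size is measured: len() on a dict counts keys, while the length of the JSON string approximates the payload size. The answer value below is made up for illustration.

import json

answer = {u'text': u'This is my essay response.', u'file_key': u'abc123'}
print(len(answer))              # 2 -- the number of keys, not a size
print(len(json.dumps(answer)))  # length of the serialized payload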
Example #35
File: self.py  Project: YoshidaKS/edx-ora2
def _log_assessment(assessment, submission):
    """
    Log the creation of a self-assessment.

    Args:
        assessment (Assessment): The assessment model.
        submission (dict): The serialized submission model.

    Returns:
        None

    """
    logger.info(
        u"Created self-assessment {assessment_id} for student {user} on "
        u"submission {submission_uuid}, course {course_id}, item {item_id} "
        u"with rubric {rubric_content_hash}"
        .format(
            assessment_id=assessment.id,
            user=submission['student_item']['student_id'],
            submission_uuid=submission['uuid'],
            course_id=submission['student_item']['course_id'],
            item_id=submission['student_item']['item_id'],
            rubric_content_hash=assessment.rubric.content_hash
        )
    )

    tags = [
        u"course_id:{course_id}".format(course_id=submission['student_item']['course_id']),
        u"item_id:{item_id}".format(item_id=submission['student_item']['item_id']),
        u"type:self"
    ]

    score_percentage = assessment.to_float()
    if score_percentage is not None:
        dog_stats_api.histogram('openassessment.assessment.score_percentage', score_percentage, tags=tags)

    dog_stats_api.increment('openassessment.assessment.count', tags=tags)
Example #36
File: self.py  Project: silviot/edx-ora2
def _log_assessment(assessment, submission):
    """
    Log the creation of a self-assessment.

    Args:
        assessment (Assessment): The assessment model.
        submission (dict): The serialized submission model.

    Returns:
        None

    """
    logger.info(
        u"Created self-assessment {assessment_id} for learner {user} on "
        u"submission {submission_uuid}, course {course_id}, item {item_id} "
        u"with rubric {rubric_content_hash}".format(
            assessment_id=assessment.id,
            user=submission['student_item']['student_id'],
            submission_uuid=submission['uuid'],
            course_id=submission['student_item']['course_id'],
            item_id=submission['student_item']['item_id'],
            rubric_content_hash=assessment.rubric.content_hash))

    tags = [
        u"course_id:{course_id}".format(
            course_id=submission['student_item']['course_id']),
        u"item_id:{item_id}".format(
            item_id=submission['student_item']['item_id']), u"type:self"
    ]

    score_percentage = assessment.to_float()
    if score_percentage is not None:
        dog_stats_api.histogram('openassessment.assessment.score_percentage',
                                score_percentage,
                                tags=tags)

    dog_stats_api.increment('openassessment.assessment.count', tags=tags)
Example #37
File: api.py  Project: GbalsaC/bitnamiP
def _log_score(score):
    """
    Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    logger.info(
        "Score of ({}/{}) set for submission {}"
        .format(score.points_earned, score.points_possible, score.submission.uuid)
    )
    tags = [
        u"course_id:{course_id}".format(course_id=score.student_item.course_id),
        u"item_id:{item_id}".format(item_id=score.student_item.item_id),
        u"item_type:{item_type}".format(item_type=score.student_item.item_type),
    ]

    time_delta = score.created_at - score.submission.created_at
    dog_stats_api.histogram(
        'submissions.score.seconds_since_submission',
        time_delta.total_seconds(),
        tags=tags
    )

    score_percentage = score.to_float()
    if score_percentage is not None:
        dog_stats_api.histogram(
            'submissions.score.score_percentage',
            score_percentage,
            tags=tags
        )

    dog_stats_api.increment('submissions.score.count', tags=tags)
Example #38
def perform_request(method, url, data_or_params=None, raw=False,
                    metric_action=None, metric_tags=None, paged_results=False):

    if metric_tags is None:
        metric_tags = []

    metric_tags.append(u'method:{}'.format(method))
    if metric_action:
        metric_tags.append(u'action:{}'.format(metric_action))

    if data_or_params is None:
        data_or_params = {}
    headers = {
        'X-Edx-Api-Key': getattr(settings, "COMMENTS_SERVICE_KEY", None),
        'Accept-Language': get_language(),
    }
    request_id = uuid4()
    request_id_dict = {'request_id': request_id}

    if method in ['post', 'put', 'patch']:
        data = data_or_params
        params = request_id_dict
    else:
        data = None
        params = merge_dict(data_or_params, request_id_dict)
    with request_timer(request_id, method, url, metric_tags):
        response = requests.request(
            method,
            url,
            data=data,
            params=params,
            headers=headers,
            timeout=5
        )

    metric_tags.append(u'status_code:{}'.format(response.status_code))
    if response.status_code > 200:
        metric_tags.append(u'result:failure')
    else:
        metric_tags.append(u'result:success')

    dog_stats_api.increment('comment_client.request.count', tags=metric_tags)

    if 200 < response.status_code < 500:
        raise CommentClientRequestError(response.text, response.status_code)
    # Heroku returns a 503 when an application is in maintenance mode
    elif response.status_code == 503:
        raise CommentClientMaintenanceError(response.text)
    elif response.status_code == 500:
        raise CommentClient500Error(response.text)
    else:
        if raw:
            return response.text
        else:
            data = response.json()
            if paged_results:
                dog_stats_api.histogram(
                    'comment_client.request.paged.result_count',
                    value=len(data.get('collection', [])),
                    tags=metric_tags
                )
                dog_stats_api.histogram(
                    'comment_client.request.paged.page',
                    value=data.get('page', 1),
                    tags=metric_tags
                )
                dog_stats_api.histogram(
                    'comment_client.request.paged.num_pages',
                    value=data.get('num_pages', 1),
                    tags=metric_tags
                )
            return data
Example #39
    def check_problem(self, data):
        """
        Checks whether answers to a problem are correct

        Returns a map of correct/incorrect answers:
          {'success' : 'correct' | 'incorrect' | AJAX alert msg string,
           'contents' : html}
        """
        event_info = dict()
        event_info['state'] = self.lcp.get_state()
        event_info['problem_id'] = self.location.url()

        answers = self.make_dict_of_responses(data)
        answers_without_files = convert_files_to_filenames(answers)
        event_info['answers'] = answers_without_files

        metric_name = u'capa.check_problem.{}'.format

        _ = self.runtime.service(self, "i18n").ugettext

        # Too late. Cannot submit
        if self.closed():
            event_info['failure'] = 'closed'
            self.runtime.track_function('problem_check_fail', event_info)
            if dog_stats_api:
                dog_stats_api.increment(metric_name('checks'), [u'result:failed', u'failure:closed'])
            raise NotFoundError(_("Problem is closed."))

        # Problem submitted. Student should reset before checking again
        if self.done and self.rerandomize == "always":
            event_info['failure'] = 'unreset'
            self.runtime.track_function('problem_check_fail', event_info)
            if dog_stats_api:
                dog_stats_api.increment(metric_name('checks'), [u'result:failed', u'failure:unreset'])
            raise NotFoundError(_("Problem must be reset before it can be checked again."))

        # Problem queued. Students must wait a specified waittime before they are allowed to submit
        if self.lcp.is_queued():
            current_time = datetime.datetime.now(UTC())
            prev_submit_time = self.lcp.get_recentmost_queuetime()
            waittime_between_requests = self.runtime.xqueue['waittime']
            if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
                msg = _(u"You must wait at least {wait} seconds between submissions.").format(
                    wait=waittime_between_requests)
                return {'success': msg, 'html': ''}  # Prompts a modal dialog in ajax callback

        try:
            correct_map = self.lcp.grade_answers(answers)
            self.attempts = self.attempts + 1
            self.lcp.done = True
            self.set_state_from_lcp()

        except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
            log.warning("StudentInputError in capa_module:problem_check",
                        exc_info=True)

            # Save the user's state before failing
            self.set_state_from_lcp()

            # If the user is a staff member, include
            # the full exception, including traceback,
            # in the response
            if self.runtime.user_is_staff:
                msg = u"Staff debug info: {tb}".format(tb=cgi.escape(traceback.format_exc()))

            # Otherwise, display just an error message,
            # without a stack trace
            else:
                # Translators: {msg} will be replaced with a problem's error message.
                msg = _(u"Error: {msg}").format(msg=inst.message)

            return {'success': msg}

        except Exception as err:
            # Save the user's state before failing
            self.set_state_from_lcp()

            if self.runtime.DEBUG:
                msg = u"Error checking problem: {}".format(err.message)
                msg += u'\nTraceback:\n{}'.format(traceback.format_exc())
                return {'success': msg}
            raise

        published_grade = self.publish_grade()

        # success = correct if ALL questions in this problem are correct
        success = 'correct'
        for answer_id in correct_map:
            if not correct_map.is_correct(answer_id):
                success = 'incorrect'

        # NOTE: We are logging both full grading and queued-grading submissions. In the latter,
        #       'success' will always be incorrect
        event_info['grade'] = published_grade['grade']
        event_info['max_grade'] = published_grade['max_grade']
        event_info['correct_map'] = correct_map.get_dict()
        event_info['success'] = success
        event_info['attempts'] = self.attempts
        event_info['submission'] = self.get_submission_metadata_safe(answers_without_files, correct_map)
        self.runtime.track_function('problem_check', event_info)

        if dog_stats_api:
            dog_stats_api.increment(metric_name('checks'), [u'result:success'])
            dog_stats_api.histogram(
                metric_name('correct_pct'),
                float(published_grade['grade']) / published_grade['max_grade'],
            )
            dog_stats_api.histogram(
                metric_name('attempts'),
                self.attempts,
            )

        if hasattr(self.runtime, 'psychometrics_handler'):  # update PsychometricsData using callback
            self.runtime.psychometrics_handler(self.get_state_for_lcp())

        # render problem into HTML
        html = self.get_problem_html(encapsulate=False)

        return {
            'success': success,
            'contents': html,
        }
Example #40
File: statsd.py  Project: DataDog/dogapi
from random import random
from dogapi import dog_stats_api

dog_stats_api.start(statsd=True,
                    statsd_host='localhost',
                    statsd_port=8125)

while True:
    dog_stats_api.gauge('test.udp.gauge', 1000)
    dog_stats_api.increment('test.udp.counter')
    dog_stats_api.histogram('test.udp.histogram', random() * 1000)
Example #41
from random import random
from dogapi import dog_stats_api

dog_stats_api.start(statsd=True, statsd_host='localhost', statsd_port=8125)

while True:
    dog_stats_api.gauge('test.udp.gauge', 1000)
    dog_stats_api.increment('test.udp.counter')
    dog_stats_api.histogram('test.udp.histogram', random() * 1000)
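Both snippets above emit untagged metrics in a tight loop. A small variant below shows the tagged form of the same calls with a pause between iterations; the tag values are placeholders chosen for illustration.

import time
from random import random
from dogapi import dog_stats_api

dog_stats_api.start(statsd=True, statsd_host='localhost', statsd_port=8125)

tags = [u'course_id:demo/course/2014', u'item_type:openassessment']
while True:
    dog_stats_api.increment('test.udp.counter', tags=tags)
    dog_stats_api.histogram('test.udp.histogram', random() * 1000, tags=tags)
    time.sleep(1)  # avoid flooding the local statsd agent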
Example #42
def perform_request(method, url, data_or_params=None, raw=False,
                    metric_action=None, metric_tags=None, paged_results=False):

    if metric_tags is None:
        metric_tags = []

    metric_tags.append(u'method:{}'.format(method))
    if metric_action:
        metric_tags.append(u'action:{}'.format(metric_action))

    if data_or_params is None:
        data_or_params = {}
    headers = {
        'X-Edx-Api-Key': getattr(settings, "COMMENTS_SERVICE_KEY", None),
        'Accept-Language': get_language(),
    }
    request_id = uuid4()
    request_id_dict = {'request_id': request_id}

    if method in ['post', 'put', 'patch']:
        data = data_or_params
        params = request_id_dict
    else:
        data = None
        params = merge_dict(data_or_params, request_id_dict)
    with request_timer(request_id, method, url, metric_tags):
        response = requests.request(
            method,
            url,
            data=data,
            params=params,
            headers=headers,
            timeout=5
        )

    metric_tags.append(u'status_code:{}'.format(response.status_code))
    if response.status_code > 200:
        metric_tags.append(u'result:failure')
    else:
        metric_tags.append(u'result:success')

    dog_stats_api.increment('comment_client.request.count', tags=metric_tags)

    if 200 < response.status_code < 500:
        raise CommentClientRequestError(response.text, response.status_code)
    # Heroku returns a 503 when an application is in maintenance mode
    elif response.status_code == 503:
        raise CommentClientMaintenanceError(response.text)
    elif response.status_code == 500:
        raise CommentClient500Error(response.text)
    else:
        if raw:
            return response.text
        else:
            data = response.json()
            if paged_results:
                dog_stats_api.histogram(
                    'comment_client.request.paged.result_count',
                    value=len(data.get('collection', [])),
                    tags=metric_tags
                )
                dog_stats_api.histogram(
                    'comment_client.request.paged.page',
                    value=data.get('page', 1),
                    tags=metric_tags
                )
                dog_stats_api.histogram(
                    'comment_client.request.paged.num_pages',
                    value=data.get('num_pages', 1),
                    tags=metric_tags
                )
            return data