コード例 #1
0
ファイル: training.py プロジェクト: Akif-Vohra/edx-ora2
def _log_complete_reschedule_training(course_id=None, item_id=None, seconds=-1, success=False):
    """
    Sends the total time the rescheduling of training tasks took to datadog
    Note that this function may be invoked multiple times per call to reschedule_training_tasks,
    because the time for EACH ATTEMPT is taken (i.e. if we fail (by error) to schedule training once,
    we log the time elapsed before trying again.)

    Args:
        course_id (unicode): the course_id to tag the task with
        item_id (unicode): the item_id to tag the task with
        seconds (int): the number of seconds that elapsed during the rescheduling task.
        success (bool): indicates whether or not all attempts to reschedule were successful
    """
    tags = [
        u"course_id:{}".format(course_id),
        u"item_id:{}".format(item_id),
        u"success:{}".format(success)
    ]

    # Pass tags by keyword: the positional parameter after the metric value is
    # not `tags` in the dog_stats_api signatures, so passing the list
    # positionally would silently misreport the metric.
    dog_stats_api.histogram('openassessment.assessment.ai_task.AIRescheduleTraining.turnaround_time', seconds, tags=tags)
    dog_stats_api.increment('openassessment.assessment.ai_task.AIRescheduleTraining.completed_count', tags=tags)

    msg = u"Rescheduling of incomplete training tasks for course_id={cid} and item_id={iid} completed in {s} seconds."
    if not success:
        msg += u" At least one rescheduling task failed due to internal error."
    # str.format returns a new string; the original discarded the result and
    # logged the unformatted template, so capture it before logging.
    msg = msg.format(cid=course_id, iid=item_id, s=seconds)
    logger.info(msg)
コード例 #2
0
    def _log_complete_workflow(self):
        """
        Emit logging and metrics at the end of an AI workflow's life.

        Reports the task's total turnaround time to datadog and logs a
        completion message tagged with course and item.
        """
        # The concrete subclass name distinguishes the task type in metrics.
        task_name = self.__class__.__name__
        metric_prefix = 'openassessment.assessment.ai_task.' + task_name

        tags = [
            u"course_id:{}".format(self.course_id),
            u"item_id:{}".format(self.item_id),
        ]

        # Total seconds from scheduling to completion.
        elapsed = (self.completed_at - self.scheduled_at).total_seconds()

        dog_stats_api.histogram(metric_prefix + '.turnaround_time', elapsed, tags=tags)
        dog_stats_api.increment(metric_prefix + '.completed_count', tags=tags)

        logger.info(
            u"{class_name} with uuid {uuid} completed its workflow successfully "
            u"in {seconds} seconds.".format(
                class_name=task_name,
                uuid=self.uuid,
                seconds=elapsed,
            )
        )
コード例 #3
0
ファイル: models.py プロジェクト: felipemontoya/edx-platform
    def purchased_callback(self):
        """
        When purchased, this should enroll the user in the course.  We are assuming that
        course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
        in CourseEnrollmentAllowed will the user be allowed to enroll.  Otherwise requiring payment
        would in fact be quite silly since there's a clear back door.

        Raises:
            PurchasedCallbackException: if the purchased course cannot be found.
        """
        # A ValueError from either lookup is treated the same as a missing course.
        try:
            course_loc = CourseDescriptor.id_to_location(self.course_id)
            course_exists = modulestore().has_item(self.course_id, course_loc)
        except ValueError:
            raise PurchasedCallbackException(
                "The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id))

        if not course_exists:
            raise PurchasedCallbackException(
                "The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id))

        CourseEnrollment.enroll(user=self.user, course_id=self.course_id, mode=self.mode)

        log.info("Enrolled {0} in paid course {1}, paid ${2}".format(self.user.email, self.course_id, self.line_cost))
        # Legacy "org/course/run" course_id string -- split it for metric tags.
        org, course_num, run = self.course_id.split("/")
        dog_stats_api.increment(
            "shoppingcart.PaidCourseRegistration.purchased_callback.enrollment",
            tags=["org:{0}".format(org),
                  "course:{0}".format(course_num),
                  "run:{0}".format(run)]
        )
コード例 #4
0
def _log_workflow(submission_uuid, workflow):
    """
    Log the creation of a peer-assessment workflow.

    Args:
        submission_uuid (str): The UUID of the submission being assessed.
        workflow (PeerWorkflow): The Peer Workflow of the student making the
            assessment.
    """
    logger.info(
        u"Retrieved submission {} ({}, {}) to be assessed by {}".format(
            submission_uuid,
            workflow.course_id,
            workflow.item_id,
            workflow.student_id,
        )
    )

    # Over-grading is always turned on; the "overgrading" tag is kept
    # only for backwards-compatibility of existing dashboards.
    tags = [
        u"course_id:{}".format(workflow.course_id),
        u"item_id:{}".format(workflow.item_id),
        u"type:peer",
        u"overgrading",
    ]

    dog_stats_api.increment('openassessment.assessment.peer_workflow.count', tags=tags)
コード例 #5
0
    def send_messages(self, email_messages):
        """
        Send the given email messages through the wrapped backend.

        Honors two settings hooks before sending:
        ``EMAIL_REWRITE_RECIPIENT`` replaces each message's ``to`` list and
        ``EMAIL_SEND_COPY_TO`` sets each message's ``bcc`` list.

        Returns the number of messages the backend reports as sent; logs a
        warning when that differs from the number requested.
        """

        # check settings hook for rewriting email recipient, act accordingly
        if settings.EMAIL_REWRITE_RECIPIENT:
            for message in email_messages:
                message.to = [settings.EMAIL_REWRITE_RECIPIENT]
        if settings.EMAIL_SEND_COPY_TO:
            for message in email_messages:
                message.bcc = [settings.EMAIL_SEND_COPY_TO]

        # send the messages
        t = time.time()
        msg_count = self._backend.send_messages(email_messages)
        elapsed = time.time() - t
        if msg_count > 0:
            logger.info('sent %s messages, elapsed: %.3fs' % (msg_count, elapsed))
            # report an average timing to datadog
            dog_stats_api.histogram('notifier.send.time', elapsed / msg_count)
            dog_stats_api.increment('notifier.send.count', msg_count)
            for msg in email_messages:
                # Log each message's headers minus the noisy standard ones
                # (Python 2 code: dict.iteritems).
                hdrs = dict((k, v) for k, v in dict(msg.message()).iteritems()
                            if k.lower() not in ('date', 'from', 'subject', 'content-type', 'mime-version'))
                logger.info("sent email: {}".format(repr(hdrs)))
        if msg_count != len(email_messages):
            logger.warn('send_messages() was called with %s messages but return value was %s',
                len(email_messages), msg_count)
        return msg_count
コード例 #6
0
ファイル: peer.py プロジェクト: caesar2164/edx-ora2
def _log_workflow(submission_uuid, workflow):
    """
    Log the creation of a peer-assessment workflow.

    Args:
        submission_uuid (str): The UUID of the submission being assessed.
        workflow (PeerWorkflow): The Peer Workflow of the student making the
            assessment.
    """
    logger.info(
        u"Retrieved submission {} ({}, {}) to be assessed by {}"
        .format(
            submission_uuid,
            workflow.course_id,
            workflow.item_id,
            workflow.student_id,
        )
    )

    # Tags used to slice the datadog counter by course and item.
    tags = [
        u"course_id:{course_id}".format(course_id=workflow.course_id),
        u"item_id:{item_id}".format(item_id=workflow.item_id),
        u"type:peer"
    ]

    # Over-grading is always turned on
    # Keep this tag for backwards-compatibility
    tags.append(u"overgrading")

    dog_stats_api.increment('openassessment.assessment.peer_workflow.count', tags=tags)
コード例 #7
0
ファイル: wrapper.py プロジェクト: 10clouds/edx-platform
def increment(metric_name, *args, **kwargs):
    """
    Wrapper around dog_stats_api.increment that cleans any tags used.

    Args:
        metric_name (str): the datadog metric to increment.
        *args, **kwargs: forwarded verbatim to dog_stats_api.increment,
            except that a ``tags`` keyword argument, when present, is first
            passed through ``_clean_tags``.
    """
    if "tags" in kwargs:
        kwargs["tags"] = _clean_tags(kwargs["tags"])
    dog_stats_api.increment(metric_name, *args, **kwargs)
コード例 #8
0
ファイル: api.py プロジェクト: jbau/edx-ora2
def _log_submission(submission, student_item):
    """
    Log the creation of a submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    logger.info(
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})".format(
            submission_uuid=submission["uuid"],
            course_id=student_item["course_id"],
            item_id=student_item["item_id"],
            anonymous_student_id=student_item["student_id"]
        )
    )

    # Tag the metrics with the (course, item, item type) the submission belongs to.
    tags = [
        u"course_id:{}".format(student_item['course_id']),
        u"item_id:{}".format(student_item['item_id']),
        u"item_type:{}".format(student_item['item_type']),
    ]

    # Track the distribution of answer sizes and the total submission count.
    dog_stats_api.histogram('submissions.submission.size', len(submission['answer']), tags=tags)
    dog_stats_api.increment('submissions.submission.count', tags=tags)
コード例 #9
0
    def handle_grade_event(block, event_type, event):
        """
        Persist a grade event to the student's module state and emit metrics.

        Args:
            block: the component that published the event (unused here).
            event_type: the type of the published event (unused here).
            event (dict): carries 'value' (earned points), 'max_value'
                (possible points) and optionally 'user_id'.
        """
        # Fall back to the requesting user when the event carries no user_id.
        user_id = event.get('user_id', user.id)

        # Construct the key for the module
        key = KeyValueStore.Key(
            scope=Scope.user_state,
            user_id=user_id,
            block_scope_id=descriptor.location,
            field_name='grade'
        )

        student_module = field_data_cache.find_or_create(key)
        # Update the grades
        student_module.grade = event.get('value')
        student_module.max_grade = event.get('max_value')
        # Save all changes to the underlying KeyValueStore
        student_module.save()

        # Bin score into range and increment stats
        score_bucket = get_score_bucket(student_module.grade, student_module.max_grade)

        # NOTE(review): course_id is a closure variable and appears to be an
        # opaque key object with an .org attribute -- confirm in enclosing scope.
        tags = [
            u"org:{}".format(course_id.org),
            u"course:{}".format(course_id),
            u"score_bucket:{0}".format(score_bucket)
        ]

        if grade_bucket_type is not None:
            tags.append('type:%s' % grade_bucket_type)

        dog_stats_api.increment("lms.courseware.question_answered", tags=tags)
コード例 #10
0
    def process_exception(self, request, exception):
        """ Captures Django view exceptions as Datadog events """

        # Get a formatted version of the traceback.
        exc = traceback.format_exc()

        # Make request.META json-serializable.
        # (Python 2 code: basestring/long are valid names here.)
        szble = {}
        for k, v in request.META.items():
            if isinstance(v, (list, basestring, bool, int, float, long)):
                szble[k] = v
            else:
                # Fall back to str() for anything else (e.g. WSGI file-like
                # objects) so json.dumps below cannot fail.
                szble[k] = str(v)

        title = 'Exception from {0}'.format(request.path)
        text = "Traceback:\n@@@\n{0}\n@@@\nMetadata:\n@@@\n{1}\n@@@" \
            .format(exc, json.dumps(szble, indent=2))

        # Submit the exception to Datadog.
        # aggregation_key groups repeat events from the same request path.
        api.event(title, text, tags=self.event_tags, aggregation_key=request.path,
            alert_type='error')

        # Increment our errors metric
        tags = self._get_metric_tags(request)
        dog_stats_api.increment(self.error_metric, tags=tags)
コード例 #11
0
ファイル: models.py プロジェクト: felipemontoya/edx-platform
    def purchased_callback(self):
        """
        When purchased, this should enroll the user in the course.  We are assuming that
        course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
        in CourseEnrollmentAllowed will the user be allowed to enroll.  Otherwise requiring payment
        would in fact be quite silly since there's a clear back door.

        Raises:
            PurchasedCallbackException: if the purchased course cannot be found.
        """
        # A ValueError from either lookup is treated the same as a missing course.
        try:
            course_loc = CourseDescriptor.id_to_location(self.course_id)
            course_exists = modulestore().has_item(self.course_id, course_loc)
        except ValueError:
            raise PurchasedCallbackException(
                "The customer purchased Course {0}, but that course doesn't exist!"
                .format(self.course_id))

        if not course_exists:
            raise PurchasedCallbackException(
                "The customer purchased Course {0}, but that course doesn't exist!"
                .format(self.course_id))

        CourseEnrollment.enroll(user=self.user,
                                course_id=self.course_id,
                                mode=self.mode)

        log.info("Enrolled {0} in paid course {1}, paid ${2}".format(
            self.user.email, self.course_id, self.line_cost))
        # Legacy "org/course/run" course_id string -- split it for metric tags.
        org, course_num, run = self.course_id.split("/")
        dog_stats_api.increment(
            "shoppingcart.PaidCourseRegistration.purchased_callback.enrollment",
            tags=[
                "org:{0}".format(org), "course:{0}".format(course_num),
                "run:{0}".format(run)
            ])
コード例 #12
0
ファイル: peer_api.py プロジェクト: mulby/edx-ora2
def _log_workflow(submission_uuid, workflow, over_grading):
    """
    Log the creation of a peer-assessment workflow.

    Args:
        submission_uuid (str): The UUID of the submission being assessed.
        workflow (PeerWorkflow): The Peer Workflow of the student making the
            assessment.
        over_grading (bool): Whether over-grading is enabled.
    """
    logger.info(
        u"Retrieved submission {} ({}, {}) to be assessed by {}".format(
            submission_uuid,
            workflow.course_id,
            workflow.item_id,
            workflow.student_id,
        )
    )

    tags = [
        u"course_id:{}".format(workflow.course_id),
        u"item_id:{}".format(workflow.item_id),
        u"type:peer",
    ]

    # Tag over-graded workflows so they can be segmented in datadog.
    if over_grading:
        tags.append(u"overgrading")

    dog_stats_api.increment('openassessment.assessment.peer_workflow.count', tags=tags)
コード例 #13
0
    def handle_grade_event(block, event_type, event):
        """
        Persist a grade event to the student's module state and emit metrics.

        Args:
            block: the component that published the event (unused here).
            event_type: the type of the published event (unused here).
            event (dict): carries 'value' (earned points), 'max_value'
                (possible points) and optionally 'user_id'.
        """
        # Fall back to the requesting user when the event carries no user_id.
        user_id = event.get('user_id', user.id)

        # Construct the key for the module
        key = KeyValueStore.Key(
            scope=Scope.user_state,
            user_id=user_id,
            block_scope_id=descriptor.location,
            field_name='grade'
        )

        student_module = field_data_cache.find_or_create(key)
        # Update the grades
        student_module.grade = event.get('value')
        student_module.max_grade = event.get('max_value')
        # Save all changes to the underlying KeyValueStore
        student_module.save()

        # Bin score into range and increment stats
        score_bucket = get_score_bucket(student_module.grade, student_module.max_grade)
        # parse_course_id yields a dict with at least 'org', 'course' and
        # 'name' keys (used below); course_id is a closure variable.
        course_id_dict = Location.parse_course_id(course_id)

        tags = [
            u"org:{org}".format(**course_id_dict),
            u"course:{course}".format(**course_id_dict),
            u"run:{name}".format(**course_id_dict),
            u"score_bucket:{0}".format(score_bucket)
        ]

        if grade_bucket_type is not None:
            tags.append('type:%s' % grade_bucket_type)

        dog_stats_api.increment("lms.courseware.question_answered", tags=tags)
コード例 #14
0
ファイル: wrapper.py プロジェクト: jswope00/griffinx
def increment(metric_name, *args, **kwargs):
    """
    Wrapper around dog_stats_api.increment that cleans any tags used.

    Args:
        metric_name (str): the datadog metric to increment.
        *args, **kwargs: forwarded verbatim to dog_stats_api.increment,
            except that a ``tags`` keyword argument, when present, is first
            passed through ``_clean_tags``.
    """
    if "tags" in kwargs:
        kwargs["tags"] = _clean_tags(kwargs["tags"])
    dog_stats_api.increment(metric_name, *args, **kwargs)
コード例 #15
0
ファイル: subtasks.py プロジェクト: 6thfdwp/edx-platform
def update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count=0):
    """
    Update the status of the subtask in the parent InstructorTask object tracking its progress.

    Because select_for_update is used to lock the InstructorTask object while it is being updated,
    multiple subtasks updating at the same time may time out while waiting for the lock.
    The actual update operation is surrounded by a try/except/else that permits the update to be
    retried if the transaction times out.

    The subtask lock acquired in the call to check_subtask_is_valid() is released here, only when
    the attempting of retries has concluded.

    Args:
        entry_id: id of the parent InstructorTask object.
        current_task_id: id of the subtask whose status is being updated.
        new_subtask_status: the status value to record.
        retry_count (int): number of attempts already made; used internally by
            the recursive retry, not intended to be set by external callers.

    Raises:
        DatabaseError: re-raised once MAX_DATABASE_LOCK_RETRIES is exhausted.
    """
    try:
        _update_subtask_status(entry_id, current_task_id, new_subtask_status)
    except DatabaseError:
        # If we fail, try again recursively.
        retry_count += 1
        if retry_count < MAX_DATABASE_LOCK_RETRIES:
            TASK_LOG.info("Retrying to update status for subtask %s of instructor task %d with status %s:  retry %d",
                          current_task_id, entry_id, new_subtask_status, retry_count)
            dog_stats_api.increment('instructor_task.subtask.retry_after_failed_update')
            update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count)
        else:
            # Out of retries: record the failure and propagate the error.
            TASK_LOG.info("Failed to update status after %d retries for subtask %s of instructor task %d with status %s",
                          retry_count, current_task_id, entry_id, new_subtask_status)
            dog_stats_api.increment('instructor_task.subtask.failed_after_update_retries')
            raise
    finally:
        # Only release the lock on the subtask when we're done trying to update it.
        # Note that this will be called each time a recursive call to update_subtask_status()
        # returns.  Fortunately, it's okay to release a lock that has already been released.
        _release_subtask_lock(current_task_id)
コード例 #16
0
ファイル: ai.py プロジェクト: louyihua/edx-ora2
    def _log_complete_workflow(self):
        """
        A logging operation called at the end of an AI Workflow's Life
        Reports the total time the task took.
        """

        # Identify the type of task for reporting (the concrete subclass name).
        class_name = self.__class__.__name__
        data_path = 'openassessment.assessment.ai_task.' + class_name

        tags = [
            u"course_id:{course_id}".format(course_id=self.course_id),
            u"item_id:{item_id}".format(item_id=self.item_id),
        ]

        # Calculates the time taken to complete the task and reports it to datadog
        time_delta = self.completed_at - self.scheduled_at
        dog_stats_api.histogram(
            data_path + '.turnaround_time',
            time_delta.total_seconds(),
            tags=tags
        )

        dog_stats_api.increment(data_path + '.completed_count', tags=tags)

        logger.info(
            (
                u"{class_name} with uuid {uuid} completed its workflow successfully "
                u"in {seconds} seconds."
            ).format(class_name=class_name, uuid=self.uuid, seconds=time_delta.total_seconds())
        )
コード例 #17
0
    def publish(event):
        """A function that allows XModules to publish events. This only supports grade changes right now."""
        # Ignore anything that is not a grade event.
        if event.get('event_name') != 'grade':
            return

        # Construct the key for the module
        key = KeyValueStore.Key(
            scope=Scope.user_state,
            student_id=user.id,
            block_scope_id=descriptor.location,
            field_name='grade'
        )

        student_module = field_data_cache.find_or_create(key)
        # Update the grades
        student_module.grade = event.get('value')
        student_module.max_grade = event.get('max_value')
        # Save all changes to the underlying KeyValueStore
        student_module.save()

        # Bin score into range and increment stats
        score_bucket = get_score_bucket(student_module.grade, student_module.max_grade)
        # Legacy "org/course/run" course_id string (closure variable) -- split
        # it for metric tags.
        org, course_num, run = course_id.split("/")

        tags = [
            "org:{0}".format(org),
            "course:{0}".format(course_num),
            "run:{0}".format(run),
            "score_bucket:{0}".format(score_bucket)
        ]

        if grade_bucket_type is not None:
            tags.append('type:%s' % grade_bucket_type)

        dog_stats_api.increment("lms.courseware.question_answered", tags=tags)
コード例 #18
0
ファイル: peer_api.py プロジェクト: jbau/edx-ora2
def _log_workflow(submission_uuid, student_item, over_grading):
    """
    Log the creation of a peer-assessment workflow.

    Args:
        submission_uuid (str): The UUID of the submission being assessed.
        student_item (dict): The serialized student item of the student making the assessment.
        over_grading (bool): Whether over-grading is enabled.
    """
    logger.info(
        u"Retrieved submission {} ({}, {}) to be assessed by {}".format(
            submission_uuid,
            student_item["course_id"],
            student_item["item_id"],
            student_item["student_id"],
        )
    )

    tags = [
        u"course_id:{}".format(student_item['course_id']),
        u"item_id:{}".format(student_item['item_id']),
        u"type:peer",
    ]

    # Tag over-graded workflows so they can be segmented in datadog.
    if over_grading:
        tags.append(u"overgrading")

    dog_stats_api.increment('openassessment.assessment.peer_workflow.count', tags=tags)
コード例 #19
0
def m2m_changed_metrics(sender, **kwargs):
    """
    Record the number of times that Many2Many fields are updated. This is separated
    from post_save and post_delete, because it's signaled by the database model in
    the middle of the Many2Many relationship, rather than either of the models
    that are the relationship participants.

    Args:
        sender (Model): The model class in the middle of the Many2Many relationship.
        action (str): The action being taken on this Many2Many relationship.
        using (str): The name of the database being used for this deletion (optional).
        instance (Model instance): The instance whose many-to-many relation is being modified.
        model (Model class): The model of the class being added/removed/cleared from the relation.
    """
    # Only a subset of the m2m signal actions is worth recording; anything
    # else (including a missing 'action' kwarg) is ignored.
    action = {
        'post_add': 'm2m.added',
        'post_remove': 'm2m.removed',
        'post_clear': 'm2m.cleared',
    }.get(kwargs.get('action'))
    if not action:
        return

    tags = _database_tags(action, sender, kwargs)

    # Tag with the class on the far side of the relation when provided.
    if 'model' in kwargs:
        tags.append('target_class:{}'.format(kwargs['model'].__name__))

    # pk_set holds the affected primary keys; its size is the update count.
    dog_stats_api.increment(
        'edxapp.db.model',
        value=len(kwargs.get('pk_set', [])),
        tags=tags,
    )
コード例 #20
0
    def process_exception(self, request, exception):
        """ Captures Django view exceptions as Datadog events """

        # Get a formatted version of the traceback.
        exc = traceback.format_exc()

        # Make request.META json-serializable.
        # (Python 2 code: basestring/long are valid names here.)
        szble = {}
        for k, v in request.META.items():
            if isinstance(v, (list, basestring, bool, int, float, long)):
                szble[k] = v
            else:
                # Fall back to str() for anything else (e.g. WSGI file-like
                # objects) so json.dumps below cannot fail.
                szble[k] = str(v)

        title = 'Exception from {0}'.format(request.path)
        text = "Traceback:\n@@@\n{0}\n@@@\nMetadata:\n@@@\n{1}\n@@@" \
            .format(exc, json.dumps(szble, indent=2))

        # Submit the exception to Datadog.
        # aggregation_key groups repeat events from the same request path.
        api.event(title, text, tags=self.event_tags, aggregation_key=request.path,
            alert_type='error')

        # Increment our errors metric
        tags = self._get_metric_tags(request)
        dog_stats_api.increment(self.error_metric, tags=tags)
コード例 #21
0
    def send_messages(self, email_messages):
        """
        Send the given email messages through the wrapped backend.

        If the ``EMAIL_REWRITE_RECIPIENT`` setting is truthy, every message's
        recipient list is replaced with that single address before sending.

        Returns the number of messages the backend reports as sent; logs a
        warning when that differs from the number requested.
        """

        # check settings hook for rewriting email recipient, act accordingly
        if settings.EMAIL_REWRITE_RECIPIENT:
            for message in email_messages:
                message.to = [settings.EMAIL_REWRITE_RECIPIENT]

        # send the messages
        t = time.time()
        msg_count = self._backend.send_messages(email_messages)
        elapsed = time.time() - t
        if msg_count > 0:
            logger.info('sent %s messages, elapsed: %.3fs' %
                        (msg_count, elapsed))
            # report an average timing to datadog
            dog_stats_api.histogram('notifier.send.time', elapsed / msg_count)
            dog_stats_api.increment('notifier.send.count', msg_count)
            for msg in email_messages:
                # Log each message's headers minus the noisy standard ones
                # (Python 2 code: dict.iteritems).
                hdrs = dict(
                    (k, v) for k, v in dict(msg.message()).iteritems()
                    if k.lower() not in ('date', 'from', 'subject',
                                         'content-type', 'mime-version'))
                logger.info("sent email: {}".format(repr(hdrs)))
        if msg_count != len(email_messages):
            logger.warn(
                'send_messages() was called with %s messages but return value was %s',
                len(email_messages), msg_count)
        return msg_count
コード例 #22
0
def _log_score(score):
    """
    Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    logger.info("Score of ({}/{}) set for submission {}".format(
        score.points_earned, score.points_possible, score.submission.uuid))

    item = score.student_item
    tags = [
        u"course_id:{}".format(item.course_id),
        u"item_id:{}".format(item.item_id),
        u"item_type:{}".format(item.item_type),
    ]

    # Time elapsed between the submission being created and being scored.
    elapsed = (score.created_at - score.submission.created_at).total_seconds()
    dog_stats_api.histogram('submissions.score.seconds_since_submission', elapsed, tags=tags)

    # to_float() may yield None (checked below before reporting).
    score_percentage = score.to_float()
    if score_percentage is not None:
        dog_stats_api.histogram('submissions.score.score_percentage', score_percentage, tags=tags)

    dog_stats_api.increment('submissions.score.count', tags=tags)
コード例 #23
0
def _log_complete_reschedule_training(course_id=None,
                                      item_id=None,
                                      seconds=-1,
                                      success=False):
    """
    Sends the total time the rescheduling of training tasks took to datadog
    Note that this function may be invoked multiple times per call to reschedule_training_tasks,
    because the time for EACH ATTEMPT is taken (i.e. if we fail (by error) to schedule training once,
    we log the time elapsed before trying again.)

    Args:
        course_id (unicode): the course_id to tag the task with
        item_id (unicode): the item_id to tag the task with
        seconds (int): the number of seconds that elapsed during the rescheduling task.
        success (bool): indicates whether or not all attempts to reschedule were successful
    """
    tags = [
        u"course_id:{}".format(course_id), u"item_id:{}".format(item_id),
        u"success:{}".format(success)
    ]

    # Pass tags by keyword: the positional parameter after the metric value is
    # not `tags` in the dog_stats_api signatures, so passing the list
    # positionally would silently misreport the metric.
    dog_stats_api.histogram(
        'openassessment.assessment.ai_task.AIRescheduleTraining.turnaround_time',
        seconds, tags=tags)
    dog_stats_api.increment(
        'openassessment.assessment.ai_task.AIRescheduleTraining.completed_count',
        tags=tags)

    msg = u"Rescheduling of incomplete training tasks for course_id={cid} and item_id={iid} completed in {s} seconds."
    if not success:
        msg += u" At least one rescheduling task failed due to internal error."
    # str.format returns a new string; the original discarded the result and
    # logged the unformatted template, so capture it before logging.
    msg = msg.format(cid=course_id, iid=item_id, s=seconds)
    logger.info(msg)
コード例 #24
0
ファイル: peer.py プロジェクト: devs1991/test_edx_docmode
def _log_assessment(assessment, scorer_workflow):
    """
    Log the creation of a peer assessment.

    Args:
        assessment (Assessment): The assessment model that was created.
        scorer_workflow (dict): A dictionary representation of the Workflow
            belonging to the scorer of this assessment.

    Returns:
        None

    """
    logger.info(
        u"Created peer-assessment {assessment_id} for submission "
        u"{submission_uuid}, course {course_id}, item {item_id} "
        u"with rubric {rubric_content_hash}; scored by {scorer}".format(
            assessment_id=assessment.id,
            submission_uuid=assessment.submission_uuid,
            course_id=scorer_workflow.course_id,
            item_id=scorer_workflow.item_id,
            rubric_content_hash=assessment.rubric.content_hash,
            scorer=scorer_workflow.student_id,
        ))

    # Tags used to slice the datadog metrics by course and item.
    tags = [
        u"course_id:{course_id}".format(course_id=scorer_workflow.course_id),
        u"item_id:{item_id}".format(item_id=scorer_workflow.item_id),
        u"type:peer",
    ]

    # to_float() may yield None (checked below before reporting).
    score_percentage = assessment.to_float()
    if score_percentage is not None:
        dog_stats_api.histogram('openassessment.assessment.score_percentage',
                                score_percentage,
                                tags=tags)

    # Calculate the time spent assessing
    # This is the time from when the scorer retrieved the submission
    # (created the peer workflow item) to when they completed an assessment.
    # By this point, the assessment *should* have an associated peer workflow item,
    # but if not, we simply skip the event.
    try:
        workflow_item = assessment.peerworkflowitem_set.get()
    except (PeerWorkflowItem.DoesNotExist,
            PeerWorkflowItem.MultipleObjectsReturned, DatabaseError):
        msg = u"Could not retrieve peer workflow item for assessment: {assessment}".format(
            assessment=assessment.id)
        logger.exception(msg)
        workflow_item = None

    if workflow_item is not None:
        time_delta = assessment.scored_at - workflow_item.started_at
        dog_stats_api.histogram(
            'openassessment.assessment.seconds_spent_assessing',
            time_delta.total_seconds(),
            tags=tags)

    dog_stats_api.increment('openassessment.assessment.count', tags=tags)
コード例 #25
0
ファイル: models.py プロジェクト: daveloper/edx-platform
    def update_enrollment(self, mode=None, is_active=None):
        """
        Updates an enrollment for a user in a class.  This includes options
        like changing the mode, toggling is_active True/False, etc.

        Also emits relevant events for analytics purposes.

        This saves immediately.

        Args:
            mode (str): new enrollment mode, or None to leave it unchanged.
            is_active (bool): new activation state, or None to leave it unchanged.
        """
        activation_changed = False
        # if is_active is None, then the call to update_enrollment didn't specify
        # any value, so just leave is_active as it is
        if self.is_active != is_active and is_active is not None:
            self.is_active = is_active
            activation_changed = True

        mode_changed = False
        # if mode is None, the call to update_enrollment didn't specify a new
        # mode, so leave as-is
        if self.mode != mode and mode is not None:
            self.mode = mode
            mode_changed = True

        # Hit the database only when something actually changed.
        if activation_changed or mode_changed:
            self.save()

        if activation_changed:
            if self.is_active:
                self.emit_event(EVENT_NAME_ENROLLMENT_ACTIVATED)

                dog_stats_api.increment(
                    "common.student.enrollment",
                    tags=[
                        u"org:{}".format(self.course_id.org),
                        u"offering:{}".format(self.course_id.offering),
                        u"mode:{}".format(self.mode),
                    ],
                )

            else:
                # Let signal receivers react to the unenrollment before the
                # analytics event is emitted.
                UNENROLL_DONE.send(sender=None, course_enrollment=self)

                self.emit_event(EVENT_NAME_ENROLLMENT_DEACTIVATED)

                dog_stats_api.increment(
                    "common.student.unenrollment",
                    tags=[
                        u"org:{}".format(self.course_id.org),
                        u"offering:{}".format(self.course_id.offering),
                        u"mode:{}".format(self.mode),
                    ],
                )
        if mode_changed:
            # the user's default mode is "honor" and disabled for a course
            # mode change events will only be emitted when the user's mode changes from this
            self.emit_event(EVENT_NAME_ENROLLMENT_MODE_CHANGED)
コード例 #26
0
ファイル: tracker.py プロジェクト: AlexxNica/edx-platform
def send(event):
    """
    Dispatch a tracking event to every initialized backend.

    Increments the global send counter, then times each backend's
    delivery individually so slow backends show up in the metrics.
    """
    dog_stats_api.increment('track.send.count')

    for backend_name, backend_impl in backends.iteritems():
        with dog_stats_api.timer('track.send.backend.{0}'.format(backend_name)):
            backend_impl.send(event)
コード例 #27
0
ファイル: peer.py プロジェクト: robertgerinlajoie/edx-ora2
def _log_assessment(assessment, scorer_workflow):
    """
    Log the creation of a peer assessment.

    Args:
        assessment (Assessment): The assessment model that was created.
        scorer_workflow: The Workflow belonging to the scorer of this
            assessment.  It is accessed by attribute below
            (``course_id``, ``item_id``, ``student_id``), so it is a
            model-like object rather than a plain dict.

    Returns:
        None

    """
    logger.info(
        u"Created peer-assessment {assessment_id} for submission "
        u"{submission_uuid}, course {course_id}, item {item_id} "
        u"with rubric {rubric_content_hash}; scored by {scorer}".format(
            assessment_id=assessment.id,
            submission_uuid=assessment.submission_uuid,
            course_id=scorer_workflow.course_id,
            item_id=scorer_workflow.item_id,
            rubric_content_hash=assessment.rubric.content_hash,
            scorer=scorer_workflow.student_id,
        )
    )

    # Identity tags shared by all the metrics emitted below.
    tags = [
        u"course_id:{course_id}".format(course_id=scorer_workflow.course_id),
        u"item_id:{item_id}".format(item_id=scorer_workflow.item_id),
        u"type:peer",
    ]

    # NOTE(review): to_float() apparently yields the score as a float, or
    # None when no score is available -- confirm against the Assessment model.
    score_percentage = assessment.to_float()
    if score_percentage is not None:
        dog_stats_api.histogram("openassessment.assessment.score_percentage", score_percentage, tags=tags)

    # Calculate the time spent assessing
    # This is the time from when the scorer retrieved the submission
    # (created the peer workflow item) to when they completed an assessment.
    # By this point, the assessment *should* have an associated peer workflow item,
    # but if not, we simply skip the event.
    try:
        workflow_item = assessment.peerworkflowitem_set.get()
    except (PeerWorkflowItem.DoesNotExist, PeerWorkflowItem.MultipleObjectsReturned, DatabaseError):
        msg = u"Could not retrieve peer workflow item for assessment: {assessment}".format(assessment=assessment.id)
        logger.exception(msg)
        workflow_item = None

    if workflow_item is not None:
        time_delta = assessment.scored_at - workflow_item.started_at
        dog_stats_api.histogram(
            "openassessment.assessment.seconds_spent_assessing", time_delta.total_seconds(), tags=tags
        )

    dog_stats_api.increment("openassessment.assessment.count", tags=tags)
コード例 #28
0
    def update_enrollment(self, mode=None, is_active=None):
        """
        Update this enrollment's mode and/or activation state.

        Passing ``None`` for either argument leaves that field untouched.
        Saves immediately when anything changed, then emits the relevant
        analytics events and datadog counters.
        """
        # A None argument means "leave as-is"; only flag a change when a
        # value was supplied and it differs from the current one.
        activation_changed = is_active is not None and self.is_active != is_active
        if activation_changed:
            self.is_active = is_active

        mode_changed = mode is not None and self.mode != mode
        if mode_changed:
            self.mode = mode

        # Hit the database only if something actually changed.
        if activation_changed or mode_changed:
            self.save()

        if activation_changed:
            if self.is_active:
                event_name = EVENT_NAME_ENROLLMENT_ACTIVATED
                metric_name = "common.student.enrollment"
            else:
                # Fire the unenrollment signal before the analytics event.
                unenroll_done.send(sender=None, course_enrollment=self)
                event_name = EVENT_NAME_ENROLLMENT_DEACTIVATED
                metric_name = "common.student.unenrollment"

            self.emit_event(event_name)

            dog_stats_api.increment(
                metric_name,
                tags=[
                    u"org:{}".format(self.course_id.org),
                    u"offering:{}".format(self.course_id.offering),
                    u"mode:{}".format(self.mode)
                ])

        if mode_changed:
            # the user's default mode is "honor" and disabled for a course
            # mode change events will only be emitted when the user's mode changes from this
            self.emit_event(EVENT_NAME_ENROLLMENT_MODE_CHANGED)
コード例 #29
0
def post_delete_metrics(sender, **kwargs):
    """
    Count deletions of django model instances in datadog.

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this deletion (optional).
        instance (Model instance): The instance being deleted (optional).
    """
    dog_stats_api.increment(
        'edxapp.db.model',
        tags=_database_tags('deleted', sender, kwargs),
    )
コード例 #30
0
ファイル: signals.py プロジェクト: 1amongus/edx-platform
def post_delete_metrics(sender, **kwargs):
    """
    Record each django model deletion as a datadog counter increment.

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this deletion (optional).
        instance (Model instance): The instance being deleted (optional).
    """
    deletion_tags = _database_tags('deleted', sender, kwargs)

    dog_stats_api.increment('edxapp.db.model', tags=deletion_tags)
コード例 #31
0
    def update_enrollment(self, mode=None, is_active=None):
        """
        Updates an enrollment for a user in a class.  This includes options
        like changing the mode, toggling is_active True/False, etc.

        Also emits relevant events for analytics purposes.

        This saves immediately.

        Args:
            mode (str): New enrollment mode, or None to leave it unchanged.
            is_active (bool): New activation state, or None to leave it unchanged.
        """
        activation_changed = False
        # if is_active is None, then the call to update_enrollment didn't specify
        # any value, so just leave is_active as it is
        if self.is_active != is_active and is_active is not None:
            self.is_active = is_active
            activation_changed = True

        mode_changed = False
        # if mode is None, the call to update_enrollment didn't specify a new
        # mode, so leave as-is
        if self.mode != mode and mode is not None:
            self.mode = mode
            mode_changed = True

        # Only hit the database when something actually changed.
        if activation_changed or mode_changed:
            self.save()
        if activation_changed:
            # NOTE(review): assumes self.course_id is a slash-format course id
            # string that Location.parse_course_id can split into org/course/name
            # keys -- confirm against the model field's type.
            course_id_dict = Location.parse_course_id(self.course_id)
            if self.is_active:
                self.emit_event(EVENT_NAME_ENROLLMENT_ACTIVATED)

                dog_stats_api.increment(
                    "common.student.enrollment",
                    tags=[
                        u"org:{org}".format(**course_id_dict),
                        u"course:{course}".format(**course_id_dict),
                        u"run:{name}".format(**course_id_dict),
                        u"mode:{}".format(self.mode)
                    ])

            else:
                # Fire the unenrollment signal before emitting the analytics event.
                unenroll_done.send(sender=None, course_enrollment=self)

                self.emit_event(EVENT_NAME_ENROLLMENT_DEACTIVATED)

                dog_stats_api.increment(
                    "common.student.unenrollment",
                    tags=[
                        u"org:{org}".format(**course_id_dict),
                        u"course:{course}".format(**course_id_dict),
                        u"run:{name}".format(**course_id_dict),
                        u"mode:{}".format(self.mode)
                    ])
        if mode_changed:
            # the user's default mode is "honor" and disabled for a course
            # mode change events will only be emitted when the user's mode changes from this
            self.emit_event(EVENT_NAME_ENROLLMENT_MODE_CHANGED)
コード例 #32
0
ファイル: signals.py プロジェクト: 1amongus/edx-platform
def post_save_metrics(sender, **kwargs):
    """
    Count django model saves (creations and updates) in datadog.

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this update (optional).
        instance (Model instance): The instance being updated (optional).
    """
    # Pop 'created' so it does not leak into the tag computation below.
    if kwargs.pop('created', False):
        action = 'created'
    else:
        action = 'updated'

    dog_stats_api.increment('edxapp.db.model', tags=_database_tags(action, sender, kwargs))
コード例 #33
0
def post_save_metrics(sender, **kwargs):
    """
    Record the number of times that django models are saved (created or updated).

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this update (optional).
        instance (Model instance): The instance being updated (optional).
    """
    # 'created' is removed from kwargs before the tags are built.
    action = 'updated'
    if kwargs.pop('created', False):
        action = 'created'

    save_tags = _database_tags(action, sender, kwargs)
    dog_stats_api.increment('edxapp.db.model', tags=save_tags)
コード例 #34
0
    def post(self, request, course_id, format=None):
        """
        Enroll the requesting user in the course identified by ``course_id``.

        Returns an empty 200 response on success.  Returns a 400 response
        whose body carries ``err_type``/``err_msg`` when the course id is
        invalid, enrollment is closed, the course is full, or the course has
        a verified mode (which this endpoint cannot handle).
        """
        user = request.user
        err = {}
        try:
            course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

            course = course_from_key(course_key)
        except ItemNotFoundError:
            err['err_type'] = 'InvalidCourseId'
            err['err_msg'] = _("Course id is invalid")
            return Response(err, status=status.HTTP_400_BAD_REQUEST)

        if not has_access(user, 'enroll', course):
            err['err_type'] = 'InvalidEnrollment'
            err['err_msg'] = _("Enrollment is closed")
            return Response(err, status=status.HTTP_400_BAD_REQUEST)

        # see if we have already filled up all allowed enrollments
        is_course_full = CourseEnrollment.is_course_full(course)

        if is_course_full:
            err['err_type'] = 'InvalidEnrollment'
            err['err_msg'] = _("Course is full")
            return Response(err, status=status.HTTP_400_BAD_REQUEST)

        # If this course is available in multiple modes, redirect them to a page
        # where they can choose which mode they want.
        # NOTE(review): these calls receive the raw course_id string rather than
        # the parsed course_key -- confirm both forms are accepted.
        available_modes = CourseMode.modes_for_course(course_id)
        available_modes_dict = CourseMode.modes_for_course_dict(course_id, available_modes)
        if CourseMode.has_verified_mode(available_modes_dict):
            err['err_type'] = 'InvalidEnrollment'
            err['err_msg'] = _("Missing course mode")
            return Response(err, status=status.HTTP_400_BAD_REQUEST)

        # NOTE(review): assumes at least one mode exists; available_modes[0]
        # raises IndexError for a course with no modes -- confirm upstream
        # guarantees this cannot happen.
        current_mode = available_modes[0]
        course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
        dog_stats_api.increment(
            "common.student.enrollment",
            tags=[u"org:{0}".format(course_key.org),
                  u"course:{0}".format(course_key.course),
                  u"run:{0}".format(course_key.run)]
        )
        server_track(request, 'api.course.enrollment', {
            'username': user.username,
            'course_id': course_id,
        })

        CourseEnrollment.enroll(user, course.id, mode=current_mode.slug)
        return Response()
コード例 #35
0
ファイル: models.py プロジェクト: cecep-edu/edx-platform
    def update_enrollment(self, mode=None, is_active=None):
        """
        Updates an enrollment for a user in a class.  This includes options
        like changing the mode, toggling is_active True/False, etc.

        Also emits relevant events for analytics purposes.

        This saves immediately.

        Args:
            mode (str): New enrollment mode, or None to leave it unchanged.
            is_active (bool): New activation state, or None to leave it unchanged.
        """
        activation_changed = False
        # if is_active is None, then the call to update_enrollment didn't specify
        # any value, so just leave is_active as it is
        if self.is_active != is_active and is_active is not None:
            self.is_active = is_active
            activation_changed = True

        mode_changed = False
        # if mode is None, the call to update_enrollment didn't specify a new
        # mode, so leave as-is
        if self.mode != mode and mode is not None:
            self.mode = mode
            mode_changed = True

        # Only hit the database when something actually changed.
        if activation_changed or mode_changed:
            self.save()
        if activation_changed:
            # NOTE(review): assumes self.course_id is a slash-format course id
            # string parseable by Location.parse_course_id -- confirm against
            # the model field's type.
            course_id_dict = Location.parse_course_id(self.course_id)
            if self.is_active:
                self.emit_event(EVENT_NAME_ENROLLMENT_ACTIVATED)

                dog_stats_api.increment(
                    "common.student.enrollment",
                    tags=[u"org:{org}".format(**course_id_dict),
                          u"course:{course}".format(**course_id_dict),
                          u"run:{name}".format(**course_id_dict),
                          u"mode:{}".format(self.mode)]
                )

            else:
                # Fire the unenrollment signal before emitting the analytics event.
                unenroll_done.send(sender=None, course_enrollment=self)

                self.emit_event(EVENT_NAME_ENROLLMENT_DEACTIVATED)

                dog_stats_api.increment(
                    "common.student.unenrollment",
                    tags=[u"org:{org}".format(**course_id_dict),
                          u"course:{course}".format(**course_id_dict),
                          u"run:{name}".format(**course_id_dict),
                          u"mode:{}".format(self.mode)]
                )
コード例 #36
0
    def _record_result(self, action, data, tags=None):
        """
        Record the outcome of an ORA service API call in datadog.

        Arguments:
            action (str): The ORA action being recorded.
            data (dict): The data returned from the ORA service. Should contain the key 'success'.
            tags (list): Extra tags to attach to the logged metric; the list
                is extended in place when provided.
        """
        if tags is None:
            tags = []

        # Mutates the caller's list (matching the original append behavior).
        tags.extend([
            u"result:{}".format(data.get("success", False)),
            u"action:{}".format(action),
        ])
        dog_stats_api.increment(self._metric_name("request.count"), tags=tags)
コード例 #37
0
    def _record_result(self, action, data, tags=None):
        """
        Log the result of an ORA service API call to datadog.

        Arguments:
            action (str): The ORA action being recorded.
            data (dict): The data returned from the ORA service. Should contain the key 'success'.
            tags (list): Extra tags to attach to the logged metric; extended
                in place when provided.
        """
        tags = [] if tags is None else tags

        tags.append(u'result:{}'.format(data.get('success', False)))
        tags.append(u'action:{}'.format(action))

        dog_stats_api.increment(self._metric_name('request.count'), tags=tags)
コード例 #38
0
ファイル: training.py プロジェクト: Akif-Vohra/edx-ora2
def _log_start_reschedule_training(course_id=None, item_id=None):
    """
    Sends data about the rescheduling_training task to datadog.

    Args:
        course_id (unicode): the course id to associate with the log start
        item_id (unicode): the item id to tag with the log start
    """
    tags = [
        u"course_id:{}".format(course_id),
        u"item_id:{}".format(item_id),
    ]
    # Pass the tags via the `tags` keyword: increment()'s second positional
    # parameter is the metric *value*, so passing the list positionally sent
    # it as the value and attached no tags at all.
    dog_stats_api.increment('openassessment.assessment.ai_task.AIRescheduleTraining.scheduled_count', tags=tags)

    msg = u"Rescheduling of incomplete training tasks began for course_id={cid} and item_id={iid}"
    logger.info(msg.format(cid=course_id, iid=item_id))
コード例 #39
0
 def delete(self, request, course_id, format=None):
     """
     Unenroll the requesting user from the course identified by ``course_id``.

     Returns 404 when the user is not enrolled; otherwise unenrolls the
     user, records the unenrollment metric and tracking event, and returns
     an empty 204 response.
     """
     user = request.user
     if not CourseEnrollment.is_enrolled(user, course_id):
         return error.ErrorResponse(error.USER_NOT_ENROLLED, "Course id is invalid", status=status.HTTP_404_NOT_FOUND)
     course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
     CourseEnrollment.unenroll(user, course_key)
     # NOTE(review): these tags are plain (non-unicode) strings, unlike the
     # u"..." tags used by the other enrollment counters -- confirm whether
     # the inconsistency matters to the metrics backend.
     dog_stats_api.increment(
         "common.student.unenrollment",
         tags=["org:{0}".format(course_key.org),
               "course:{0}".format(course_key.course),
               "run:{0}".format(course_key.run)]
     )
     server_track(request, 'api.course.unenrollment', {
         'username': user.username,
         'course_id': course_id,
     })
     return Response(status=status.HTTP_204_NO_CONTENT)
コード例 #40
0
def _log_start_reschedule_training(course_id=None, item_id=None):
    """
    Sends data about the rescheduling_training task to datadog.

    Args:
        course_id (unicode): the course id to associate with the log start
        item_id (unicode): the item id to tag with the log start
    """
    tags = [
        u"course_id:{}".format(course_id),
        u"item_id:{}".format(item_id),
    ]
    # Use the `tags` keyword: increment()'s second positional parameter is
    # the metric *value*, so the tag list was previously sent as the value
    # and no tags were attached to the metric.
    dog_stats_api.increment(
        'openassessment.assessment.ai_task.AIRescheduleTraining.scheduled_count',
        tags=tags)

    msg = u"Rescheduling of incomplete training tasks began for course_id={cid} and item_id={iid}"
    logger.info(msg.format(cid=course_id, iid=item_id))
コード例 #41
0
ファイル: x_module.py プロジェクト: bmcdonald2/edx-platform
    def render(self, block, view_name, context=None):
        """
        Render ``view_name`` on ``block`` via the parent runtime, emitting a
        datadog counter tagged with the view, course, block type and outcome.
        """
        try:
            status = "success"
            return super(MetricsMixin, self).render(block, view_name, context=context)

        except:
            # Bare except is deliberate: flip the status tag for *any*
            # exception type, then re-raise unchanged.
            status = "failure"
            raise

        finally:
            # Runs on both the success and failure paths.
            course_id = getattr(self, 'course_id', '')
            dog_stats_api.increment(XMODULE_METRIC_NAME, tags=[
                u'view_name:{}'.format(view_name),
                u'action:render',
                u'action_status:{}'.format(status),
                u'course_id:{}'.format(course_id),
                u'block_type:{}'.format(block.scope_ids.block_type)
            ])
コード例 #42
0
ファイル: ai.py プロジェクト: louyihua/edx-ora2
    def _log_start_workflow(self):
        """
        Record the start of an AI workflow task.

        Logs the task start and increments the datadog counter for this
        task type, tagged with the course and item identifiers.
        """
        # The concrete subclass name identifies the kind of task being reported.
        task_type = self.__class__.__name__

        logger.info(u"{class_name} with uuid {uuid} was started.".format(class_name=task_type, uuid=self.uuid))

        dog_stats_api.increment(
            'openassessment.assessment.ai_task.' + task_type + '.scheduled_count',
            tags=[
                u"course_id:{course_id}".format(course_id=self.course_id),
                u"item_id:{item_id}".format(item_id=self.item_id),
            ],
        )
コード例 #43
0
ファイル: x_module.py プロジェクト: bmcdonald2/edx-platform
    def handle(self, block, handler_name, request, suffix=''):
        """
        Invoke ``handler_name`` on ``block`` via the parent runtime, emitting
        a datadog counter tagged with the handler, course, block type and
        outcome.
        """
        # Removed the unused `handle = None` local that shadowed nothing and
        # was never read.
        try:
            status = "success"
            return super(MetricsMixin, self).handle(block, handler_name, request, suffix=suffix)

        except:
            # Bare except is deliberate: flip the status tag for *any*
            # exception type, then re-raise unchanged.
            status = "failure"
            raise

        finally:
            # Runs on both the success and failure paths.
            course_id = getattr(self, 'course_id', '')
            dog_stats_api.increment(XMODULE_METRIC_NAME, tags=[
                u'handler_name:{}'.format(handler_name),
                u'action:handle',
                u'action_status:{}'.format(status),
                u'course_id:{}'.format(course_id),
                u'block_type:{}'.format(block.scope_ids.block_type)
            ])
コード例 #44
0
ファイル: subtasks.py プロジェクト: Appius/edx-platform
def track_memory_usage(metric, course_id):
    """
    Context manager to track how much memory (in bytes) a given process uses.
    Metrics will look like: 'course_email.subtask_generation.memory.rss'
    or 'course_email.subtask_generation.memory.vms'.

    Args:
        metric (str): Prefix for the emitted metric names.
        course_id: Course id used to tag the emitted metrics.

    NOTE(review): this generator is presumably decorated with
    @contextlib.contextmanager (the decorator is outside this chunk) --
    confirm before calling it directly.
    """
    memory_types = ['rss', 'vms']
    process = psutil.Process()
    # Snapshot per-type memory usage before the wrapped block runs.
    baseline_memory_info = process.get_memory_info()
    baseline_usages = [getattr(baseline_memory_info, memory_type) for memory_type in memory_types]
    yield
    for memory_type, baseline_usage in zip(memory_types, baseline_usages):
        # NOTE(review): memory info is re-read once per memory type, so the
        # rss and vms deltas come from slightly different snapshots.
        total_memory_info = process.get_memory_info()
        total_usage = getattr(total_memory_info, memory_type)
        memory_used = total_usage - baseline_usage
        # The byte delta is reported as the increment value for the counter.
        dog_stats_api.increment(
            metric + "." + memory_type,
            memory_used,
            tags=["course_id:{}".format(course_id)],
        )
コード例 #45
0
    def _log_start_workflow(self):
        """
        A logging operation called at the beginning of an AI Workflows life.
        Increments the number of tasks of that kind.
        """

        # Identify the type of task (by concrete class name) for reporting.
        class_name = self.__class__.__name__
        data_path = 'openassessment.assessment.ai_task.' + class_name

        # Identity tags that allow sorting the metric by course and item.
        tags = [
            u"course_id:{course_id}".format(course_id=self.course_id),
            u"item_id:{item_id}".format(item_id=self.item_id),
        ]

        logger.info(u"{class_name} with uuid {uuid} was started.".format(
            class_name=class_name, uuid=self.uuid))

        dog_stats_api.increment(data_path + '.scheduled_count', tags=tags)
コード例 #46
0
 def delete(self, request, course_id, format=None):
     """
     Unenroll the requesting user from the course identified by ``course_id``.

     Returns a 400 response carrying ``err_type``/``err_msg`` when the user
     is not enrolled; otherwise unenrolls the user, records the unenrollment
     metric and tracking event, and returns an empty 200 response.
     """
     user = request.user
     err = {}
     if not CourseEnrollment.is_enrolled(user, course_id):
         err['err_type'] = 'UserNotEnrolled'
         err['err_msg'] = _("You are not enrolled in this course")
         return Response(err, status=status.HTTP_400_BAD_REQUEST)
     course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
     CourseEnrollment.unenroll(user, course_key)
     dog_stats_api.increment(
         "common.student.unenrollment",
         tags=[u"org:{0}".format(course_key.org),
               u"course:{0}".format(course_key.course),
               u"run:{0}".format(course_key.run)]
     )
     server_track(request, 'api.course.unenrollment', {
         'username': user.username,
         'course_id': course_id,
     })
     return Response()
コード例 #47
0
ファイル: x_module.py プロジェクト: ybergner/edx-platform
    def render(self, block, view_name, context=None):
        """
        Render ``view_name`` on ``block`` via the parent runtime, emitting a
        datadog counter tagged with the view, course, block type and outcome.
        """
        try:
            status = "success"
            return super(MetricsMixin, self).render(block,
                                                    view_name,
                                                    context=context)

        except:
            # Bare except is deliberate: flip the status tag for *any*
            # exception type, then re-raise unchanged.
            status = "failure"
            raise

        finally:
            # Runs on both the success and failure paths.
            course_id = getattr(self, 'course_id', '')
            dog_stats_api.increment(
                XMODULE_METRIC_NAME,
                tags=[
                    u'view_name:{}'.format(view_name), u'action:render',
                    u'action_status:{}'.format(status),
                    u'course_id:{}'.format(course_id),
                    u'block_type:{}'.format(block.scope_ids.block_type)
                ])
コード例 #48
0
def send(event):
    """
    Send an event object to all the initialized backends.

    Also forwards the event to the external "Analitica" tracking service
    when settings.ANALITICA_ACTIVE is set.
    """
    dog_stats_api.increment('track.send.count')

    for name, backend in backends.iteritems():
        with dog_stats_api.timer('track.send.backend.{0}'.format(name)):
            backend.send(event)

    if settings.ANALITICA_ACTIVE:
        # Stamp the event with the send time before forwarding it.
        event['time'] = time.time()
        r =\
            requests.post(
                settings.ANALITICA_TRACK_URL,
                headers={'Authorization': settings.ANALITICA_TOKEN},
                json=event
            )
        if r.status_code != 200:
            # NOTE(review): r.json() raises ValueError when the error response
            # body is not JSON, which would mask the original failure --
            # consider falling back to r.text.
            log.error("Failed to post to the tracking backend with error {e}".format(e=r.json()))
コード例 #49
0
    def publish(block, event, custom_user=None):
        """A function that allows XModules to publish events. This only supports grade changes right now."""
        # NOTE(review): `user`, `descriptor`, `field_data_cache`, `course_id`
        # and `grade_bucket_type` are closed over from an enclosing scope that
        # is not visible in this chunk.
        if event.get('event_name') != 'grade':
            return

        # Attribute the grade to an explicitly supplied user, falling back to
        # the enclosing scope's current user.
        if custom_user:
            user_id = custom_user.id
        else:
            user_id = user.id

        # Construct the key for the module
        key = KeyValueStore.Key(
            scope=Scope.user_state,
            user_id=user_id,
            block_scope_id=descriptor.location,
            field_name='grade'
        )

        student_module = field_data_cache.find_or_create(key)
        # Update the grades
        student_module.grade = event.get('value')
        student_module.max_grade = event.get('max_value')
        # Save all changes to the underlying KeyValueStore
        student_module.save()

        # Bin score into range and increment stats
        score_bucket = get_score_bucket(student_module.grade, student_module.max_grade)
        # NOTE(review): assumes course_id is a slash-format string parseable
        # by Location.parse_course_id -- confirm against the enclosing scope.
        course_id_dict = Location.parse_course_id(course_id)

        tags = [
            u"org:{org}".format(**course_id_dict),
            u"course:{course}".format(**course_id_dict),
            u"run:{name}".format(**course_id_dict),
            u"score_bucket:{0}".format(score_bucket)
        ]

        if grade_bucket_type is not None:
            tags.append('type:%s' % grade_bucket_type)

        dog_stats_api.increment("lms.courseware.question_answered", tags=tags)
コード例 #50
0
    def publish(block, event, custom_user=None):
        """A function that allows XModules to publish events. This only supports grade changes right now."""
        # NOTE(review): `user`, `descriptor`, `field_data_cache`, `course_id`
        # and `grade_bucket_type` are closed over from an enclosing scope that
        # is not visible in this chunk.
        if event.get('event_name') != 'grade':
            return

        # Attribute the grade to an explicitly supplied user, falling back to
        # the enclosing scope's current user.
        if custom_user:
            user_id = custom_user.id
        else:
            user_id = user.id

        # Construct the key for the module
        key = KeyValueStore.Key(
            scope=Scope.user_state,
            user_id=user_id,
            block_scope_id=descriptor.location,
            field_name='grade'
        )

        student_module = field_data_cache.find_or_create(key)
        # Update the grades
        student_module.grade = event.get('value')
        student_module.max_grade = event.get('max_value')
        # Save all changes to the underlying KeyValueStore
        student_module.save()

        # Bin score into range and increment stats
        score_bucket = get_score_bucket(student_module.grade, student_module.max_grade)
        # NOTE(review): assumes course_id is a slash-format string parseable
        # by Location.parse_course_id -- confirm against the enclosing scope.
        course_id_dict = Location.parse_course_id(course_id)

        tags = [
            u"org:{org}".format(**course_id_dict),
            u"course:{course}".format(**course_id_dict),
            u"run:{name}".format(**course_id_dict),
            u"score_bucket:{0}".format(score_bucket)
        ]

        if grade_bucket_type is not None:
            tags.append('type:%s' % grade_bucket_type)

        dog_stats_api.increment("lms.courseware.question_answered", tags=tags)
コード例 #51
0
def update_subtask_status(entry_id,
                          current_task_id,
                          new_subtask_status,
                          retry_count=0):
    """
    Update the status of the subtask in the parent InstructorTask object tracking its progress.

    Because select_for_update is used to lock the InstructorTask object while it is being updated,
    multiple subtasks updating at the same time may time out while waiting for the lock.
    The actual update operation is surrounded by a try/except/finally that permits the update to be
    retried if the transaction times out.

    The subtask lock acquired in the call to check_subtask_is_valid() is released here, only when
    the attempting of retries has concluded.

    Args:
        entry_id: Id of the parent InstructorTask object.
        current_task_id: Id of the subtask whose status is being updated.
        new_subtask_status: Status value to record for the subtask.
        retry_count (int): Number of retries already attempted; callers leave
            this at 0 -- it is used internally by the recursive retry calls
            and bounded by MAX_DATABASE_LOCK_RETRIES.
    """
    try:
        _update_subtask_status(entry_id, current_task_id, new_subtask_status)
    except DatabaseError:
        # If we fail, try again recursively.
        retry_count += 1
        if retry_count < MAX_DATABASE_LOCK_RETRIES:
            TASK_LOG.info(
                "Retrying to update status for subtask %s of instructor task %d with status %s:  retry %d",
                current_task_id, entry_id, new_subtask_status, retry_count)
            dog_stats_api.increment(
                'instructor_task.subtask.retry_after_failed_update')
            update_subtask_status(entry_id, current_task_id,
                                  new_subtask_status, retry_count)
        else:
            # Out of retries: record the failure metric and propagate the
            # original DatabaseError to the caller.
            TASK_LOG.info(
                "Failed to update status after %d retries for subtask %s of instructor task %d with status %s",
                retry_count, current_task_id, entry_id, new_subtask_status)
            dog_stats_api.increment(
                'instructor_task.subtask.failed_after_update_retries')
            raise
    finally:
        # Only release the lock on the subtask when we're done trying to update it.
        # Note that this will be called each time a recursive call to update_subtask_status()
        # returns.  Fortunately, it's okay to release a lock that has already been released.
        _release_subtask_lock(current_task_id)
コード例 #52
0
    def send_to_queue(self, header, body, files_to_upload=None):
        """
        Submit a request to xqueue.

        header: JSON-serialized dict in the format described in 'xqueue_interface.make_xheader'

        body: Serialized data for the recipient behind the queueing service. The operation of
                xqueue is agnostic to the contents of 'body'

        files_to_upload: List of file objects to be uploaded to xqueue along with queue request

        Returns (error_code, msg) where error_code != 0 indicates an error
        """

        # log the send to xqueue
        header_info = json.loads(header)
        queue_name = header_info.get('queue_name', u'')
        dog_stats_api.increment(XQUEUE_METRIC_NAME, tags=[
            u'action:send_to_queue',
            u'queue:{}'.format(queue_name)
        ])

        # Attempt to send to queue
        (error, msg) = self._send_to_queue(header, body, files_to_upload)

        # If the session expired, log in and retry the send once.
        if error and (msg == 'login_required'):
            (error, content) = self._login()
            if error != 0:
                # when the login fails
                log.debug("Failed to login to queue: %s", content)
                return (error, content)
            if files_to_upload is not None:
                # Need to rewind file pointers so the retry re-uploads
                # complete file contents rather than empty remainders.
                for f in files_to_upload:
                    f.seek(0)
            (error, msg) = self._send_to_queue(header, body, files_to_upload)

        return (error, msg)
コード例 #53
0
    def send_to_queue(self, header, body, files_to_upload=None):
        """
        Submit a request to xqueue.

        header: JSON-serialized dict in the format described in 'xqueue_interface.make_xheader'

        body: Serialized data for the recipient behind the queueing service. The operation of
                xqueue is agnostic to the contents of 'body'

        files_to_upload: List of file objects to be uploaded to xqueue along with queue request

        Returns (error_code, msg) where error_code != 0 indicates an error
        """

        # log the send to xqueue
        header_info = json.loads(header)
        queue_name = header_info.get('queue_name', u'')
        dog_stats_api.increment(XQUEUE_METRIC_NAME, tags=[
            u'action:send_to_queue',
            u'queue:{}'.format(queue_name)
        ])

        # Attempt to send to queue
        (error, msg) = self._send_to_queue(header, body, files_to_upload)

        # If the session expired, log in and retry the send once.
        if error and (msg == 'login_required'):
            (error, content) = self._login()
            if error != 0:
                # when the login fails
                log.debug("Failed to login to queue: %s", content)
                return (error, content)
            if files_to_upload is not None:
                # Need to rewind file pointers so the retry re-uploads
                # complete file contents rather than empty remainders.
                for f in files_to_upload:
                    f.seek(0)
            (error, msg) = self._send_to_queue(header, body, files_to_upload)

        return (error, msg)
コード例 #54
0
ファイル: signals.py プロジェクト: 1amongus/edx-platform
def m2m_changed_metrics(sender, **kwargs):
    """
    Record the number of times that Many2Many fields are updated.

    Kept separate from the post_save and post_delete handlers because this
    signal is emitted by the model in the middle of the Many2Many
    relationship, rather than by either of the participating models.

    Args:
        sender (Model): The model class in the middle of the Many2Many relationship.
        action (str): The action being taken on this Many2Many relationship.
        using (str): The name of the database being used for this deletion (optional).
        instance (Model instance): The instance whose many-to-many relation is being modified.
        model (Model class): The model of the class being added/removed/cleared from the relation.
    """
    if 'action' not in kwargs:
        return

    # Only the "post" phases are counted; pre_* and unrecognized actions
    # map to None and are ignored.
    metric_action = {
        'post_add': 'm2m.added',
        'post_remove': 'm2m.removed',
        'post_clear': 'm2m.cleared',
    }.get(kwargs['action'])

    if not metric_action:
        return

    tags = _database_tags(metric_action, sender, kwargs)

    if 'model' in kwargs:
        tags.append('target_class:{}'.format(kwargs['model'].__name__))

    # pk_set may be absent or None; treat either case as zero affected rows.
    affected_pks = kwargs.get('pk_set') or []

    dog_stats_api.increment(
        'edxapp.db.model',
        value=len(affected_pks),
        tags=tags,
    )
Code example #55
0
def track_memory_usage(metric, course_id):
    """
    Context manager to track how much memory (in bytes) a given process uses.
    Metrics will look like: 'course_email.subtask_generation.memory.rss'
    or 'course_email.subtask_generation.memory.vms'.
    """
    # NOTE(review): this is a generator function; presumably it is wrapped
    # with @contextlib.contextmanager at its original definition site -- confirm.
    tracked_types = ('rss', 'vms')
    process = psutil.Process()

    # Snapshot usage for every tracked memory type before the wrapped body runs.
    before = process.get_memory_info()
    baselines = [getattr(before, mem_type) for mem_type in tracked_types]

    yield

    # After the body completes, report the per-type delta to datadog.
    for mem_type, baseline in zip(tracked_types, baselines):
        current = getattr(process.get_memory_info(), mem_type)
        dog_stats_api.increment(
            metric + "." + mem_type,
            current - baseline,
            tags=["course_id:{}".format(course_id)],
        )
Code example #56
0
File: x_module.py  Project: ybergner/edx-platform
    def handle(self, block, handler_name, request, suffix=''):
        """
        Delegate a handler call to the parent implementation, emitting a
        datadog counter tagged with the handler name, course, block type,
        and whether the call succeeded.

        Args:
            block: The XBlock whose handler is being invoked.
            handler_name (str): Name of the handler to run.
            request: The incoming request passed through to the handler.
            suffix (str): Optional handler suffix, passed through unchanged.

        Returns:
            Whatever the parent class's handle() returns; any exception it
            raises is re-raised after the metric is recorded.
        """
        try:
            status = "success"
            return super(MetricsMixin, self).handle(block,
                                                    handler_name,
                                                    request,
                                                    suffix=suffix)

        # Intentionally a bare except: even non-Exception errors (e.g.
        # KeyboardInterrupt) must be tagged as failures. The exception is
        # always re-raised, so nothing is swallowed here.
        except:
            status = "failure"
            raise

        finally:
            # Emitted on both success and failure paths; not every mixin
            # target has a course_id, so fall back to the empty string.
            course_id = getattr(self, 'course_id', '')
            dog_stats_api.increment(
                XMODULE_METRIC_NAME,
                tags=[
                    u'handler_name:{}'.format(handler_name), u'action:handle',
                    u'action_status:{}'.format(status),
                    u'course_id:{}'.format(course_id),
                    u'block_type:{}'.format(block.scope_ids.block_type)
                ])
Code example #57
0
File: self.py  Project: silviot/edx-ora2
def _log_assessment(assessment, submission):
    """
    Log the creation of a self-assessment and emit datadog metrics for it.

    Args:
        assessment (Assessment): The assessment model.
        submission (dict): The serialized submission model.

    Returns:
        None

    """
    student_item = submission['student_item']
    course_id = student_item['course_id']
    item_id = student_item['item_id']

    logger.info(
        u"Created self-assessment {assessment_id} for learner {user} on "
        u"submission {submission_uuid}, course {course_id}, item {item_id} "
        u"with rubric {rubric_content_hash}".format(
            assessment_id=assessment.id,
            user=student_item['student_id'],
            submission_uuid=submission['uuid'],
            course_id=course_id,
            item_id=item_id,
            rubric_content_hash=assessment.rubric.content_hash))

    tags = [
        u"course_id:{course_id}".format(course_id=course_id),
        u"item_id:{item_id}".format(item_id=item_id), u"type:self"
    ]

    # to_float() may return None, in which case no histogram is recorded.
    score_percentage = assessment.to_float()
    if score_percentage is not None:
        dog_stats_api.histogram('openassessment.assessment.score_percentage',
                                score_percentage,
                                tags=tags)

    dog_stats_api.increment('openassessment.assessment.count', tags=tags)
Code example #58
0
def _log_submission(submission, student_item):
    """
    Log the creation of a submission and emit datadog metrics for it.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    logger.info(
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})"
        .format(
            submission_uuid=submission["uuid"],
            course_id=student_item["course_id"],
            item_id=student_item["item_id"],
            anonymous_student_id=student_item["student_id"]
        )
    )

    tags = [
        u"course_id:{course_id}".format(course_id=student_item['course_id']),
        u"item_id:{item_id}".format(item_id=student_item['item_id']),
        u"item_type:{item_type}".format(item_type=student_item['item_type']),
    ]
    dog_stats_api.increment('submissions.submission.count', tags=tags)

    # The answer is JSON-serializable, so serialize it to measure its size
    # in bytes; some answers may still fail to serialize.
    try:
        serialized_answer = json.dumps(submission['answer'])
    except (ValueError, TypeError):
        logger.exception(
            u"Could not serialize submission answer to calculate its length: {}".format(submission['answer'])
        )
    else:
        dog_stats_api.histogram('submissions.submission.size', len(serialized_answer), tags=tags)
Code example #59
0
def update_certificate(request):
    """
    Will update GeneratedCertificate for a new certificate or
    modify an existing certificate entry.
    See models.py for a state diagram of certificate states
    This view should only ever be accessed by the xqueue server
    """
    # NOTE(review): non-POST requests produce no explicit response in this
    # span -- confirm the remainder of this view (not visible here) handles
    # that case.

    # Shorthand for the certificate status constants used below.
    status = CertificateStatuses
    if request.method == "POST":

        # xqueue POSTs its body and header as JSON-encoded strings.
        xqueue_body = json.loads(request.POST.get('xqueue_body'))
        xqueue_header = json.loads(request.POST.get('xqueue_header'))

        # Look up the certificate record this callback refers to; 'lms_key'
        # is matched against the key stored when the request was queued
        # (NOTE(review): confirm against the queueing code).
        try:
            course_key = SlashSeparatedCourseKey.from_deprecated_string(
                xqueue_body['course_id'])

            cert = GeneratedCertificate.objects.get(
                user__username=xqueue_body['username'],
                course_id=course_key,
                key=xqueue_header['lms_key'])

        except GeneratedCertificate.DoesNotExist:
            logger.critical('Unable to lookup certificate\n'
                            'xqueue_body: {0}\n'
                            'xqueue_header: {1}'.format(
                                xqueue_body, xqueue_header))

            # NOTE(review): 'mimetype' is the pre-Django-1.7 HttpResponse
            # keyword; newer Django versions use 'content_type'.
            return HttpResponse(json.dumps({
                'return_code': 1,
                'content': 'unable to lookup key'
            }),
                                mimetype='application/json')

        # An 'error' key in the body means certificate generation failed;
        # record the error state (and reason, when provided) on the cert.
        if 'error' in xqueue_body:
            cert.status = status.error
            if 'error_reason' in xqueue_body:

                # Hopefully we will record a meaningful error
                # here if something bad happened during the
                # certificate generation process
                #
                # example:
                #  (aamorm BerkeleyX/CS169.1x/2012_Fall)
                #  <class 'simples3.bucket.S3Error'>:
                #  HTTP error (reason=error(32, 'Broken pipe'), filename=None) :
                #  certificate_agent.py:175

                cert.error_reason = xqueue_body['error_reason']
        else:
            # Success path: advance the cert along its state machine.
            # generating/regenerating -> downloadable, deleting -> deleted;
            # any other starting state is invalid for an xqueue callback.
            if cert.status in [status.generating, status.regenerating]:
                cert.download_uuid = xqueue_body['download_uuid']
                cert.verify_uuid = xqueue_body['verify_uuid']
                cert.download_url = xqueue_body['url']
                cert.status = status.downloadable
            elif cert.status in [status.deleting]:
                cert.status = status.deleted
            else:
                logger.critical('Invalid state for cert update: {0}'.format(
                    cert.status))
                return HttpResponse(json.dumps({
                    'return_code': 1,
                    'content': 'invalid cert status'
                }),
                                    mimetype='application/json')

        # Count every processed callback, tagged with the course.
        dog_stats_api.increment(XQUEUE_METRIC_NAME,
                                tags=[
                                    u'action:update_certificate',
                                    u'course_id:{}'.format(cert.course_id)
                                ])

        cert.save()
        return HttpResponse(json.dumps({'return_code': 0}),
                            mimetype='application/json')