Code Example #1
    @contextmanager  # assumed: this yield-based timer needs contextlib.contextmanager
    def timer(self, metric_name, course_context):
        """
        Contextmanager which acts as a timer for the metric ``metric_name``,
        but which also yields a :class:`Tagger` object that allows the timed block
        of code to add tags and quantity measurements. Tags are added verbatim to the
        timer output. Measurements are recorded as histogram measurements in their own
        right, and also as bucketed tags on the timer measurement.

        Arguments:
            metric_name: The name used to aggregate all of these metrics.
            course_context: The course which the query is being made for.
        """
        tagger = Tagger(self._sample_rate)
        metric_name = "{}.{}".format(self._metric_base, metric_name)

        start = time()
        try:
            yield tagger
        finally:
            end = time()
            tags = tagger.tags
            tags.append("course:{}".format(course_context))
            for name, size in tagger.measures:
                dog_stats_api.histogram(
                    "{}.{}".format(metric_name, name),
                    size,
                    timestamp=end,
                    tags=[tag for tag in tags if not tag.startswith("{}:".format(metric_name))],
                    sample_rate=tagger.sample_rate,
                )
            dog_stats_api.histogram(
                "{}.duration".format(metric_name), end - start, timestamp=end, tags=tags, sample_rate=tagger.sample_rate
            )
            dog_stats_api.increment(metric_name, timestamp=end, tags=tags, sample_rate=tagger.sample_rate)
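
A minimal usage sketch for the timer above (hypothetical call site; the tagger.tag()
and tagger.measure() helpers are assumptions inferred from the tagger.tags and
tagger.measures attributes read in the finally block):

    with self.timer('modulestore.query', course_context=course_key) as tagger:
        tagger.tag('backend:mongo')            # assumed helper; emitted verbatim as a tag
        items = run_query(course_key)          # hypothetical timed workload
        tagger.measure('items', len(items))    # assumed helper; becomes its own histogram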
Code Example #2
File: xml_module.py Project: 10clouds/edx-platform
    def load_metadata(cls, xml_object):
        """
        Read the metadata attributes from this xml_object.

        Returns a dictionary {key: value}.
        """
        metadata = {'xml_attributes': {}}
        for attr, val in xml_object.attrib.iteritems():
            # VS[compat].  Remove after all key translations done
            attr = cls._translate(attr)

            if attr in cls.metadata_to_strip:
                if attr in ('course', 'org', 'url_name', 'filename'):
                    dog_stats_api.increment(
                        DEPRECATION_VSCOMPAT_EVENT,
                        tags=(
                            "location:xmlparser_util_mixin_load_metadata",
                            "metadata:{}".format(attr),
                        )
                    )
                # don't load these
                continue

            if attr not in cls.fields:
                metadata['xml_attributes'][attr] = val
            else:
                metadata[attr] = deserialize_field(cls.fields[attr], val)
        return metadata
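
Illustrative input/output for load_metadata (the field and attribute names are
hypothetical; assume 'display_name' is declared in cls.fields, 'custom_attr' is not,
and deserialize_field falls back to the raw string when the value is not JSON):

    xml_object = etree.fromstring('<problem display_name="Quiz 1" custom_attr="x"/>')
    MyDescriptor.load_metadata(xml_object)
    # -> {'xml_attributes': {'custom_attr': 'x'}, 'display_name': 'Quiz 1'}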
Code Example #3
    def handle_grade_event(block, event_type, event):
        user_id = event.get('user_id', user.id)

        # Construct the key for the module
        key = KeyValueStore.Key(
            scope=Scope.user_state,
            user_id=user_id,
            block_scope_id=descriptor.location,
            field_name='grade'
        )

        student_module = field_data_cache.find_or_create(key)
        # Update the grades
        student_module.grade = event.get('value')
        student_module.max_grade = event.get('max_value')
        # Save all changes to the underlying KeyValueStore
        student_module.save()

        # Bin score into range and increment stats
        score_bucket = get_score_bucket(student_module.grade, student_module.max_grade)

        tags = [
            u"org:{}".format(course_id.org),
            u"course:{}".format(course_id),
            u"score_bucket:{0}".format(score_bucket)
        ]

        if grade_bucket_type is not None:
            tags.append('type:%s' % grade_bucket_type)

        dog_stats_api.increment("lms.courseware.question_answered", tags=tags)
Code Example #4
    def handle_grade_event(block, event_type, event):  # pylint: disable=unused-argument
        """
        Manages the workflow for recording and updating of student module grade state
        """
        user_id = event.get("user_id", user.id)

        # Construct the key for the module
        key = KeyValueStore.Key(
            scope=Scope.user_state, user_id=user_id, block_scope_id=descriptor.location, field_name="grade"
        )

        student_module = field_data_cache.find_or_create(key)
        # Update the grades
        student_module.grade = event.get("value")
        student_module.max_grade = event.get("max_value")
        # Save all changes to the underlying KeyValueStore
        student_module.save()

        # Bin score into range and increment stats
        score_bucket = get_score_bucket(student_module.grade, student_module.max_grade)

        tags = [
            u"org:{}".format(course_id.org),
            u"course:{}".format(course_id),
            u"score_bucket:{0}".format(score_bucket),
        ]

        if grade_bucket_type is not None:
            tags.append("type:%s" % grade_bucket_type)

        dog_stats_api.increment("lms.courseware.question_answered", tags=tags)

        # Cycle through the milestone fulfillment scenarios to see if any are now applicable
        # thanks to the updated grading information that was just submitted
        _fulfill_content_milestones(user, course_id, descriptor.location)
Code Example #5
File: access.py Project: mitsei/edx-platform
    def see_exists():
        """
        Can see a course if the user can enroll in it, but also if they can load it: if a
        user enrolled in a course and the enrollment period has since passed, they should
        still see it.

        TODO (vshnayder): This means that courses with limited enrollment periods will not appear
        to non-staff visitors after the enrollment period is over.  If this is not what we want, we
        will need to change this logic.
        """
        # VS[compat] -- this setting should go away once all courses have
        # properly configured enrollment_start times (if course should be
        # staff-only, set enrollment_start far in the future.)
        if settings.FEATURES.get('ACCESS_REQUIRE_STAFF_FOR_COURSE'):
            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=(
                    "location:has_access_course_desc_see_exists",
                    u"course:{}".format(course),
                )
            )

            # if this feature is on, only allow courses that have ispublic set to be
            # seen by non-staff
            if course.ispublic:
                debug("Allow: ACCESS_REQUIRE_STAFF_FOR_COURSE and ispublic")
                return True
            return _has_staff_access_to_descriptor(user, course, course.id)

        return can_enroll() or can_load()
Code Example #6
File: access.py Project: johnny-nan/edx-platform
    def see_exists():
        """
        Can see a course if the user can enroll in it, but also if they can load it: if a
        user enrolled in a course and the enrollment period has since passed, they should
        still see it.
        """
        # VS[compat] -- this setting should go away once all courses have
        # properly configured enrollment_start times (if course should be
        # staff-only, set enrollment_start far in the future.)
        if settings.FEATURES.get('ACCESS_REQUIRE_STAFF_FOR_COURSE'):
            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=(
                    "location:has_access_course_desc_see_exists",
                    u"course:{}".format(course),
                )
            )

            # if this feature is on, only allow courses that have ispublic set to be
            # seen by non-staff
            if course.ispublic:
                debug("Allow: ACCESS_REQUIRE_STAFF_FOR_COURSE and ispublic")
                return ACCESS_GRANTED
            return _has_staff_access_to_descriptor(user, course, course.id)

        return ACCESS_GRANTED if (can_enroll() or can_load()) else ACCESS_DENIED
Code Example #7
File: subtasks.py Project: CDOT-EDX/edx-platform
def update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count=0):
    """
    Update the status of the subtask in the parent InstructorTask object tracking its progress.

    Because select_for_update is used to lock the InstructorTask object while it is being updated,
    multiple subtasks updating at the same time may time out while waiting for the lock.
    The actual update operation is surrounded by a try/except/else that permits the update to be
    retried if the transaction times out.

    The subtask lock acquired in the call to check_subtask_is_valid() is released here,
    but only once all retry attempts have concluded.
    """
    try:
        _update_subtask_status(entry_id, current_task_id, new_subtask_status)
    except DatabaseError:
        # If we fail, try again recursively.
        retry_count += 1
        if retry_count < MAX_DATABASE_LOCK_RETRIES:
            TASK_LOG.info("Retrying to update status for subtask %s of instructor task %d with status %s:  retry %d",
                          current_task_id, entry_id, new_subtask_status, retry_count)
            dog_stats_api.increment('instructor_task.subtask.retry_after_failed_update')
            update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count)
        else:
            TASK_LOG.info("Failed to update status after %d retries for subtask %s of instructor task %d with status %s",
                          retry_count, current_task_id, entry_id, new_subtask_status)
            dog_stats_api.increment('instructor_task.subtask.failed_after_update_retries')
            raise
    finally:
        # Only release the lock on the subtask when we're done trying to update it.
        # Note that this will be called each time a recursive call to update_subtask_status()
        # returns.  Fortunately, it's okay to release a lock that has already been released.
        _release_subtask_lock(current_task_id)
Code Example #8
File: signals.py Project: ahmadiga/min_edx
def m2m_changed_metrics(sender, **kwargs):
    """
    Record the number of times that Many2Many fields are updated. This is separated
    from post_save and post_delete, because it's signaled by the database model in
    the middle of the Many2Many relationship, rather than either of the models
    that are the relationship participants.

    Args:
        sender (Model): The model class in the middle of the Many2Many relationship.
        action (str): The action being taken on this Many2Many relationship.
        using (str): The name of the database being used for this deletion (optional).
        instance (Model instance): The instance whose many-to-many relation is being modified.
        model (Model class): The model of the class being added/removed/cleared from the relation.
    """
    if "action" not in kwargs:
        return

    action = {"post_add": "m2m.added", "post_remove": "m2m.removed", "post_clear": "m2m.cleared"}.get(kwargs["action"])

    if not action:
        return

    tags = _database_tags(action, sender, kwargs)

    if "model" in kwargs:
        tags.append("target_class:{}".format(kwargs["model"].__name__))

    pk_set = kwargs.get("pk_set", []) or []

    dog_stats_api.increment("edxapp.db.model", value=len(pk_set), tags=tags)
Code Example #9
    def render_template(self, system, xml_data):
        '''Render the template, given the definition xml_data'''
        xmltree = etree.fromstring(xml_data)
        if 'impl' in xmltree.attrib:
            template_name = xmltree.attrib['impl']
        else:
            # VS[compat]  backwards compatibility with old nested customtag structure
            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=["location:customtag_descriptor_render_template"]
            )

            child_impl = xmltree.find('impl')
            if child_impl is not None:
                template_name = child_impl.text
            else:
                # TODO (vshnayder): better exception type
                raise Exception("Could not find impl attribute in customtag {0}"
                                .format(self.location))

        params = dict(xmltree.items())

        # cdodge: look up the template as a module
        template_loc = self.location.replace(category='custom_tag_template', name=template_name)

        template_module = system.load_item(template_loc)
        template_module_data = template_module.data
        template = Template(template_module_data)
        return template.render(**params)
Code Example #10
    def backcompat_paths(cls, filepath):
        """
        Get paths for html and xml files.
        """

        dog_stats_api.increment(
            DEPRECATION_VSCOMPAT_EVENT,
            tags=["location:html_descriptor_backcompat_paths"]
        )

        if filepath.endswith('.html.xml'):
            filepath = filepath[:-9] + '.html'  # backcompat--look for html instead of xml
        if filepath.endswith('.html.html'):
            filepath = filepath[:-5]  # some people like to include .html in filenames..
        candidates = []
        while os.sep in filepath:
            candidates.append(filepath)
            _, _, filepath = filepath.partition(os.sep)

        # also look for .html versions instead of .xml
        new_candidates = []
        for candidate in candidates:
            if candidate.endswith('.xml'):
                new_candidates.append(candidate[:-4] + '.html')
        return candidates + new_candidates
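
A worked illustration of the fallback expansion above (assuming POSIX path
separators; shown as a bare call for brevity):

    backcompat_paths('course/html/a/b.html.xml')
    # '.html.xml' is first rewritten to '.html', then leading path segments are stripped:
    # -> ['course/html/a/b.html', 'html/a/b.html', 'a/b.html']
    # No candidate still ends in '.xml', so no extra '.html' variants are appended.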
Code Example #11
File: item.py Project: 189140879/edx-platform
def _delete_item(usage_key, user):
    """
    Deletes an existing xblock with the given usage_key.
    If the xblock is a Static Tab, removes it from course.tabs as well.
    """
    store = modulestore()

    with store.bulk_operations(usage_key.course_key):
        # VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so
        # if we add one then we need to also add it to the policy information (i.e. metadata)
        # we should remove this once we can break this reference from the course to static tabs
        if usage_key.category == 'static_tab':

            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=(
                    "location:_delete_item_static_tab",
                    u"course:{}".format(unicode(usage_key.course_key)),
                )
            )

            course = store.get_course(usage_key.course_key)
            existing_tabs = course.tabs or []
            course.tabs = [tab for tab in existing_tabs if tab.get('url_slug') != usage_key.name]
            store.update_item(course, user.id)

        store.delete_item(usage_key, user.id)
Code Example #12
File: xml_module.py Project: 189140879/edx-platform
    def load_definition(cls, xml_object, system, def_id, id_generator):
        """
        Load a descriptor definition from the specified xml_object.
        Subclasses should not need to override this except in special
        cases (e.g. html module)

        Args:
            xml_object: an lxml.etree._Element containing the definition to load
            system: the modulestore system (aka, runtime) which accesses data and provides access to services
            def_id: the definition id for the block--used to compute the usage id and asides ids
            id_generator: used to generate the usage_id
        """

        # VS[compat] -- the filename attr should go away once everything is
        # converted.  (note: make sure html files still work once this goes away)
        filename = xml_object.get('filename')
        if filename is None:
            definition_xml = copy.deepcopy(xml_object)
            filepath = ''
        else:
            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=["location:xmlparser_util_mixin_load_definition_filename"]
            )

            filepath = cls._format_filepath(xml_object.tag, filename)

            # VS[compat]
            # TODO (cpennington): If the file doesn't exist at the right path,
            # give the class a chance to fix it up. The file will be written out
            # again in the correct format.  This should go away once the CMS is
            # online and has imported all current (fall 2012) courses from xml
            if not system.resources_fs.exists(filepath) and hasattr(cls, 'backcompat_paths'):
                dog_stats_api.increment(
                    DEPRECATION_VSCOMPAT_EVENT,
                    tags=["location:xmlparser_util_mixin_load_definition_backcompat"]
                )

                candidates = cls.backcompat_paths(filepath)
                for candidate in candidates:
                    if system.resources_fs.exists(candidate):
                        filepath = candidate
                        break

            definition_xml = cls.load_file(filepath, system.resources_fs, def_id)
            usage_id = id_generator.create_usage(def_id)
            system.parse_asides(definition_xml, def_id, usage_id, id_generator)

            # Add the attributes from the pointer node
            definition_xml.attrib.update(xml_object.attrib)

        definition_metadata = cls._get_metadata_from_xml(definition_xml)
        cls.clean_metadata_from_xml(definition_xml)
        definition, children = cls.definition_from_xml(definition_xml, system)
        if definition_metadata:
            definition['definition_metadata'] = definition_metadata
        definition['filename'] = [filepath, filename]

        return definition, children
Code Example #13
    def _ddog_increment(self, evt_time, evt_name):
        """
        DataDog increment method.
        """
        dog_stats_api.increment(
            'DjangoXBlockUserStateClient.{}'.format(evt_name),
            timestamp=evt_time,
            sample_rate=self.API_DATADOG_SAMPLE_RATE,
        )
Code Example #14
    def backcompat_paths(cls, path):
        dog_stats_api.increment(
            DEPRECATION_VSCOMPAT_EVENT,
            tags=["location:capa_descriptor_backcompat_paths"]
        )
        return [
            'problems/' + path[8:],
            path[8:],
        ]
Code Example #15
File: xml.py Project: 10clouds/edx-platform
def clean_out_mako_templating(xml_string):
    orig_xml = xml_string
    xml_string = xml_string.replace('%include', 'include')
    xml_string = re.sub(r"(?m)^\s*%.*$", '', xml_string)
    if orig_xml != xml_string:
        dog_stats_api.increment(
            DEPRECATION_VSCOMPAT_EVENT,
            tags=["location:xml_clean_out_mako_templating"]
        )
    return xml_string
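
A worked example of the cleanup (illustrative input; note that matched Mako control
lines are blanked rather than removed, because the regex does not consume the newline):

    clean_out_mako_templating("%include 'header.xml'\n  % if foo:\n<problem/>")
    # -> "include 'header.xml'\n\n<problem/>"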
Code Example #16
File: signals.py Project: ahmadiga/min_edx
def post_delete_metrics(sender, **kwargs):
    """
    Record the number of times that django models are deleted.

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this deletion (optional).
        instance (Model instance): The instance being deleted (optional).
    """
    tags = _database_tags("deleted", sender, kwargs)

    dog_stats_api.increment("edxapp.db.model", tags=tags)
Code Example #17
    def handle_grade_event(block, event_type, event):  # pylint: disable=unused-argument
        """
        Manages the workflow for recording and updating of student module grade state
        """
        user_id = event.get('user_id', user.id)

        # Construct the key for the module
        key = KeyValueStore.Key(
            scope=Scope.user_state,
            user_id=user_id,
            block_scope_id=descriptor.location,
            field_name='grade'
        )

        student_module = field_data_cache.find_or_create(key)
        # Update the grades
        student_module.grade = event.get('value')
        student_module.max_grade = event.get('max_value')
        # Save all changes to the underlying KeyValueStore
        student_module.save()

        # Bin score into range and increment stats
        score_bucket = get_score_bucket(student_module.grade, student_module.max_grade)

        tags = [
            u"org:{}".format(course_id.org),
            u"course:{}".format(course_id),
            u"score_bucket:{0}".format(score_bucket)
        ]

        if grade_bucket_type is not None:
            tags.append('type:%s' % grade_bucket_type)

        dog_stats_api.increment("lms.courseware.question_answered", tags=tags)

        # Cycle through the milestone fulfillment scenarios to see if any are now applicable
        # thanks to the updated grading information that was just submitted
        _fulfill_content_milestones(
            user,
            course_id,
            descriptor.location,
        )

        # Send a signal out to any listeners who are waiting for score change
        # events.
        SCORE_CHANGED.send(
            sender=None,
            points_possible=event['max_value'],
            points_earned=event['value'],
            user_id=user_id,
            course_id=unicode(course_id),
            usage_id=unicode(descriptor.location)
        )
Code Example #18
File: signals.py Project: ahmadiga/min_edx
def post_save_metrics(sender, **kwargs):
    """
    Record the number of times that django models are saved (created or updated).

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this update (optional).
        instance (Model instance): The instance being updated (optional).
    """
    action = "created" if kwargs.pop("created", False) else "updated"

    tags = _database_tags(action, sender, kwargs)
    dog_stats_api.increment("edxapp.db.model", tags=tags)
Code Example #19
File: module_render.py Project: kotky/edx-platform
    def handle_grade_event(block, event_type, event):  # pylint: disable=unused-argument
        """
        Manages the workflow for recording and updating of student module grade state
        """

        if not settings.FEATURES.get("ALLOW_STUDENT_STATE_UPDATES_ON_CLOSED_COURSE", True):
            # if a course has ended, don't register grading events
            course = modulestore().get_course(course_id, depth=0)
            now = datetime.now(UTC())
            if course.end is not None and now > course.end:
                return

        user_id = event.get('user_id', user.id)

        grade = event.get('value')
        max_grade = event.get('max_value')

        field_data_cache.set_score(
            user_id,
            descriptor.location,
            grade,
            max_grade,
        )

        # Bin score into range and increment stats
        score_bucket = get_score_bucket(grade, max_grade)

        tags = [
            u"org:{}".format(course_id.org),
            u"course:{}".format(course_id),
            u"score_bucket:{0}".format(score_bucket)
        ]

        if grade_bucket_type is not None:
            tags.append('type:%s' % grade_bucket_type)

        dog_stats_api.increment("lms.courseware.question_answered", tags=tags)

        # Cycle through the milestone fulfillment scenarios to see if any are now applicable
        # thanks to the updated grading information that was just submitted
        _fulfill_content_milestones(
            user,
            course_id,
            descriptor.location,
        )
        # we can treat a grading event as an indication that a user
        # "completed" an xBlock
        if settings.FEATURES.get('MARK_PROGRESS_ON_GRADING_EVENT', False):
            handle_progress_event(block, event_type, event)
Code Example #20
    def _record_result(self, action, data, tags=None):
        """
        Log results from an API call to an ORA service to datadog.

        Arguments:
            action (str): The ORA action being recorded.
            data (dict): The data returned from the ORA service. Should contain the key 'success'.
            tags (list): A list of tags to attach to the logged metric.
        """
        if tags is None:
            tags = []

        tags.append(u'result:{}'.format(data.get('success', False)))
        tags.append(u'action:{}'.format(action))
        dog_stats_api.increment(self._metric_name('request.count'), tags=tags)
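
A hypothetical call from within the same class, showing how the result/action tags
are composed (the action name, response dict, and the "<metric_base>" expansion of
_metric_name are illustrative assumptions):

    data = {'success': True, 'submission_id': 42}
    self._record_result('submit_assessment', data, tags=[u'queue:open_ended'])
    # increments <metric_base>.request.count tagged with
    # [u'queue:open_ended', u'result:True', u'action:submit_assessment']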
Code Example #21
    def handle_grade_event(block, event_type, event):  # pylint: disable=unused-argument
        """
        Manages the workflow for recording and updating of student module grade state
        """
        user_id = user.id

        grade = event.get('value')
        max_grade = event.get('max_value')

        set_score(
            user_id,
            descriptor.location,
            grade,
            max_grade,
        )

        # Bin score into range and increment stats
        score_bucket = get_score_bucket(grade, max_grade)

        tags = [
            u"org:{}".format(course_id.org),
            u"course:{}".format(course_id),
            u"score_bucket:{0}".format(score_bucket)
        ]

        if grade_bucket_type is not None:
            tags.append('type:%s' % grade_bucket_type)

        dog_stats_api.increment("lms.courseware.question_answered", tags=tags)

        # Cycle through the milestone fulfillment scenarios to see if any are now applicable
        # thanks to the updated grading information that was just submitted
        _fulfill_content_milestones(
            user,
            course_id,
            descriptor.location,
        )

        # Send a signal out to any listeners who are waiting for score change
        # events.
        SCORE_CHANGED.send(
            sender=None,
            points_possible=event['max_value'],
            points_earned=event['value'],
            user_id=user_id,
            course_id=unicode(course_id),
            usage_id=unicode(descriptor.location)
        )
Code Example #22
File: subtasks.py Project: dmohrC/edx-platform
@contextmanager  # assumed: this yield-based manager needs contextlib.contextmanager
def track_memory_usage(metric, course_id):
    """
    Context manager to track how much memory (in bytes) a given process uses.
    Metrics will look like: 'course_email.subtask_generation.memory.rss'
    or 'course_email.subtask_generation.memory.vms'.
    """
    memory_types = ["rss", "vms"]
    process = psutil.Process()
    baseline_memory_info = process.get_memory_info()
    baseline_usages = [getattr(baseline_memory_info, memory_type) for memory_type in memory_types]
    yield
    for memory_type, baseline_usage in zip(memory_types, baseline_usages):
        total_memory_info = process.get_memory_info()
        total_usage = getattr(total_memory_info, memory_type)
        memory_used = total_usage - baseline_usage
        dog_stats_api.increment(metric + "." + memory_type, memory_used, tags=["course_id:{}".format(course_id)])
Code Example #23
File: xml.py Project: 10clouds/edx-platform
                def fallback_name(orig_name=None):
                    """Return the fallback name for this module.  This is a function instead of a variable
                    because we want it to be lazy."""
                    dog_stats_api.increment(
                        DEPRECATION_VSCOMPAT_EVENT,
                        tags=(
                            "location:import_system_fallback_name",
                            u"name:{}".format(orig_name),
                        )
                    )

                    if looks_like_fallback(orig_name):
                        # We're about to re-hash, in case something changed, so get rid of the tag_ and hash
                        orig_name = orig_name[len(tag) + 1:-12]
                    # append the hash of the content--the first 12 bytes should be plenty.
                    orig_name = "_" + orig_name if orig_name not in (None, "") else ""
                    xml_bytes = xml.encode('utf8')
                    return tag + orig_name + "_" + hashlib.sha1(xml_bytes).hexdigest()[:12]
Code Example #24
    def handle_grade_event(block, event_type, event):  # pylint: disable=unused-argument
        """
        Manages the workflow for recording and updating of student module grade state
        """
        user_id = event.get('user_id', user.id)

        # Construct the key for the module
        key = KeyValueStore.Key(
            scope=Scope.user_state,
            user_id=user_id,
            block_scope_id=descriptor.location,
            field_name='grade'
        )

        student_module = field_data_cache.find_or_create(key)
        # Update the grades
        student_module.grade = event.get('value')
        student_module.max_grade = event.get('max_value')
        # Save all changes to the underlying KeyValueStore
        student_module.save()

        # Bin score into range and increment stats
        score_bucket = get_score_bucket(student_module.grade, student_module.max_grade)

        tags = [
            u"org:{}".format(course_id.org),
            u"course:{}".format(course_id),
            u"score_bucket:{0}".format(score_bucket)
        ]

        if grade_bucket_type is not None:
            tags.append('type:%s' % grade_bucket_type)

        dog_stats_api.increment("lms.courseware.question_answered", tags=tags)

        # If we're using the awesome edx-milestones app, we need to cycle
        # through the fulfillment scenarios to see if any are now applicable
        # thanks to the updated grading information that was just submitted
        if settings.FEATURES.get('MILESTONES_APP', False):
            _fulfill_content_milestones(
                course_id,
                descriptor.location,
                user_id
            )
Code Example #25
File: html_module.py Project: sigberto/edx-platform
    def backcompat_paths(cls, path):

        dog_stats_api.increment(DEPRECATION_VSCOMPAT_EVENT, tags=["location:html_descriptor_backcompat_paths"])

        if path.endswith(".html.xml"):
            path = path[:-9] + ".html"  # backcompat--look for html instead of xml
        if path.endswith(".html.html"):
            path = path[:-5]  # some people like to include .html in filenames..
        candidates = []
        while os.sep in path:
            candidates.append(path)
            _, _, path = path.partition(os.sep)

        # also look for .html versions instead of .xml
        new_candidates = []
        for candidate in candidates:
            if candidate.endswith(".xml"):
                new_candidates.append(candidate[:-4] + ".html")
        return candidates + new_candidates
Code Example #26
    def send_to_queue(self, header, body, files_to_upload=None):
        """
        Submit a request to xqueue.

        header: JSON-serialized dict in the format described in 'xqueue_interface.make_xheader'

        body: Serialized data for the recipient behind the queueing service. The operation of
                xqueue is agnostic to the contents of 'body'

        files_to_upload: List of file objects to be uploaded to xqueue along with queue request

        Returns (error_code, msg) where error_code != 0 indicates an error
        """

        # log the send to xqueue
        header_info = json.loads(header)
        queue_name = header_info.get('queue_name', u'')
        dog_stats_api.increment(XQUEUE_METRIC_NAME, tags=[
            u'action:send_to_queue',
            u'queue:{}'.format(queue_name)
        ])

        # Attempt to send to queue
        (error, msg) = self._send_to_queue(header, body, files_to_upload)

        # Log in, then try again
        if error and (msg == 'login_required'):
            (error, content) = self._login()
            if error != 0:
                # when the login fails
                log.debug("Failed to login to queue: %s", content)
                return (error, content)
            if files_to_upload is not None:
                # Need to rewind file pointers
                for f in files_to_upload:
                    f.seek(0)
            (error, msg) = self._send_to_queue(header, body, files_to_upload)

        return (error, msg)
Code Example #27
def create_account_with_params_custom(request, params):
    """
    Given a request and a dict of parameters (which may or may not have come
    from the request), create an account for the requesting user, including
    creating a comments service user object and sending an activation email.
    This also takes external/third-party auth into account, updates that as
    necessary, and authenticates the user for the request's session.

    Does not return anything.

    Raises AccountValidationError if an account with the username or email
    specified by params already exists, or ValidationError if any of the given
    parameters is invalid for any other reason.

    Issues with this code:
    * It is not transactional. If there is a failure part-way, an incomplete
      account will be created and left in the database.
    * Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a sanity check that
      they are sane.
    * It is over 300 lines long (!) and includes disparate functionality, from
      registration e-mails to all sorts of other things. It should be broken
      up into semantically meaningful functions.
    * The user-facing text is rather unfriendly (e.g. "Username must be a
      minimum of two characters long" rather than "Please use a username of
      at least two characters").
    """
    # Copy params so we can modify it; we can't just do dict(params) because if
    # params is request.POST, that results in a dict containing lists of values
    params = dict(params.items())

    # allow defining a custom set of required/optional/hidden fields via configuration
    extra_fields = configuration_helpers.get_value(
        'REGISTRATION_EXTRA_FIELDS',
        getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {}))

    # Boolean of whether a 3rd party auth provider and credentials were provided in
    # the API so the newly created account can link with the 3rd party account.
    #
    # Note: this is orthogonal to the 3rd party authentication pipeline that occurs
    # when the account is created via the browser and redirect URLs.
    should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params

    if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
        params["password"] = pipeline.make_random_password()

    # Add a form requirement for data sharing consent if the EnterpriseCustomer
    # for the request requires it at login
    extra_fields['data_sharing_consent'] = data_sharing_consent_requirement_at_login(request)

    # if doing signup for an external authorization, then get email, password, name from the eamap
    # don't use the ones from the form, since the user could have hacked those
    # unless originally we didn't get a valid email or name from the external auth
    # TODO: We do not check whether these values meet all necessary criteria, such as email length
    do_external_auth = 'ExternalAuthMap' in request.session
    if do_external_auth:
        eamap = request.session['ExternalAuthMap']
        try:
            validate_email(eamap.external_email)
            params["email"] = eamap.external_email
        except ValidationError:
            pass
        if eamap.external_name.strip() != '':
            params["name"] = eamap.external_name
        params["password"] = eamap.internal_password
        log.debug(u'In create_account with external_auth: user = %s, email=%s',
                  params["name"], params["email"])

    extended_profile_fields = configuration_helpers.get_value(
        'extended_profile_fields', [])
    enforce_password_policy = settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and not do_external_auth
    # Can't have terms of service for certain SHIB users, like at Stanford
    registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    tos_required = (
        (registration_fields.get('terms_of_service') != 'hidden'
         or registration_fields.get('honor_code') != 'hidden')
        and (
            not settings.FEATURES.get("AUTH_USE_SHIB")
            or not settings.FEATURES.get("SHIB_DISABLE_TOS")
            or not do_external_auth
            or not eamap.external_domain.startswith(
                openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX)
        )
    )

    params['name'] = "{} {}".format(params.get('first_name'),
                                    params.get('last_name'))
    form = AccountCreationForm(
        data=params,
        extra_fields=extra_fields,
        extended_profile_fields=extended_profile_fields,
        enforce_username_neq_password=True,
        enforce_password_policy=enforce_password_policy,
        tos_required=tos_required,
    )
    custom_form = get_registration_extension_form(data=params)

    # Perform operations within a transaction that are critical to account creation
    with transaction.atomic():
        # first, create the account
        user, profile, registration = _do_create_account_custom(form, custom_form)

        # next, link the account with social auth, if provided via the API.
        # (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
        if should_link_with_social_auth:
            backend_name = params['provider']
            request.social_strategy = social_utils.load_strategy(request)
            redirect_uri = reverse('social:complete', args=(backend_name, ))
            request.backend = social_utils.load_backend(
                request.social_strategy, backend_name, redirect_uri)
            social_access_token = params.get('access_token')
            if not social_access_token:
                raise ValidationError({
                    'access_token': [
                        _("An access_token is required when passing value ({}) for provider.").format(
                            params['provider'])
                    ]
                })
            request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
            pipeline_user = None
            error_message = ""
            try:
                pipeline_user = request.backend.do_auth(social_access_token,
                                                        user=user)
            except AuthAlreadyAssociated:
                error_message = _(
                    "The provided access_token is already associated with another user."
                )
            except (HTTPError, AuthException):
                error_message = _("The provided access_token is not valid.")
            if not pipeline_user or not isinstance(pipeline_user, User):
                # Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline()
                raise ValidationError({'access_token': [error_message]})

    # Perform operations that are non-critical parts of account creation
    preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())

    if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
        try:
            enable_notifications(user)
        except Exception:  # pylint: disable=broad-except
            log.exception(
                "Enable discussion notifications failed for user {id}.".format(
                    id=user.id))

    dog_stats_api.increment("common.student.account_created")

    # If the user is registering via 3rd party auth, track which provider they use
    third_party_provider = None
    running_pipeline = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        third_party_provider = provider.Registry.get_from_pipeline(
            running_pipeline)
        # Store received data sharing consent field values in the pipeline for use
        # by any downstream pipeline elements which require them.
        running_pipeline['kwargs']['data_sharing_consent'] = form.cleaned_data.get('data_sharing_consent', None)

    # Track the user's registration
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        identity_args = [
            user.id,  # pylint: disable=no-member
            {
                'email': user.email,
                'username': user.username,
                'name': profile.name,
                # Mailchimp requires age & yearOfBirth to be integers; we send a sane integer default if falsey.
                'age': profile.age or -1,
                'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year,
                'education': profile.level_of_education_display,
                'address': profile.mailing_address,
                'gender': profile.gender_display,
                'country': unicode(profile.country),
            }
        ]

        if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
            identity_args.append(
                {"MailChimp": {"listId": settings.MAILCHIMP_NEW_USER_LIST_ID}}
            )

        analytics.identify(*identity_args)

        analytics.track(
            user.id,
            "edx.bi.user.account.registered",
            {
                'category': 'conversion',
                'label': params.get('course_id'),
                'provider': third_party_provider.name if third_party_provider else None,
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )

    # Announce registration
    REGISTER_USER.send(sender=None, user=user, profile=profile)

    create_comments_service_user(user)

    # Don't send email if we are:
    #
    # 1. Doing load testing.
    # 2. Random user generation for other forms of testing.
    # 3. External auth bypassing activation.
    # 4. Have the platform configured to not require e-mail activation.
    # 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
    #
    # Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. we need to be careful about
    # changing settings on a running system to make sure no users are
    # left in an inconsistent state (or doing a migration if they are).
    send_email = (
        not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None)
        and not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING')
        and not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'))
        and not (
            third_party_provider
            and third_party_provider.skip_email_verification
            and user.email == running_pipeline['kwargs'].get('details', {}).get('email')
        )
    )
    if send_email:
        send_account_activation_email(request, registration, user)
    else:
        registration.activate()
        _enroll_user_in_pending_courses(user)  # Enroll student in any pending courses

    # Immediately after a user creates an account, we log them in. They are only
    # logged in until they close the browser. They can't log in again until they click
    # the activation link from the email.
    new_user = authenticate(username=user.username, password=params['password'])
    login(request, new_user)
    request.session.set_expiry(0)

    try:
        record_registration_attributions(request, new_user)
    # Don't prevent a user from registering due to attribution errors.
    except Exception:  # pylint: disable=broad-except
        log.exception('Error while attributing cookies to user registration.')

    # TODO: there is no error checking here to see that the user actually logged in successfully,
    # and is not yet an active user.
    if new_user is not None:
        AUDIT_LOG.info(u"Login success on new account creation - {0}".format(
            new_user.username))

    if do_external_auth:
        eamap.user = new_user
        eamap.dtsignup = datetime.datetime.now(UTC)
        eamap.save()
        AUDIT_LOG.info(u"User registered with external_auth %s",
                       new_user.username)
        AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s',
                       new_user.username, eamap)

        if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
            log.info('bypassing activation email')
            new_user.is_active = True
            new_user.save()
            AUDIT_LOG.info(
                u"Login activated on extauth account - {0} ({1})".format(
                    new_user.username, new_user.email))

    return new_user
Code Example #28
File: views.py Project: iivic/BoiseStateX
def _record_feedback_in_datadog(tags):
    datadog_tags = [u"{k}:{v}".format(k=k, v=v) for k, v in tags.items()]
    dog_stats_api.increment(DATADOG_FEEDBACK_METRIC, tags=datadog_tags)
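
Illustrative call showing how the tags dict is flattened into 'key:value' strings
(the keys and values are hypothetical):

    _record_feedback_in_datadog({'issue_type': 'bug', 'course_id': 'course-v1:edX+Demo+T1'})
    # increments DATADOG_FEEDBACK_METRIC with tags
    # [u'issue_type:bug', u'course_id:course-v1:edX+Demo+T1']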
Code Example #29
File: xqueue.py Project: yellowsignz/edx-platform
def update_certificate(request):
    """
    Will update GeneratedCertificate for a new certificate or
    modify an existing certificate entry.

    See models.py for a state diagram of certificate states

    This view should only ever be accessed by the xqueue server
    """

    status = CertificateStatuses
    if request.method == "POST":

        xqueue_body = json.loads(request.POST.get('xqueue_body'))
        xqueue_header = json.loads(request.POST.get('xqueue_header'))

        try:
            course_key = CourseKey.from_string(xqueue_body['course_id'])

            cert = GeneratedCertificate.eligible_certificates.get(
                user__username=xqueue_body['username'],
                course_id=course_key,
                key=xqueue_header['lms_key'])

        except GeneratedCertificate.DoesNotExist:
            log.critical(
                'Unable to lookup certificate\n'
                'xqueue_body: %s\n'
                'xqueue_header: %s', xqueue_body, xqueue_header)

            return HttpResponse(
                json.dumps({'return_code': 1, 'content': 'unable to lookup key'}),
                content_type='application/json'
            )

        if 'error' in xqueue_body:
            cert.status = status.error
            if 'error_reason' in xqueue_body:

                # Hopefully we will record a meaningful error
                # here if something bad happened during the
                # certificate generation process
                #
                # example:
                #  (aamorm BerkeleyX/CS169.1x/2012_Fall)
                #  <class 'simples3.bucket.S3Error'>:
                #  HTTP error (reason=error(32, 'Broken pipe'), filename=None) :
                #  certificate_agent.py:175

                cert.error_reason = xqueue_body['error_reason']
        else:
            if cert.status == status.generating:
                cert.download_uuid = xqueue_body['download_uuid']
                cert.verify_uuid = xqueue_body['verify_uuid']
                cert.download_url = xqueue_body['url']
                cert.status = status.downloadable
            elif cert.status in [status.deleting]:
                cert.status = status.deleted
            else:
                log.critical('Invalid state for cert update: %s', cert.status)
                return HttpResponse(
                    json.dumps({'return_code': 1, 'content': 'invalid cert status'}),
                    content_type='application/json'
                )

        dog_stats_api.increment(XQUEUE_METRIC_NAME, tags=[
            u'action:update_certificate',
            u'course_id:{}'.format(cert.course_id)
        ])

        cert.save()
        return HttpResponse(json.dumps({'return_code': 0}), content_type='application/json')
Code Example #30
def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
    """
    Confirms that the current subtask is known to the InstructorTask and hasn't already been completed.

    Problems can occur when the parent task has been run twice, and results in duplicate
    subtasks being created for the same InstructorTask entry.  This can happen when Celery
    loses its connection to its broker, and any current tasks get requeued.

    If a parent task gets requeued, then the same InstructorTask may have a different set of
    subtasks defined (to do the same thing), so the subtasks from the first queuing would not
    be known to the InstructorTask.  We raise an exception in this case.

    If a subtask gets requeued, then the first time the subtask runs it should run fine to completion.
    However, we want to prevent it from running again, so we check here to see what the existing
    subtask's status is.  If it is complete, we raise an exception.  We also take a lock on the task,
    so that we can detect if another worker has started work but has not yet completed that work.
    The other worker is allowed to finish, and this call raises an exception.

    Raises a DuplicateTaskException exception if it's not a task that should be run.

    If this succeeds, it requires that update_subtask_status() is called to release the lock on the
    task.
    """
    # Confirm that the InstructorTask actually defines subtasks.
    entry = InstructorTask.objects.get(pk=entry_id)
    if len(entry.subtasks) == 0:
        format_str = "Unexpected task_id '{}': unable to find subtasks of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, entry, new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.nosubtasks',
                                tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask knows about this particular subtask.
    subtask_dict = json.loads(entry.subtasks)
    subtask_status_info = subtask_dict['status']
    if current_task_id not in subtask_status_info:
        format_str = "Unexpected task_id '{}': unable to find status for subtask of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, entry, new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.unknown',
                                tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask doesn't think that this subtask has already been
    # performed successfully.
    subtask_status = SubtaskStatus.from_dict(
        subtask_status_info[current_task_id])
    subtask_state = subtask_status.state
    if subtask_state in READY_STATES:
        format_str = "Unexpected task_id '{}': already completed - status {} for subtask of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, subtask_status, entry,
                                new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.completed',
                                tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask doesn't think that this subtask is already being
    # retried by another task.
    if subtask_state == RETRY:
        # Check to see if the input number of retries is less than the recorded number.
        # If so, then this is an earlier version of the task, and a duplicate.
        new_retry_count = new_subtask_status.get_retry_count()
        current_retry_count = subtask_status.get_retry_count()
        if new_retry_count < current_retry_count:
            format_str = "Unexpected task_id '{}': already retried - status {} for subtask of instructor task '{}': rejecting task {}"
            msg = format_str.format(current_task_id, subtask_status, entry,
                                    new_subtask_status)
            TASK_LOG.warning(msg)
            dog_stats_api.increment(
                'instructor_task.subtask.duplicate.retried',
                tags=[entry.course_id])
            raise DuplicateTaskException(msg)

    # Now we are ready to start working on this.  Try to lock it.
    # If it fails, then it means that another worker is already in the
    # middle of working on this.
    if not _acquire_subtask_lock(current_task_id):
        format_str = "Unexpected task_id '{}': already being executed - for subtask of instructor task '{}'"
        msg = format_str.format(current_task_id, entry)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.locked',
                                tags=[entry.course_id])
        raise DuplicateTaskException(msg)
Code Example #31
File: xml_module.py Project: smarnach/edx-platform
    def from_xml(cls, xml_data, system, id_generator):
        """
        Creates an instance of this descriptor from the supplied xml_data.
        This may be overridden by subclasses

        xml_data: A string of xml that will be translated into data and children for
            this module
        system: A DescriptorSystem for interacting with external resources
        """

        xml_object = etree.fromstring(xml_data)
        # VS[compat] -- just have the url_name lookup, once translation is done
        url_name = xml_object.get('url_name', xml_object.get('slug'))
        def_id = id_generator.create_definition(xml_object.tag, url_name)
        usage_id = id_generator.create_usage(def_id)

        # VS[compat] -- detect new-style each-in-a-file mode
        if is_pointer_tag(xml_object):
            # new style:
            # read the actual definition file--named using url_name.replace(':','/')
            filepath = cls._format_filepath(xml_object.tag, name_to_pathname(url_name))
            definition_xml = cls.load_file(filepath, system.resources_fs, def_id)
            system.parse_asides(definition_xml, def_id, usage_id, id_generator)
        else:
            filepath = None
            definition_xml = xml_object
            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=["location:xmlparser_util_mixin_parse_xml"]
            )

        definition, children = cls.load_definition(definition_xml, system, def_id, id_generator)  # note this removes metadata

        # VS[compat] -- make Ike's github preview links work in both old and
        # new file layouts
        if is_pointer_tag(xml_object):
            # new style -- contents actually at filepath
            definition['filename'] = [filepath, filepath]

        metadata = cls.load_metadata(definition_xml)

        # move definition metadata into dict
        dmdata = definition.get('definition_metadata', '')
        if dmdata:
            metadata['definition_metadata_raw'] = dmdata
            try:
                metadata.update(json.loads(dmdata))
            except Exception as err:
                log.debug('Error in loading metadata %r', dmdata, exc_info=True)
                metadata['definition_metadata_err'] = str(err)

        # Set/override any metadata specified by policy
        cls.apply_policy(metadata, system.get_policy(usage_id))

        field_data = {}
        field_data.update(metadata)
        field_data.update(definition)
        field_data['children'] = children

        field_data['xml_attributes']['filename'] = definition.get('filename', ['', None])  # for git link
        kvs = InheritanceKeyValueStore(initial_values=field_data)
        field_data = KvsFieldData(kvs)

        return system.construct_xblock_from_class(
            cls,
            # We're loading a descriptor, so student_id is meaningless
            ScopeIds(None, xml_object.tag, def_id, usage_id),
            field_data,
        )
Code Example #32
    def load_definition(cls, xml_object, system, def_id, id_generator):
        """
        Load a descriptor definition from the specified xml_object.
        Subclasses should not need to override this except in special
        cases (e.g. html module)

        Args:
            xml_object: an lxml.etree._Element containing the definition to load
            system: the modulestore system (aka, runtime) which accesses data and provides access to services
            def_id: the definition id for the block--used to compute the usage id and asides ids
            id_generator: used to generate the usage_id
        """

        # VS[compat] -- the filename attr should go away once everything is
        # converted.  (note: make sure html files still work once this goes away)
        filename = xml_object.get('filename')
        if filename is None:
            definition_xml = copy.deepcopy(xml_object)
            filepath = ''
            aside_children = []
        else:
            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=[
                    "location:xmlparser_util_mixin_load_definition_filename"
                ])

            filepath = cls._format_filepath(xml_object.tag, filename)

            # VS[compat]
            # TODO (cpennington): If the file doesn't exist at the right path,
            # give the class a chance to fix it up. The file will be written out
            # again in the correct format.  This should go away once the CMS is
            # online and has imported all current (fall 2012) courses from xml
            if not system.resources_fs.exists(filepath) and hasattr(
                    cls, 'backcompat_paths'):
                dog_stats_api.increment(
                    DEPRECATION_VSCOMPAT_EVENT,
                    tags=[
                        "location:xmlparser_util_mixin_load_definition_backcompat"
                    ])

                candidates = cls.backcompat_paths(filepath)
                for candidate in candidates:
                    if system.resources_fs.exists(candidate):
                        filepath = candidate
                        break

            definition_xml = cls.load_file(filepath, system.resources_fs,
                                           def_id)
            usage_id = id_generator.create_usage(def_id)
            aside_children = system.parse_asides(definition_xml, def_id,
                                                 usage_id, id_generator)

            # Add the attributes from the pointer node
            definition_xml.attrib.update(xml_object.attrib)

        definition_metadata = cls._get_metadata_from_xml(definition_xml)
        cls.clean_metadata_from_xml(definition_xml)
        definition, children = cls.definition_from_xml(definition_xml, system)
        if definition_metadata:
            definition['definition_metadata'] = definition_metadata
        definition['filename'] = [filepath, filename]

        if aside_children:
            definition['aside_children'] = aside_children

        return definition, children
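The VS[compat] fallback above probes cls.backcompat_paths for legacy file locations. A hedged sketch of how a subclass might supply those candidates (the class name and path patterns here are illustrative, not edx-platform's):

class HtmlDescriptorSketch(object):
    @classmethod
    def backcompat_paths(cls, filepath):
        # e.g. 'html/intro.xml' may have been exported as 'html/intro.html'
        # or bare 'intro.xml' in older course dumps (illustrative only)
        if filepath.endswith('.xml'):
            stem = filepath[:-len('.xml')]
            return [stem + '.html', stem.split('/', 1)[-1] + '.xml']
        return []

print(HtmlDescriptorSketch.backcompat_paths('html/intro.xml'))
# ['html/intro.html', 'intro.xml']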
Code Example #33
    def parse_xml(cls, node, runtime, keys, id_generator):  # pylint: disable=unused-argument
        """
        Use `node` to construct a new block.

        Arguments:
            node (etree.Element): The xml node to parse into an xblock.

            runtime (:class:`.Runtime`): The runtime to use while parsing.

            keys (:class:`.ScopeIds`): The keys identifying where this block
                will store its data.

            id_generator (:class:`.IdGenerator`): An object that will allow the
                runtime to generate correct definition and usage ids for
                children of this block.

        Returns (XBlock): The newly parsed XBlock

        """
        # VS[compat] -- keep only the url_name lookup once the translation is done
        url_name = cls._get_url_name(node)
        def_id = id_generator.create_definition(node.tag, url_name)
        usage_id = id_generator.create_usage(def_id)
        aside_children = []

        # VS[compat] -- detect new-style each-in-a-file mode
        if is_pointer_tag(node):
            # new style:
            # read the actual definition file--named using url_name.replace(':','/')
            definition_xml, filepath = cls.load_definition_xml(
                node, runtime, def_id)
            aside_children = runtime.parse_asides(definition_xml, def_id,
                                                  usage_id, id_generator)
        else:
            filepath = None
            definition_xml = node
            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=["location:xmlparser_util_mixin_parse_xml"])

        # Note: removes metadata.
        definition, children = cls.load_definition(definition_xml, runtime,
                                                   def_id, id_generator)

        # VS[compat] -- make Ike's github preview links work in both old and
        # new file layouts
        if is_pointer_tag(node):
            # new style -- contents actually at filepath
            definition['filename'] = [filepath, filepath]

        metadata = cls.load_metadata(definition_xml)

        # move definition metadata into dict
        dmdata = definition.get('definition_metadata', '')
        if dmdata:
            metadata['definition_metadata_raw'] = dmdata
            try:
                metadata.update(json.loads(dmdata))
            except Exception as err:
                log.debug('Error in loading metadata %r',
                          dmdata,
                          exc_info=True)
                metadata['definition_metadata_err'] = str(err)

        definition_aside_children = definition.pop('aside_children', None)
        if definition_aside_children:
            aside_children.extend(definition_aside_children)

        # Set/override any metadata specified by policy
        cls.apply_policy(metadata, runtime.get_policy(usage_id))

        field_data = {}
        field_data.update(metadata)
        field_data.update(definition)
        field_data['children'] = children

        field_data['xml_attributes']['filename'] = definition.get(
            'filename', ['', None])  # for git link
        kvs = InheritanceKeyValueStore(initial_values=field_data)
        field_data = KvsFieldData(kvs)

        xblock = runtime.construct_xblock_from_class(
            cls,
            # We're loading a descriptor, so student_id is meaningless
            ScopeIds(None, node.tag, def_id, usage_id),
            field_data,
        )

        if aside_children:
            asides_tags = [x.tag for x in aside_children]
            asides = runtime.get_asides(xblock)
            for asd in asides:
                if asd.scope_ids.block_type in asides_tags:
                    xblock.add_aside(asd)

        return xblock
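The field_data assembly near the end is plain dict layering: metadata first, then the definition (so definition values win on key collisions), then the children list. A runnable toy with illustrative values:

metadata = {'display_name': 'Week 1', 'graded': True}
definition = {'data': '<p>...</p>', 'display_name': 'Week 1 (from file)'}
children = ['block-v1:edX+Demo+2014+type@html+block@intro']

field_data = {}
field_data.update(metadata)
field_data.update(definition)   # definition overrides metadata on collision
field_data['children'] = children
print(field_data['display_name'])  # 'Week 1 (from file)'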
Code Example #34
File: helpers.py Project: gopinath81/vmss
def create_xblock(parent_locator,
                  user,
                  category,
                  display_name,
                  boilerplate=None,
                  is_entrance_exam=False):
    """
    Performs the actual grunt work of creating items/xblocks -- knows nothing about requests, views, etc.
    """
    store = modulestore()
    usage_key = usage_key_with_run(parent_locator)
    with store.bulk_operations(usage_key.course_key):
        parent = store.get_item(usage_key)
        dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)

        # get the metadata, display_name, and definition from the caller
        metadata = {}
        data = None
        template_id = boilerplate
        if template_id:
            clz = parent.runtime.load_block_type(category)
            if clz is not None:
                template = clz.get_template(template_id)
                if template is not None:
                    metadata = template.get('metadata', {})
                    data = template.get('data')

        if display_name is not None:
            metadata['display_name'] = display_name

        # We should use the 'fields' kwarg for newer module settings/values (vs. metadata or data)
        fields = {}

        # Entrance Exams: Chapter module positioning
        child_position = None
        if is_entrance_exams_enabled():
            if category == 'chapter' and is_entrance_exam:
                fields['is_entrance_exam'] = is_entrance_exam
                fields['in_entrance_exam'] = True  # Inherited metadata, all children will have it
                child_position = 0

        # TODO need to fix components that are sending definition_data as strings, instead of as dicts
        # For now, migrate them into dicts here.
        if isinstance(data, basestring):
            data = {'data': data}

        created_block = store.create_child(
            user.id,
            usage_key,
            dest_usage_key.block_type,
            block_id=dest_usage_key.block_id,
            fields=fields,
            definition_data=data,
            metadata=metadata,
            runtime=parent.runtime,
            position=child_position,
        )

        # Entrance Exams: Grader assignment
        if is_entrance_exams_enabled():
            course_key = usage_key.course_key
            course = store.get_course(course_key)
            if hasattr(course, 'entrance_exam_enabled') and course.entrance_exam_enabled:
                if category == 'sequential' and parent_locator == course.entrance_exam_id:
                    # Clean up any pre-existing entrance exam graders
                    remove_entrance_exam_graders(course_key, user)
                    grader = {
                        "type": GRADER_TYPES['ENTRANCE_EXAM'],
                        "min_count": 0,
                        "drop_count": 0,
                        "short_label": "Entrance",
                        "weight": 0
                    }
                    grading_model = CourseGradingModel.update_grader_from_json(
                        course.id, grader, user)
                    CourseGradingModel.update_section_grader_type(
                        created_block, grading_model['type'], user)

        # VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so
        # if we add one then we need to also add it to the policy information (i.e. metadata)
        # we should remove this once we can break this reference from the course to static tabs
        if category == 'static_tab':

            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=(
                    "location:create_xblock_static_tab",
                    u"course:{}".format(unicode(dest_usage_key.course_key)),
                ))

            display_name = display_name or _("Empty")  # Prevent name being None
            course = store.get_course(dest_usage_key.course_key)
            course.tabs.append(
                StaticTab(
                    name=display_name,
                    url_slug=dest_usage_key.name,
                ))
            store.update_item(course, user.id)

        return created_block
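A hedged usage sketch for create_xblock: adding a static tab to a course from a Studio view. The import path and locator format are assumptions, not verified against this project:

from contentstore.views.helpers import create_xblock

def add_syllabus_tab(request, course_locator):
    # course_locator: e.g. 'block-v1:edX+Demo+2014+type@course+block@course'
    return create_xblock(
        parent_locator=course_locator,
        user=request.user,
        category='static_tab',
        display_name='Syllabus',
    )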
Code Example #35
File: factories.py Project: zamanafzal/edx-platform
    def _create(cls, target_class, **kwargs):
        """
        Uses ``**kwargs``:

        :parent_location: (required): the location of the parent module
            (e.g. the parent course or section)

        :category: the category of the resulting item.

        :data: (optional): the data for the item
            (e.g. XML problem definition for a problem item)

        :name: (optional): the name of the item

        :display_name: (optional): the display name of the item

        :metadata: (optional): dictionary of metadata attributes

        :boilerplate: (optional) the boilerplate for overriding field values

        :publish_item: (optional) whether or not to publish the item (default is True)

        :target_class: is ignored
        """

        # All class attributes (from this class and base classes) are
        # passed in via **kwargs. However, some of those aren't actual field values,
        # so pop those off for use separately

        # catch any old style users before they get into trouble
        assert 'template' not in kwargs
        parent_location = kwargs.pop('parent_location', None)
        data = kwargs.pop('data', None)
        category = kwargs.pop('category', None)
        display_name = kwargs.pop('display_name', None)
        metadata = kwargs.pop('metadata', {})
        location = kwargs.pop('location')
        user_id = kwargs.pop('user_id', ModuleStoreEnum.UserID.test)
        publish_item = kwargs.pop('publish_item', True)

        assert isinstance(location, UsageKey)
        assert location != parent_location

        store = kwargs.pop('modulestore')

        # This code was based on that in cms/djangoapps/contentstore/views.py
        parent = kwargs.pop('parent', None) or store.get_item(parent_location)

        with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):

            if 'boilerplate' in kwargs:
                template_id = kwargs.pop('boilerplate')
                clz = XBlock.load_class(category, select=prefer_xmodules)
                template = clz.get_template(template_id)
                assert template is not None
                metadata.update(template.get('metadata', {}))
                if not isinstance(data, basestring):
                    data.update(template.get('data'))

            # replace the display name with an optional parameter passed in from the caller
            if display_name is not None:
                metadata['display_name'] = display_name

            module = store.create_child(
                user_id,
                parent.location,
                location.block_type,
                block_id=location.block_id,
                metadata=metadata,
                definition_data=data,
                runtime=parent.runtime,
                fields=kwargs,
            )

            # VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so
            # if we add one then we need to also add it to the policy information (i.e. metadata)
            # we should remove this once we can break this reference from the course to static tabs
            if category == 'static_tab':
                dog_stats_api.increment(
                    DEPRECATION_VSCOMPAT_EVENT,
                    tags=(
                        "location:itemfactory_create_static_tab",
                        u"block:{}".format(location.block_type),
                    ))

                course = store.get_course(location.course_key)
                name = kwargs.pop('name', 'Static Tab')
                course.tabs.append(
                    CourseTab.load('static_tab',
                                   name=name,
                                   url_slug=location.name))
                store.update_item(course, user_id)

            # parent and publish the item, so it can be accessed
            if 'detached' not in module._class_tags:
                parent.children.append(location)
                store.update_item(parent, user_id)
                if publish_item:
                    published_parent = store.publish(parent.location, user_id)
                    # module is last child of parent
                    return published_parent.get_children()[-1]
                else:
                    return store.get_item(location)
            elif publish_item:
                return store.publish(location, user_id)
            else:
                return module
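In edx-platform tests this _create typically backs a factory_boy factory; a hedged usage sketch, assuming the conventional CourseFactory/ItemFactory names and import path:

from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory

course = CourseFactory.create()
problem = ItemFactory.create(
    parent_location=course.location,
    category='problem',
    display_name='Checkpoint 1',
    publish_item=False,   # keep the new block as a draft
)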
Code Example #36
File: xml_module.py Project: 10clouds/edx-platform
    def parse_xml(cls, node, runtime, keys, id_generator):  # pylint: disable=unused-argument
        """
        Use `node` to construct a new block.

        Arguments:
            node (etree.Element): The xml node to parse into an xblock.

            runtime (:class:`.Runtime`): The runtime to use while parsing.

            keys (:class:`.ScopeIds`): The keys identifying where this block
                will store its data.

            id_generator (:class:`.IdGenerator`): An object that will allow the
                runtime to generate correct definition and usage ids for
                children of this block.

        Returns (XBlock): The newly parsed XBlock

        """
        # VS[compat] -- keep only the url_name lookup once the translation is done
        url_name = node.get('url_name', node.get('slug'))
        def_id = id_generator.create_definition(node.tag, url_name)
        usage_id = id_generator.create_usage(def_id)
        aside_children = []

        # VS[compat] -- detect new-style each-in-a-file mode
        if is_pointer_tag(node):
            # new style:
            # read the actual definition file--named using url_name.replace(':','/')
            filepath = cls._format_filepath(node.tag, name_to_pathname(url_name))
            definition_xml = cls.load_file(filepath, runtime.resources_fs, def_id)
            aside_children = runtime.parse_asides(definition_xml, def_id, usage_id, id_generator)
        else:
            filepath = None
            definition_xml = node
            dog_stats_api.increment(
                DEPRECATION_VSCOMPAT_EVENT,
                tags=["location:xmlparser_util_mixin_parse_xml"]
            )

        # Note: removes metadata.
        definition, children = cls.load_definition(definition_xml, runtime, def_id, id_generator)

        # VS[compat] -- make Ike's github preview links work in both old and
        # new file layouts
        if is_pointer_tag(node):
            # new style -- contents actually at filepath
            definition['filename'] = [filepath, filepath]

        metadata = cls.load_metadata(definition_xml)

        # move definition metadata into dict
        dmdata = definition.get('definition_metadata', '')
        if dmdata:
            metadata['definition_metadata_raw'] = dmdata
            try:
                metadata.update(json.loads(dmdata))
            except Exception as err:
                log.debug('Error in loading metadata %r', dmdata, exc_info=True)
                metadata['definition_metadata_err'] = str(err)

        definition_aside_children = definition.pop('aside_children', None)
        if definition_aside_children:
            aside_children.extend(definition_aside_children)

        # Set/override any metadata specified by policy
        cls.apply_policy(metadata, runtime.get_policy(usage_id))

        field_data = {}
        field_data.update(metadata)
        field_data.update(definition)
        field_data['children'] = children

        field_data['xml_attributes']['filename'] = definition.get('filename', ['', None])  # for git link
        kvs = InheritanceKeyValueStore(initial_values=field_data)
        field_data = KvsFieldData(kvs)

        xblock = runtime.construct_xblock_from_class(
            cls,
            # We're loading a descriptor, so student_id is meaningless
            ScopeIds(None, node.tag, def_id, usage_id),
            field_data,
        )

        if aside_children:
            asides_tags = [x.tag for x in aside_children]
            asides = runtime.get_asides(xblock)
            for asd in asides:
                if asd.scope_ids.block_type in asides_tags:
                    xblock.add_aside(asd)

        return xblock
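The closing loop attaches only those asides whose block_type matched a tag among the parsed aside children. A minimal runnable stand-in for that filter (class and tag names are illustrative):

class AsideStub(object):
    def __init__(self, block_type):
        self.block_type = block_type

aside_children_tags = ['acid_aside']                  # tags parsed from the XML
candidate_asides = [AsideStub('acid_aside'), AsideStub('tagging_aside')]

attached = [a for a in candidate_asides if a.block_type in aside_children_tags]
print([a.block_type for a in attached])  # ['acid_aside']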
Code Example #37
File: xml.py Project: smarnach/edx-platform
    def load_course(self, course_dir, course_ids, tracker):
        """
        Load a course into this module store
        course_dir: Course directory name

        returns a CourseDescriptor for the course
        """
        log.debug('========> Starting courselike import from %s', course_dir)
        with open(self.data_dir / course_dir / self.parent_xml) as course_file:

            # VS[compat]
            # TODO (cpennington): Remove this once all fall 2012 courses have
            # been imported into the cms from xml
            course_file = StringIO(clean_out_mako_templating(course_file.read()))

            course_data = etree.parse(course_file, parser=edx_xml_parser).getroot()

            org = course_data.get('org')

            if org is None:
                msg = ("No 'org' attribute set for courselike in {dir}. "
                       "Using default 'edx'".format(dir=course_dir))
                log.warning(msg)
                tracker(msg)
                org = 'edx'

            # Parent XML should be something like 'library.xml' or 'course.xml'
            courselike_label = self.parent_xml.split('.')[0]

            course = course_data.get(courselike_label)

            if course is None:
                msg = (
                    "No '{courselike_label}' attribute set for course in {dir}."
                    " Using default '{default}'".format(
                        courselike_label=courselike_label,
                        dir=course_dir,
                        default=course_dir
                    )
                )
                log.warning(msg)
                tracker(msg)
                course = course_dir

            url_name = course_data.get('url_name', course_data.get('slug'))

            if url_name:
                policy_dir = self.data_dir / course_dir / 'policies' / url_name
                policy_path = policy_dir / 'policy.json'

                policy = self.load_policy(policy_path, tracker)

                # VS[compat]: remove once courses use the policy dirs.
                if policy == {}:

                    dog_stats_api.increment(
                        DEPRECATION_VSCOMPAT_EVENT,
                        tags=(
                            "location:xml_load_course_policy_dir",
                            u"course:{}".format(course),
                        )
                    )

                    old_policy_path = self.data_dir / course_dir / 'policies' / '{0}.json'.format(url_name)
                    policy = self.load_policy(old_policy_path, tracker)
            else:
                policy = {}
                # VS[compat] : 'name' is deprecated, but support it for now...
                if course_data.get('name'):

                    dog_stats_api.increment(
                        DEPRECATION_VSCOMPAT_EVENT,
                        tags=(
                            "location:xml_load_course_course_data_name",
                            u"course:{}".format(course_data.get('course')),
                            u"org:{}".format(course_data.get('org')),
                            u"name:{}".format(course_data.get('name')),
                        )
                    )

                    url_name = Location.clean(course_data.get('name'))
                    tracker("'name' is deprecated for module xml.  Please use "
                            "display_name and url_name.")
                else:
                    url_name = None

            course_id = self.get_id(org, course, url_name)

            if course_ids is not None and course_id not in course_ids:
                return None

            def get_policy(usage_id):
                """
                Return the policy dictionary to be applied to the specified XBlock usage
                """
                return policy.get(policy_key(usage_id), {})

            services = {}
            if self.i18n_service:
                services['i18n'] = self.i18n_service

            if self.fs_service:
                services['fs'] = self.fs_service

            if self.user_service:
                services['user'] = self.user_service

            system = ImportSystem(
                xmlstore=self,
                course_id=course_id,
                course_dir=course_dir,
                error_tracker=tracker,
                load_error_modules=self.load_error_modules,
                get_policy=get_policy,
                mixins=self.xblock_mixins,
                default_class=self.default_class,
                select=self.xblock_select,
                field_data=self.field_data,
                services=services,
            )
            course_descriptor = system.process_xml(etree.tostring(course_data, encoding='unicode'))
            # If we fail to load the course, then skip the rest of the loading steps
            if isinstance(course_descriptor, ErrorDescriptor):
                return course_descriptor

            self.content_importers(system, course_descriptor, course_dir, url_name)

            log.debug('========> Done with courselike import from %s', course_dir)
            return course_descriptor
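A hedged sketch of the lookup that get_policy performs: policies/<url_name>/policy.json maps "<category>/<url_name>" keys to metadata overrides. The file contents and key format below are assumptions for illustration:

import json

policy = json.loads('''{
    "course/2013_Spring": {"start": "2013-02-05T00:00"},
    "chapter/Week_1": {"graded": true}
}''')

def get_policy_for(category, url_name):
    # mirrors policy.get(policy_key(usage_id), {}) above, assuming the key
    # format "category/url_name"
    return policy.get('{}/{}'.format(category, url_name), {})

print(get_policy_for('chapter', 'Week_1'))  # {'graded': True}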
Code Example #38
File: tasks.py Project: epixia/spark
def _send_course_email(entry_id, email_id, to_list, global_email_context,
                       subtask_status):
    """
    Performs the email sending task.

    Sends an email to a list of recipients.

    Inputs are:
      * `entry_id`: id of the InstructorTask object to which progress should be recorded.
      * `email_id`: id of the CourseEmail model that is to be emailed.
      * `to_list`: list of recipients.  Each is represented as a dict with the following keys:
        - 'profile__name': full name of User.
        - 'email': email address of User.
        - 'pk': primary key of User model.
      * `global_email_context`: dict containing values that are unique for this email but the same
        for all recipients of this email.  This dict is to be used to fill in slots in email
        template.  It does not include 'name' and 'email', which will be provided by the to_list.
      * `subtask_status` : object of class SubtaskStatus representing current status.

    Sends to all addresses contained in to_list that are not also in the Optout table.
    Emails are sent multi-part, in both plain text and html.

    Returns a tuple of two values:
      * First value is a SubtaskStatus object which represents current progress at the end of this call.

      * Second value is an exception returned by the innards of the method, indicating a fatal error.
        In this case, the number of recipients that were not sent to has already been added to the
        'failed' count.
    """
    # Get information from current task's request:
    parent_task_id = InstructorTask.objects.get(pk=entry_id).task_id
    task_id = subtask_status.task_id
    total_recipients = len(to_list)
    recipient_num = 0
    total_recipients_successful = 0
    total_recipients_failed = 0
    recipients_info = Counter()

    log.info(
        "BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, TotalRecipients: %s",
        parent_task_id, task_id, email_id, total_recipients)

    try:
        course_email = CourseEmail.objects.get(id=email_id)
    except CourseEmail.DoesNotExist:
        log.exception(
            "BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, Could not find email to send.",
            parent_task_id, task_id, email_id)
        raise

    # Exclude optouts (if not a retry):
    # Note that we don't have to do the optout logic at all if this is a retry,
    # because we have presumably already performed the optout logic on the first
    # attempt.  Anyone on the to_list on a retry has already passed the filter
    # that existed at that time, and we don't need to keep checking for changes
    # in the Optout list.
    if subtask_status.get_retry_count() == 0:
        to_list, num_optout = _filter_optouts_from_recipients(
            to_list, course_email.course_id)
        subtask_status.increment(skipped=num_optout)

    course_title = global_email_context['course_title']

    # use the email from address in the CourseEmail, if it is present, otherwise compute it
    from_addr = course_email.from_addr if course_email.from_addr else \
        _get_source_address(course_email.course_id, course_title)

    # use the CourseEmailTemplate that was associated with the CourseEmail
    course_email_template = course_email.get_template()
    try:
        connection = get_connection()
        connection.open()

        # Define context values to use in all course emails:
        email_context = {'name': '', 'email': ''}
        email_context.update(global_email_context)

        while to_list:
            # Update context with user-specific values from the user at the end of the list.
            # At the end of processing this user, they will be popped off of the to_list.
            # That way, the to_list will always contain the recipients remaining to be emailed.
            # This is convenient for retries, which will need to send to those who haven't
            # yet been emailed, but not send to those who have already been sent to.
            recipient_num += 1
            current_recipient = to_list[-1]
            email = current_recipient['email']
            email_context['email'] = email
            email_context['name'] = current_recipient['profile__name']
            email_context['user_id'] = current_recipient['pk']
            email_context['course_id'] = course_email.course_id

            # Construct message content using templates and context:
            plaintext_msg = course_email_template.render_plaintext(
                course_email.text_message, email_context)
            html_msg = course_email_template.render_htmltext(
                course_email.html_message, email_context)

            # Create email:
            email_msg = EmailMultiAlternatives(course_email.subject,
                                               plaintext_msg,
                                               from_addr, [email],
                                               connection=connection)
            email_msg.attach_alternative(html_msg, 'text/html')

            # Throttle if we have been rate-limited.  This is not very high-tech,
            # but if a task has been retried for rate-limiting reasons, then we sleep
            # for a period of time between all emails within this task.  Choice of
            # the value depends on the number of workers that might be sending email in
            # parallel, and what the SES throttle rate is.
            if subtask_status.retried_nomax > 0:
                sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)

            try:
                log.info(
                    "BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, Recipient num: %s/%s, \
                    Recipient name: %s, Email address: %s", parent_task_id,
                    task_id, email_id, recipient_num, total_recipients,
                    current_recipient['profile__name'], email)
                with dog_stats_api.timer(
                        'course_email.single_send.time.overall',
                        tags=[_statsd_tag(course_title)]):
                    connection.send_messages([email_msg])

            except SMTPDataError as exc:
                # According to SMTP spec, we'll retry error codes in the 4xx range.  5xx range indicates hard failure.
                total_recipients_failed += 1
                log.error(
                    "BulkEmail ==> Status: Failed(SMTPDataError), Task: %s, SubTask: %s, EmailId: %s, \
                    Recipient num: %s/%s, Email address: %s", parent_task_id,
                    task_id, email_id, recipient_num, total_recipients, email)
                if 400 <= exc.smtp_code < 500:
                    # This will cause the outer handler to catch the exception and retry the entire task.
                    raise exc
                else:
                    # This will fall through and not retry the message.
                    log.warning(
                        'BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, Recipient num: %s/%s, '
                        'Email not delivered to %s due to error %s',
                        parent_task_id, task_id, email_id, recipient_num,
                        total_recipients, email, exc.smtp_error)
                    dog_stats_api.increment('course_email.error',
                                            tags=[_statsd_tag(course_title)])
                    subtask_status.increment(failed=1)

            except SINGLE_EMAIL_FAILURE_ERRORS as exc:
                # This will fall through and not retry the message.
                total_recipients_failed += 1
                log.error(
                    "BulkEmail ==> Status: Failed(SINGLE_EMAIL_FAILURE_ERRORS), Task: %s, SubTask: %s, \
                    EmailId: %s, Recipient num: %s/%s, Email address: %s, Exception: %s",
                    parent_task_id, task_id, email_id, recipient_num,
                    total_recipients, email, exc)
                dog_stats_api.increment('course_email.error',
                                        tags=[_statsd_tag(course_title)])
                subtask_status.increment(failed=1)

            else:
                total_recipients_successful += 1
                log.info(
                    "BulkEmail ==> Status: Success, Task: %s, SubTask: %s, EmailId: %s, \
                    Recipient num: %s/%s, Email address: %s,", parent_task_id,
                    task_id, email_id, recipient_num, total_recipients, email)
                dog_stats_api.increment('course_email.sent',
                                        tags=[_statsd_tag(course_title)])
                if settings.BULK_EMAIL_LOG_SENT_EMAILS:
                    log.info('Email with id %s sent to %s', email_id, email)
                else:
                    log.debug('Email with id %s sent to %s', email_id, email)
                subtask_status.increment(succeeded=1)

            # Pop the user that was emailed off the end of the list only once they have
            # successfully been processed.  (That way, if there were a failure that
            # needed to be retried, the user is still on the list.)
            recipients_info[email] += 1
            to_list.pop()

        log.info(
            "BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, Total Successful Recipients: %s/%s, \
            Failed Recipients: %s/%s", parent_task_id, task_id, email_id,
            total_recipients_successful, total_recipients,
            total_recipients_failed, total_recipients)
        duplicate_recipients = [
            "{0} ({1})".format(email, repetition)
            for email, repetition in recipients_info.most_common()
            if repetition > 1
        ]
        if duplicate_recipients:
            log.info(
                "BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, Total Duplicate Recipients [%s]: [%s]",
                parent_task_id, task_id, email_id, len(duplicate_recipients),
                ', '.join(duplicate_recipients))

    except INFINITE_RETRY_ERRORS as exc:
        dog_stats_api.increment('course_email.infinite_retry',
                                tags=[_statsd_tag(course_title)])
        # Increment the "retried_nomax" counter, update other counters with progress to date,
        # and set the state to RETRY:
        subtask_status.increment(retried_nomax=1, state=RETRY)
        return _submit_for_retry(entry_id,
                                 email_id,
                                 to_list,
                                 global_email_context,
                                 exc,
                                 subtask_status,
                                 skip_retry_max=True)

    except LIMITED_RETRY_ERRORS as exc:
        # Errors caught here cause the email to be retried.  The entire task is actually retried
        # without popping the current recipient off of the existing list.
        # Errors caught are those that indicate a temporary condition that might succeed on retry.
        dog_stats_api.increment('course_email.limited_retry',
                                tags=[_statsd_tag(course_title)])
        # Increment the "retried_withmax" counter, update other counters with progress to date,
        # and set the state to RETRY:
        subtask_status.increment(retried_withmax=1, state=RETRY)
        return _submit_for_retry(entry_id,
                                 email_id,
                                 to_list,
                                 global_email_context,
                                 exc,
                                 subtask_status,
                                 skip_retry_max=False)

    except BULK_EMAIL_FAILURE_ERRORS as exc:
        dog_stats_api.increment('course_email.error',
                                tags=[_statsd_tag(course_title)])
        num_pending = len(to_list)
        log.exception(
            'Task %s: email with id %d caused send_course_email task to fail with "fatal" exception.  %d emails unsent.',
            task_id, email_id, num_pending)
        # Update counters with progress to date, counting unsent emails as failures,
        # and set the state to FAILURE:
        subtask_status.increment(failed=num_pending, state=FAILURE)
        return subtask_status, exc

    except Exception as exc:
        # Errors caught here cause the email to be retried.  The entire task is actually retried
        # without popping the current recipient off of the existing list.
        # These are unexpected errors.  Since they might be due to a temporary condition that might
        # succeed on retry, we give them a retry.
        dog_stats_api.increment('course_email.limited_retry',
                                tags=[_statsd_tag(course_title)])
        log.exception(
            'Task %s: email with id %d caused send_course_email task to fail with unexpected exception.  Generating retry.',
            task_id, email_id)
        # Increment the "retried_withmax" counter, update other counters with progress to date,
        # and set the state to RETRY:
        subtask_status.increment(retried_withmax=1, state=RETRY)
        return _submit_for_retry(entry_id,
                                 email_id,
                                 to_list,
                                 global_email_context,
                                 exc,
                                 subtask_status,
                                 skip_retry_max=False)

    else:
        # All went well.  Update counters with progress to date,
        # and set the state to SUCCESS:
        subtask_status.increment(state=SUCCESS)
        # Successful completion is marked by an exception value of None.
        return subtask_status, None
    finally:
        # Clean up at the end.
        connection.close()
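The send loop's retry invariant is worth spelling out: the current recipient stays at the tail of to_list until fully processed, so a retried task resumes with exactly the unsent remainder. A runnable toy of that peek-then-pop pattern:

def send(recipient):
    """Stand-in for connection.send_messages([...]); may raise on failure."""
    return True

to_list = [{'email': 'a@example.com'}, {'email': 'b@example.com'}]
while to_list:
    recipient = to_list[-1]   # peek at the tail; do not pop yet
    send(recipient)           # if this raises, the retry still sees `recipient`
    to_list.pop()             # drop only once fully processed
print(to_list)                # [] -- everyone handled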
Code Example #39
File: utils.py Project: jswope00/griffinx
def perform_request(method,
                    url,
                    data_or_params=None,
                    raw=False,
                    metric_action=None,
                    metric_tags=None,
                    paged_results=False):

    if metric_tags is None:
        metric_tags = []

    metric_tags.append(u'method:{}'.format(method))
    if metric_action:
        metric_tags.append(u'action:{}'.format(metric_action))

    if data_or_params is None:
        data_or_params = {}
    headers = {
        'X-Edx-Api-Key': getattr(settings, "COMMENTS_SERVICE_KEY", None),
        'Accept-Language': get_language(),
    }
    request_id = uuid4()
    request_id_dict = {'request_id': request_id}

    if method in ['post', 'put', 'patch']:
        data = data_or_params
        params = request_id_dict
    else:
        data = None
        params = merge_dict(data_or_params, request_id_dict)
    with request_timer(request_id, method, url, metric_tags):
        response = requests.request(method,
                                    url,
                                    data=data,
                                    params=params,
                                    headers=headers,
                                    timeout=5)

    metric_tags.append(u'status_code:{}'.format(response.status_code))
    if response.status_code > 200:
        metric_tags.append(u'result:failure')
    else:
        metric_tags.append(u'result:success')

    dog_stats_api.increment('comment_client.request.count', tags=metric_tags)

    if 200 < response.status_code < 500:
        raise CommentClientRequestError(response.text, response.status_code)
    # Heroku returns a 503 when an application is in maintenance mode
    elif response.status_code == 503:
        raise CommentClientMaintenanceError(response.text)
    elif response.status_code == 500:
        raise CommentClient500Error(response.text)
    else:
        if raw:
            return response.text
        else:
            try:
                data = response.json()
            except ValueError:
                raise CommentClientError(
                    u"Comments service returned invalid JSON for request {request_id}; first 100 characters: '{content}'"
                    .format(request_id=request_id,
                            content=response.text[:100]))
            if paged_results:
                dog_stats_api.histogram(
                    'comment_client.request.paged.result_count',
                    value=len(data.get('collection', [])),
                    tags=metric_tags)
                dog_stats_api.histogram('comment_client.request.paged.page',
                                        value=data.get('page', 1),
                                        tags=metric_tags)
                dog_stats_api.histogram(
                    'comment_client.request.paged.num_pages',
                    value=data.get('num_pages', 1),
                    tags=metric_tags)
            return data
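A hedged usage sketch for perform_request as defined above; the URL, course id, and tag values are illustrative:

threads = perform_request(
    'get',
    'http://localhost:4567/api/v1/threads',
    {'course_id': 'edX/Demo/2014', 'page': 1},
    metric_action='thread.list',
    metric_tags=['course:edX/Demo/2014'],
    paged_results=True,
)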
Code Example #40
File: subtasks.py Project: CDOT-EDX/edx-platform
def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
    """
    Confirms that the current subtask is known to the InstructorTask and hasn't already been completed.

    Problems can occur when the parent task has been run twice, resulting in duplicate
    subtasks being created for the same InstructorTask entry.  This can happen when Celery
    loses its connection to its broker and any current tasks get requeued.

    If a parent task gets requeued, then the same InstructorTask may have a different set of
    subtasks defined (to do the same thing), so the subtasks from the first queuing would not
    be known to the InstructorTask.  We raise an exception in this case.

    If a subtask gets requeued, then the first time the subtask runs it should run fine to completion.
    However, we want to prevent it from running again, so we check here to see what the existing
    subtask's status is.  If it is complete, we raise an exception.  We also take a lock on the
    task so that we can detect whether another worker has started the work but not yet completed
    it; in that case the other worker is allowed to finish, and this task raises an exception.

    Raises a DuplicateTaskException exception if it's not a task that should be run.

    If this succeeds, it requires that update_subtask_status() is called to release the lock on the
    task.
    """
    # Confirm that the InstructorTask actually defines subtasks.
    entry = InstructorTask.objects.get(pk=entry_id)
    if len(entry.subtasks) == 0:
        format_str = "Unexpected task_id '{}': unable to find subtasks of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, entry, new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.nosubtasks', tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask knows about this particular subtask.
    subtask_dict = json.loads(entry.subtasks)
    subtask_status_info = subtask_dict['status']
    if current_task_id not in subtask_status_info:
        format_str = "Unexpected task_id '{}': unable to find status for subtask of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, entry, new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.unknown', tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask doesn't think that this subtask has already been
    # performed successfully.
    subtask_status = SubtaskStatus.from_dict(subtask_status_info[current_task_id])
    subtask_state = subtask_status.state
    if subtask_state in READY_STATES:
        format_str = "Unexpected task_id '{}': already completed - status {} for subtask of instructor task '{}': rejecting task {}"
        msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.completed', tags=[entry.course_id])
        raise DuplicateTaskException(msg)

    # Confirm that the InstructorTask doesn't think that this subtask is already being
    # retried by another task.
    if subtask_state == RETRY:
        # Check to see if the input number of retries is less than the recorded number.
        # If so, then this is an earlier version of the task, and a duplicate.
        new_retry_count = new_subtask_status.get_retry_count()
        current_retry_count = subtask_status.get_retry_count()
        if new_retry_count < current_retry_count:
            format_str = "Unexpected task_id '{}': already retried - status {} for subtask of instructor task '{}': rejecting task {}"
            msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
            TASK_LOG.warning(msg)
            dog_stats_api.increment('instructor_task.subtask.duplicate.retried', tags=[entry.course_id])
            raise DuplicateTaskException(msg)

    # Now we are ready to start working on this.  Try to lock it.
    # If it fails, then it means that another worker is already in the
    # middle of working on this.
    if not _acquire_subtask_lock(current_task_id):
        format_str = "Unexpected task_id '{}': already being executed - for subtask of instructor task '{}'"
        msg = format_str.format(current_task_id, entry)
        TASK_LOG.warning(msg)
        dog_stats_api.increment('instructor_task.subtask.duplicate.locked', tags=[entry.course_id])
        raise DuplicateTaskException(msg)
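The caller contract from the docstring, sketched with the names used above (do_subtask_work is a hypothetical stand-in for the subtask's real work):

check_subtask_is_valid(entry_id, current_task_id, new_subtask_status)  # may raise DuplicateTaskException
try:
    new_subtask_status = do_subtask_work()
finally:
    # required by the contract above: releases the lock taken during validation
    update_subtask_status(entry_id, current_task_id, new_subtask_status)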
Code Example #41
def create_account_with_params(request, params):
    """
    Given a request and a dict of parameters (which may or may not have come
    from the request), create an account for the requesting user, including
    creating a comments service user object and sending an activation email.
    This also takes external/third-party auth into account, updates that as
    necessary, and authenticates the user for the request's session.

    Returns the newly created, authenticated user.

    Raises AccountValidationError if an account with the username or email
    specified by params already exists, or ValidationError if any of the given
    parameters is invalid for any other reason.

    Issues with this code:
    * It is non-transactional except where explicitly wrapped in atomic to
      alleviate deadlocks and improve performance. This means failures at
      different places in registration can leave users in inconsistent
      states.
    * Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a sanity check that
      they are sane.
    * The user-facing text is rather unfriendly (e.g. "Username must be a
      minimum of two characters long" rather than "Please use a username of
      at least two characters").
    * Duplicate email raises a ValidationError (rather than the expected
      AccountValidationError). Duplicate username returns an inconsistent
      user message (i.e. "An account with the Public Username '{username}'
      already exists." rather than "It looks like {username} belongs to an
      existing account. Try again with a different username.") The two checks
      occur at different places in the code; as a result, registering with
      both a duplicate username and email raises only a ValidationError for
      email only.
    """
    # Copy params so we can modify it; we can't just do dict(params) because if
    # params is request.POST, that results in a dict containing lists of values
    params = dict(params.items())

    # allow a custom set of required/optional/hidden fields to be defined via configuration
    extra_fields = configuration_helpers.get_value(
        'REGISTRATION_EXTRA_FIELDS',
        getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {}))
    # registration via third party (Google, Facebook) using mobile application
    # doesn't use social auth pipeline (no redirect uri(s) etc involved).
    # In this case all related info (required for account linking)
    # is sent in params.
    # `third_party_auth_credentials_in_api` essentially means 'request
    # is made from mobile application'
    third_party_auth_credentials_in_api = 'provider' in params
    is_third_party_auth_enabled = third_party_auth.is_enabled()

    if is_third_party_auth_enabled and (pipeline.running(request) or
                                        third_party_auth_credentials_in_api):
        params["password"] = generate_password()

    # in case user is registering via third party (Google, Facebook) and pipeline has expired, show appropriate
    # error message
    if is_third_party_auth_enabled and ('social_auth_provider' in params
                                        and not pipeline.running(request)):
        raise ValidationError({
            'session_expired': [
                _(u"Registration using {provider} has timed out.").format(
                    provider=params.get('social_auth_provider'))
            ]
        })

    do_external_auth, eamap = pre_account_creation_external_auth(
        request, params)

    extended_profile_fields = configuration_helpers.get_value(
        'extended_profile_fields', [])
    # Can't have terms of service for certain SHIB users, like at Stanford
    registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    tos_required = (registration_fields.get('terms_of_service') != 'hidden'
                    or registration_fields.get('honor_code') != 'hidden') and (
                        not settings.FEATURES.get("AUTH_USE_SHIB")
                        or not settings.FEATURES.get("SHIB_DISABLE_TOS")
                        or not do_external_auth
                        or not eamap.external_domain.startswith(
                            settings.SHIBBOLETH_DOMAIN_PREFIX))

    form = AccountCreationForm(
        data=params,
        extra_fields=extra_fields,
        extended_profile_fields=extended_profile_fields,
        do_third_party_auth=do_external_auth,
        tos_required=tos_required,
    )
    custom_form = get_registration_extension_form(data=params)

    # Perform operations within a transaction that are critical to account creation
    with outer_atomic(read_committed=True):
        # first, create the account
        (user, profile, registration) = do_create_account(form, custom_form)

        third_party_provider, running_pipeline = _link_user_to_third_party_provider(
            is_third_party_auth_enabled,
            third_party_auth_credentials_in_api,
            user,
            request,
            params,
        )

        new_user = authenticate_new_user(request, user.username,
                                         params['password'])
        django_login(request, new_user)
        request.session.set_expiry(0)

        post_account_creation_external_auth(do_external_auth, eamap, new_user)

    # Check if system is configured to skip activation email for the current user.
    skip_email = _skip_activation_email(
        user,
        do_external_auth,
        running_pipeline,
        third_party_provider,
    )

    if skip_email:
        registration.activate()
    else:
        compose_and_send_activation_email(user, profile, registration)

    # Perform operations that are non-critical parts of account creation
    create_or_set_user_attribute_created_on_site(user, request.site)

    preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())

    if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
        try:
            enable_notifications(user)
        except Exception:  # pylint: disable=broad-except
            log.exception(
                "Enable discussion notifications failed for user {id}.".format(
                    id=user.id))

    dog_stats_api.increment("common.student.account_created")

    _track_user_registration(user, profile, params, third_party_provider)

    # Announce registration
    REGISTER_USER.send(sender=None, user=user, registration=registration)

    create_comments_service_user(user)

    try:
        _record_registration_attributions(request, new_user)
    # Don't prevent a user from registering due to attribution errors.
    except Exception:  # pylint: disable=broad-except
        log.exception('Error while attributing cookies to user registration.')

    # TODO: there is no error checking here to see that the user actually logged in successfully,
    # and is not yet an active user.
    if new_user is not None:
        AUDIT_LOG.info(u"Login success on new account creation - {0}".format(
            new_user.username))

    return new_user
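A hedged sketch of a caller: a registration view handing POST params straight through and translating validation failures into responses (the response shape and status codes are choices for illustration; AccountValidationError is assumed to come from the surrounding student module):

from django.core.exceptions import ValidationError
from django.http import JsonResponse

def register_view(request):
    try:
        user = create_account_with_params(request, request.POST)
    except AccountValidationError as err:   # e.g. duplicate username
        return JsonResponse({'error': str(err)}, status=409)
    except ValidationError as err:          # any other invalid parameter
        return JsonResponse({'error': str(err)}, status=400)
    return JsonResponse({'username': user.username})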
Code Example #42
File: subtasks.py Project: CDOT-EDX/edx-platform
def _update_subtask_status(entry_id, current_task_id, new_subtask_status):
    """
    Update the status of the subtask in the parent InstructorTask object tracking its progress.

    Uses select_for_update to lock the InstructorTask object while it is being updated.
    The operation is surrounded by a try/except/else that permit the manual transaction to be
    committed on completion, or rolled back on error.

    The InstructorTask's "task_output" field is updated.  This is a JSON-serialized dict.
    Accumulates values for 'attempted', 'succeeded', 'failed', 'skipped' from `new_subtask_status`
    into the corresponding values in the InstructorTask's task_output.  Also updates the 'duration_ms'
    value with the current interval since the original InstructorTask started.  Note that this
    value is only approximate, since the subtask may be running on a different server than the
    original task, so is subject to clock skew.

    The InstructorTask's "subtasks" field is also updated.  This is also a JSON-serialized dict.
    Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of
    subtasks.  'Total' is expected to have been set at the time the subtasks were created.
    The other three counters are incremented depending on the value of `status`.  Once the counters
    for 'succeeded' and 'failed' match the 'total', the subtasks are done and the InstructorTask's
    "status" is changed to SUCCESS.

    The "subtasks" field also contains a 'status' key, that contains a dict that stores status
    information for each subtask.  At the moment, the value for each subtask (keyed by its task_id)
    is the value of the SubtaskStatus.to_dict(), but could be expanded in future to store information
    about failure messages, progress made, etc.
    """
    TASK_LOG.info("Preparing to update status for subtask %s for instructor task %d with status %s",
                  current_task_id, entry_id, new_subtask_status)

    try:
        entry = InstructorTask.objects.select_for_update().get(pk=entry_id)
        subtask_dict = json.loads(entry.subtasks)
        subtask_status_info = subtask_dict['status']
        if current_task_id not in subtask_status_info:
            # unexpected error -- raise an exception
            format_str = "Unexpected task_id '{}': unable to update status for subtask of instructor task '{}'"
            msg = format_str.format(current_task_id, entry_id)
            TASK_LOG.warning(msg)
            raise ValueError(msg)

        # Update status:
        subtask_status_info[current_task_id] = new_subtask_status.to_dict()

        # Update the parent task progress.
        # Set the estimate of duration, but only if it
        # increases.  Clock skew between time() returned by different machines
        # may result in non-monotonic values for duration.
        task_progress = json.loads(entry.task_output)
        start_time = task_progress['start_time']
        prev_duration = task_progress['duration_ms']
        new_duration = int((time() - start_time) * 1000)
        task_progress['duration_ms'] = max(prev_duration, new_duration)

        # Update counts only when subtask is done.
        # In future, we can make this more responsive by updating status
        # between retries, by comparing counts that change from previous
        # retry.
        new_state = new_subtask_status.state
        if new_state in READY_STATES:
            for statname in ['attempted', 'succeeded', 'failed', 'skipped']:
                task_progress[statname] += getattr(new_subtask_status, statname)

        # Figure out if we're actually done (i.e. this is the last task to complete).
        # This is easier if we just maintain a counter, rather than scanning the
        # entire subtask-status dict.
        if new_state == SUCCESS:
            subtask_dict['succeeded'] += 1
        elif new_state in READY_STATES:
            subtask_dict['failed'] += 1
        num_remaining = subtask_dict['total'] - subtask_dict['succeeded'] - subtask_dict['failed']

        # If we're done with the last task, update the parent status to indicate that.
        # At present, we mark the task as having succeeded.  In future, we should see
        # if there was a catastrophic failure that occurred, and figure out how to
        # report that here.
        if num_remaining <= 0:
            entry.task_state = SUCCESS
        entry.subtasks = json.dumps(subtask_dict)
        entry.task_output = InstructorTask.create_output_for_success(task_progress)

        TASK_LOG.debug("about to save....")
        entry.save()
        TASK_LOG.info("Task output updated to %s for subtask %s of instructor task %d",
                      entry.task_output, current_task_id, entry_id)
    except Exception:
        TASK_LOG.exception("Unexpected error while updating InstructorTask.")
        transaction.rollback()
        dog_stats_api.increment('instructor_task.subtask.update_exception')
        raise
    else:
        TASK_LOG.debug("about to commit....")
        transaction.commit()
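
For orientation, the two JSON-serialized fields this function manipulates can be sketched as plain dicts. This is a minimal illustration: the key names are the ones read and written above, but all values are invented.

# Minimal sketch of the JSON stored in the InstructorTask row; key names
# are taken from the code above, all values are invented.
example_task_output = {
    'start_time': 1349404800.0,   # time.time() when the parent task started
    'duration_ms': 12345,
    'attempted': 4,
    'succeeded': 3,
    'failed': 1,
    'skipped': 0,
}

example_subtasks = {
    'total': 2,
    'succeeded': 1,
    'retried': 0,
    'failed': 1,
    'status': {
        # task_id -> SubtaskStatus.to_dict()
        'subtask-id-1': {'state': 'SUCCESS', 'attempted': 2,
                         'succeeded': 2, 'failed': 0, 'skipped': 0},
        'subtask-id-2': {'state': 'FAILURE', 'attempted': 2,
                         'succeeded': 1, 'failed': 1, 'skipped': 0},
    },
}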
Code Example #43
0
File: utils.py Project: tanyaxp/edx-platform
def perform_request(method,
                    url,
                    data_or_params=None,
                    raw=False,
                    metric_action=None,
                    metric_tags=None,
                    paged_results=False):
    # Local import to avoid a circular dependency
    from django_comment_common.models import ForumsConfig
    config = ForumsConfig.current()

    if not config.enabled:
        raise CommentClientMaintenanceError('service disabled')

    if metric_tags is None:
        metric_tags = []

    metric_tags.append(u'method:{}'.format(method))
    if metric_action:
        metric_tags.append(u'action:{}'.format(metric_action))

    if data_or_params is None:
        data_or_params = {}
    headers = {
        'X-Edx-Api-Key': config.api_key,
        'Accept-Language': get_language(),
    }
    request_id = uuid4()
    request_id_dict = {'request_id': request_id}

    if method in ['post', 'put', 'patch']:
        data = data_or_params
        params = request_id_dict
    else:
        data = None
        params = merge_dict(data_or_params, request_id_dict)
    with request_timer(request_id, method, url, metric_tags):
        response = requests.request(method,
                                    url,
                                    data=data,
                                    params=params,
                                    headers=headers,
                                    timeout=config.connection_timeout)

    metric_tags.append(u'status_code:{}'.format(response.status_code))
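    # Any status code above 200 (including 201 and 204) is tagged as a failure.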
    if response.status_code > 200:
        metric_tags.append(u'result:failure')
    else:
        metric_tags.append(u'result:success')

    dog_stats_api.increment('comment_client.request.count', tags=metric_tags)

    if 200 < response.status_code < 500:
        raise CommentClientRequestError(response.text, response.status_code)
    # Heroku returns a 503 when an application is in maintenance mode
    elif response.status_code == 503:
        raise CommentClientMaintenanceError(response.text)
    elif response.status_code == 500:
        raise CommentClient500Error(response.text)
    else:
        if raw:
            return response.text
        else:
            try:
                data = response.json()
            except ValueError:
                message = (u"Comments service returned invalid JSON "
                           "for request {request_id} at url {url} "
                           "with HTTP status code {status_code}; "
                           "first 100 characters: '{content}'")
                raise CommentClientError(
                    message.format(
                        request_id=request_id,
                        url=response.url,
                        status_code=response.status_code,
                        content=response.text[:100],
                    ))
            if paged_results:
                dog_stats_api.histogram(
                    'comment_client.request.paged.result_count',
                    value=len(data.get('collection', [])),
                    tags=metric_tags)
                dog_stats_api.histogram('comment_client.request.paged.page',
                                        value=data.get('page', 1),
                                        tags=metric_tags)
                dog_stats_api.histogram(
                    'comment_client.request.paged.num_pages',
                    value=data.get('num_pages', 1),
                    tags=metric_tags)
            return data
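
A hedged usage sketch of the function above; the endpoint URL, params, and metric_action value are invented for illustration:

# Hypothetical call: fetch one page of threads from the comments service.
# The URL, params, and metric_action below are invented, not real routes.
threads = perform_request(
    'get',
    'http://localhost:4567/api/v1/threads',
    data_or_params={'course_id': 'org/course/run', 'page': 1},
    metric_action='thread.list',
    paged_results=True,
)
# On success this returns the decoded JSON dict, after emitting the
# request counter and the paged-result histograms shown above.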
Code Example #44
0
    def load_definition(cls, xml_object, system, location, id_generator):
        '''Load a descriptor from the specified xml_object:

        If there is a filename attribute, load it as a string, and
        log a warning if it is not parseable by etree.HTMLParser.

        If there is no filename attribute, the definition is the body
        of the xml_object, without the root tag (we do not want an <html> tag
        in the middle of a page).

        Args:
            xml_object: an lxml.etree._Element containing the definition to load
            system: the modulestore system or runtime which caches data
            location: the usage id for the block--used to compute the filename if none in the xml_object
            id_generator: used by other impls of this method to generate the usage_id
        '''
        filename = xml_object.get('filename')
        if filename is None:
            definition_xml = copy.deepcopy(xml_object)
            cls.clean_metadata_from_xml(definition_xml)
            return {'data': stringify_children(definition_xml)}, []
        else:
            # html is special.  cls.filename_extension is 'xml', but
            # if 'filename' is in the definition, the content is loaded
            # from a .html file instead.
            # 'filename' in html pointers is a relative path
            # (not the same as 'html/blah.html' when the pointer is in a directory itself)
            pointer_path = "{category}/{url_path}".format(
                category='html', url_path=name_to_pathname(location.name))
            base = path(pointer_path).dirname()
            # log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename))
            filepath = "{base}/{name}.html".format(base=base, name=filename)
            # log.debug("looking for html file for {0} at {1}".format(location, filepath))

            # VS[compat]
            # TODO (cpennington): If the file doesn't exist at the right path,
            # give the class a chance to fix it up. The file will be written out
            # again in the correct format.  This should go away once the CMS is
            # online and has imported all current (fall 2012) courses from xml
            if not system.resources_fs.exists(filepath):

                dog_stats_api.increment(
                    DEPRECATION_VSCOMPAT_EVENT,
                    tags=["location:html_descriptor_load_definition"])

                candidates = cls.backcompat_paths(filepath)
                # log.debug("candidates = {0}".format(candidates))
                for candidate in candidates:
                    if system.resources_fs.exists(candidate):
                        filepath = candidate
                        break

            try:
                with system.resources_fs.open(filepath) as infile:
                    html = infile.read().decode('utf-8')
                    # Log a warning if we can't parse the file, but don't error
                    if not check_html(html) and len(html) > 0:
                        msg = "Couldn't parse html in {0}, content = {1}".format(
                            filepath, html)
                        log.warning(msg)
                        system.error_tracker("Warning: " + msg)

                    definition = {'data': html}

                    # TODO (ichuang): remove this after migration
                    # for Fall 2012 LMS migration: keep filename (and unmangled filename)
                    definition['filename'] = [filepath, filename]

                    return definition, []

            except ResourceNotFoundError as err:
                msg = 'Unable to load file contents at path {0}: {1} '.format(
                    filepath, err)
                # add more info and re-raise
                raise Exception(msg), None, sys.exc_info()[2]
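
The path arithmetic in the else branch is easy to check with a standalone sketch. This assumes name_to_pathname simply maps ':' to '/', which is what its use here implies:

# Standalone sketch of the filepath computation above.  Assumes that
# name_to_pathname maps ':' to '/' in the location name.
import posixpath

def html_filepath(location_name, filename):
    pointer_path = "html/{url_path}".format(
        url_path=location_name.replace(':', '/'))
    base = posixpath.dirname(pointer_path)
    return "{base}/{name}.html".format(base=base, name=filename)

# A nested pointer keeps its directory; a flat one resolves under html/.
assert html_filepath('week1:intro', 'intro') == 'html/week1/intro.html'
assert html_filepath('intro', 'intro') == 'html/intro.html'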
Code Example #45
0
def _update_subtask_status(entry_id, current_task_id, new_subtask_status):
    """
    Update the status of the subtask in the parent InstructorTask object tracking its progress.

    Uses select_for_update to lock the InstructorTask object while it is being updated.
    The operation is surrounded by a try/except/else that permits the manual transaction to be
    committed on completion, or rolled back on error.

    The InstructorTask's "task_output" field is updated.  This is a JSON-serialized dict.
    Accumulates values for 'attempted', 'succeeded', 'failed', 'skipped' from `new_subtask_status`
    into the corresponding values in the InstructorTask's task_output.  Also updates the 'duration_ms'
    value with the elapsed time since the original InstructorTask started.  Note that this
    value is only approximate, since the subtask may be running on a different server than the
    original task and is therefore subject to clock skew.

    The InstructorTask's "subtasks" field is also updated.  This is also a JSON-serialized dict.
    Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of
    subtasks.  'Total' is expected to have been set at the time the subtasks were created.
    The other three counters are incremented depending on the state of `new_subtask_status`.  Once the counters
    for 'succeeded' and 'failed' match the 'total', the subtasks are done and the InstructorTask's
    "status" is changed to SUCCESS.

    The "subtasks" field also contains a 'status' key, that contains a dict that stores status
    information for each subtask.  At the moment, the value for each subtask (keyed by its task_id)
    is the value of the SubtaskStatus.to_dict(), but could be expanded in future to store information
    about failure messages, progress made, etc.
    """
    TASK_LOG.info(
        "Preparing to update status for subtask %s for instructor task %d with status %s",
        current_task_id, entry_id, new_subtask_status)

    try:
        entry = InstructorTask.objects.select_for_update().get(pk=entry_id)
        subtask_dict = json.loads(entry.subtasks)
        subtask_status_info = subtask_dict['status']
        if current_task_id not in subtask_status_info:
            # unexpected error -- raise an exception
            format_str = "Unexpected task_id '{}': unable to update status for subtask of instructor task '{}'"
            msg = format_str.format(current_task_id, entry_id)
            TASK_LOG.warning(msg)
            raise ValueError(msg)

        # Update status:
        subtask_status_info[current_task_id] = new_subtask_status.to_dict()

        # Update the parent task progress.
        # Set the estimate of duration, but only if it
        # increases.  Clock skew between time() returned by different machines
        # may result in non-monotonic values for duration.
        task_progress = json.loads(entry.task_output)
        start_time = task_progress['start_time']
        prev_duration = task_progress['duration_ms']
        new_duration = int((time() - start_time) * 1000)
        task_progress['duration_ms'] = max(prev_duration, new_duration)

        # Update counts only when subtask is done.
        # In future, we can make this more responsive by updating status
        # between retries, by comparing counts that change from previous
        # retry.
        new_state = new_subtask_status.state
        if new_state in READY_STATES:
            for statname in ['attempted', 'succeeded', 'failed', 'skipped']:
                task_progress[statname] += getattr(new_subtask_status, statname)

        # Figure out if we're actually done (i.e. this is the last task to complete).
        # This is easier if we just maintain a counter, rather than scanning the
        # entire subtask_status_info dict.
        if new_state == SUCCESS:
            subtask_dict['succeeded'] += 1
        elif new_state in READY_STATES:
            subtask_dict['failed'] += 1
        num_remaining = subtask_dict['total'] - subtask_dict['succeeded'] - subtask_dict['failed']

        # If we're done with the last task, update the parent status to indicate that.
        # At present, we mark the task as having succeeded.  In future, we should see
        # if there was a catastrophic failure that occurred, and figure out how to
        # report that here.
        if num_remaining <= 0:
            entry.task_state = SUCCESS
        entry.subtasks = json.dumps(subtask_dict)
        entry.task_output = InstructorTask.create_output_for_success(
            task_progress)

        TASK_LOG.debug("about to save....")
        entry.save()
        TASK_LOG.info(
            "Task output updated to %s for subtask %s of instructor task %d",
            entry.task_output, current_task_id, entry_id)
    except Exception:
        TASK_LOG.exception("Unexpected error while updating InstructorTask.")
        transaction.rollback()
        dog_stats_api.increment('instructor_task.subtask.update_exception')
        raise
    else:
        TASK_LOG.debug("about to commit....")
        transaction.commit()
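
The SubtaskStatus objects consumed above are defined elsewhere; a minimal sketch consistent with how they are used here (attribute reads plus to_dict()) might look like this, though the real class carries more state:

class SubtaskStatus(object):
    """Minimal sketch inferred from the usage above; illustrative only."""

    def __init__(self, task_id, state, attempted=0, succeeded=0,
                 failed=0, skipped=0):
        self.task_id = task_id
        self.state = state          # e.g. 'SUCCESS', 'FAILURE', 'RETRY'
        self.attempted = attempted
        self.succeeded = succeeded
        self.failed = failed
        self.skipped = skipped

    def to_dict(self):
        # Stored under subtasks['status'][task_id] by _update_subtask_status.
        return dict(self.__dict__)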
Code Example #46
0
File: views.py Project: geekmooc/edx-platform
def update_certificate(request):
    """
    Will update GeneratedCertificate for a new certificate or
    modify an existing certificate entry.

    See models.py for a state diagram of certificate states.

    This view should only ever be accessed by the xqueue server
    """

    status = CertificateStatuses
    if request.method == "POST":

        xqueue_body = json.loads(request.POST.get("xqueue_body"))
        xqueue_header = json.loads(request.POST.get("xqueue_header"))

        try:
            course_key = SlashSeparatedCourseKey.from_deprecated_string(xqueue_body["course_id"])

            cert = GeneratedCertificate.objects.get(
                user__username=xqueue_body["username"], course_id=course_key, key=xqueue_header["lms_key"]
            )

        except GeneratedCertificate.DoesNotExist:
            logger.critical(
                "Unable to lookup certificate\n"
                "xqueue_body: {0}\n"
                "xqueue_header: {1}".format(xqueue_body, xqueue_header)
            )

            return HttpResponse(
                json.dumps({"return_code": 1, "content": "unable to lookup key"}), mimetype="application/json"
            )

        if "error" in xqueue_body:
            cert.status = status.error
            if "error_reason" in xqueue_body:

                # Hopefully we will record a meaningful error
                # here if something bad happened during the
                # certificate generation process
                #
                # example:
                #  (aamorm BerkeleyX/CS169.1x/2012_Fall)
                #  <class 'simples3.bucket.S3Error'>:
                #  HTTP error (reason=error(32, 'Broken pipe'), filename=None) :
                #  certificate_agent.py:175

                cert.error_reason = xqueue_body["error_reason"]
        else:
            if cert.status in [status.generating, status.regenerating]:
                cert.download_uuid = xqueue_body["download_uuid"]
                cert.verify_uuid = xqueue_body["verify_uuid"]
                cert.download_url = xqueue_body["url"]
                cert.status = status.downloadable
            elif cert.status in [status.deleting]:
                cert.status = status.deleted
            else:
                logger.critical("Invalid state for cert update: {0}".format(cert.status))
                return HttpResponse(
                    json.dumps({"return_code": 1, "content": "invalid cert status"}), mimetype="application/json"
                )

        dog_stats_api.increment(
            XQUEUE_METRIC_NAME, tags=[u"action:update_certificate", u"course_id:{}".format(cert.course_id)]
        )

        cert.save()
        return HttpResponse(json.dumps({"return_code": 0}), mimetype="application/json")
Code Example #47
0
File: xml.py Project: nasthagiri/modulestore
    def _load_extra_content(self, system, course_descriptor, category,
                            content_path, course_dir):
        """
        Import fields data content from files
        """
        for filepath in glob.glob(content_path / '*'):
            if not os.path.isfile(filepath):
                continue

            if filepath.endswith('~'):  # skip *~ files
                continue

            with open(filepath) as f:
                try:
                    if '.json' in filepath:
                        # json file with json data content
                        slug, loc, data_content = self._import_field_content(
                            course_descriptor, category, filepath)
                        if data_content is None:
                            continue
                        else:
                            try:
                                # get and update data field in xblock runtime
                                module = system.load_item(loc)
                                for key, value in data_content.iteritems():
                                    setattr(module, key, value)
                                module.save()
                            except ItemNotFoundError:
                                module = None
                                data_content['location'] = loc
                                data_content['category'] = category
                    else:
                        slug = os.path.splitext(os.path.basename(filepath))[0]
                        loc = course_descriptor.scope_ids.usage_id.replace(
                            category=category, name=slug)
                        # html file with html data content
                        html = f.read().decode('utf-8')
                        try:
                            module = system.load_item(loc)
                            module.data = html
                            module.save()
                        except ItemNotFoundError:
                            module = None
                            data_content = {
                                'data': html,
                                'location': loc,
                                'category': category
                            }

                    if module is None:
                        module = system.construct_xblock(
                            category,
                            # We're loading a descriptor, so student_id is meaningless
                            # We also don't have separate notions of definition and usage ids yet,
                            # so we use the location for both
                            ScopeIds(None, category, loc, loc),
                            DictFieldData(data_content),
                        )
                        # VS[compat]:
                        # Hack: pull in the 'display_name' for static tabs
                        # from the course policy, since we need to edit them
                        if category == "static_tab":
                            dog_stats_api.increment(
                                DEPRECATION_VSCOMPAT_EVENT,
                                tags=(
                                    "location:xml_load_extra_content_static_tab",
                                    u"course_dir:{}".format(course_dir),
                                ))

                            tab = CourseTabList.get_tab_by_slug(
                                tab_list=course_descriptor.tabs, url_slug=slug)
                            if tab:
                                module.display_name = tab.name
                                module.course_staff_only = tab.course_staff_only
                        module.data_dir = course_dir
                        module.save()

                        self.modules[course_descriptor.id][
                            module.scope_ids.usage_id] = module
                except Exception as exc:  # pylint: disable=broad-except
                    logging.exception(
                        "Failed to load %s. Skipping... Exception: %s",
                        filepath, unicode(exc))
                    system.error_tracker("ERROR: " + unicode(exc))