Example #1
0
def handle_changes(request, viewset_class, change_type, changes):
    """
    Dispatch a batch of sync changes to the viewset handler registered
    for ``change_type``.

    Returns the handler's result on success, ``None`` when no handler is
    registered for this change type, or ``(changes, None)`` — with an
    "errors" entry added to every change — when anything raises.
    """
    try:
        change_type = int(change_type)
        viewset = viewset_class(request=request)
        viewset.initial(request)
        if change_type not in event_handlers:
            return None
        started_at = time.time()
        handler = getattr(viewset, event_handlers[change_type], None)
        if handler is None:
            raise ChangeNotAllowed(change_type, viewset_class)
        result = handler(changes)
        elapsed = time.time() - started_at

        if elapsed > SLOW_UPDATE_THRESHOLD:
            # This is a warning rather than a hard failure, but routing it
            # through an exception simplifies reporting to Sentry. Raising
            # and immediately catching it fills in a stack trace without
            # interrupting the sync.
            try:
                raise SlowSyncError(change_type, elapsed, changes)
            except SlowSyncError as slow_error:
                report_exception(slow_error)
        return result
    except Exception as e:
        log_sync_exception(e)
        for change in changes:
            change["errors"] = [str(e)]
        return changes, None
Example #2
0
def move_nodes_task(
    self,
    user_id,
    channel_id,
    target_id,
    node_id,
    position="last-child",
):
    """
    Move a content node relative to a target node, retrying on deadlocks.

    :param user_id: id of the requesting user (kept for task signature
        compatibility — not used in the body)
    :param channel_id: id of the channel being edited (kept for task
        signature compatibility — not used in the body)
    :param target_id: id of the node to move relative to
    :param node_id: id of the node being moved
    :param position: MPTT position relative to the target (default "last-child")
    :return: dict with a "changes" list describing the node's updated parent
    """
    node = ContentNode.objects.get(id=node_id)
    target = ContentNode.objects.get(id=target_id)

    moved = False
    attempts = 0
    try:
        # Concurrent edits on the same MPTT tree can deadlock; retry the
        # move a bounded number of times before giving up.
        # BUGFIX: attempts was previously never incremented, so a
        # persistent deadlock would loop forever instead of stopping at 10.
        while not moved and attempts < 10:
            attempts += 1
            try:
                node.move_to(
                    target,
                    position,
                )
                moved = True
            except OperationalError as e:
                if "deadlock detected" not in e.args[0]:
                    raise
                # else: deadlock — fall through and retry
    except Exception as e:
        report_exception(e)

    return {"changes": [generate_update_event(node.pk, CONTENTNODE, {"parent": node.parent_id})]}
Example #3
0
 def report_exception(self, e):
     """
     Flag this task as failed and forward the exception to Sentry.

     :type e: Exception
     """
     # Persist the traceback on the task result so callers can inspect it
     # later via AsyncResult.traceback.
     self.update_state(traceback=e.__traceback__, state=states.FAILURE)
     report_exception(e)
Example #4
0
def create_perseus_zip(ccnode, exercise_data, write_to_path):
    """
    Write a Perseus-format exercise zip for ``ccnode`` to ``write_to_path``.

    The archive contains an ``exercise.json`` manifest plus, per assessment
    item, its image files and graphie SVG/JSON assets. Per-question failures
    are logged and reported to Sentry; outside the master branch environment
    they are re-raised so they surface during development.

    :param ccnode: content node whose assessment items are being published
    :param exercise_data: serializable exercise manifest data
    :param write_to_path: filesystem path (or file-like) for the new zip
    """
    # The context manager closes the archive on exit, so the redundant
    # try/finally zf.close() that used to wrap this body has been removed.
    with zipfile.ZipFile(write_to_path, "w") as zf:
        exercise_context = {
            'exercise': json.dumps(exercise_data, sort_keys=True, indent=4)
        }
        exercise_result = render_to_string('perseus/exercise.json',
                                           exercise_context)
        write_to_zipfile("exercise.json", exercise_result, zf)

        for question in ccnode.assessment_items.prefetch_related(
                'files').all().order_by('order'):
            try:
                # Plain exercise images: copy each referenced file into
                # images/ once, keyed by checksum.
                for image in question.files.filter(
                        preset_id=format_presets.EXERCISE_IMAGE).order_by(
                            'checksum'):
                    image_name = "images/{}.{}".format(
                        image.checksum, image.file_format_id)
                    if image_name not in zf.namelist():
                        with storage.open(
                                ccmodels.generate_object_storage_name(
                                    image.checksum, str(image)),
                                'rb') as content:
                            write_to_zipfile(image_name, content.read(),
                                             zf)

                # Graphie files bundle an SVG part and a JSON part separated
                # by a delimiter; split the raw bytes and store each part.
                for image in question.files.filter(
                        preset_id=format_presets.EXERCISE_GRAPHIE
                ).order_by('checksum'):
                    svg_name = "images/{0}.svg".format(
                        image.original_filename)
                    json_name = "images/{0}-data.json".format(
                        image.original_filename)
                    if svg_name not in zf.namelist(
                    ) or json_name not in zf.namelist():
                        with storage.open(
                                ccmodels.generate_object_storage_name(
                                    image.checksum, str(image)),
                                'rb') as content:
                            content = content.read()
                            # in Python 3, the split delimiter must be bytes
                            content = content.split(
                                exercises.GRAPHIE_DELIMITER.encode(
                                    'ascii'))
                            write_to_zipfile(svg_name, content[0], zf)
                            write_to_zipfile(json_name, content[1], zf)
                write_assessment_item(question, zf)
            except Exception as e:
                logging.error("Publishing error: {}".format(str(e)))
                logging.error(traceback.format_exc())
                # In production, these errors have historically been handled silently.
                # Retain that behavior for now, but raise an error locally so we can
                # better understand the cases in which this might happen.
                report_exception(e)
                if os.environ.get('BRANCH_ENVIRONMENT', '') != "master":
                    raise
Example #5
0
def log_sync_exception(e):
    """
    Report a sync exception without (normally) interrupting the sync.

    In DEBUG or test environments the exception is re-raised so failures
    surface immediately; otherwise it is only reported and logged.

    :param e: the exception caught by the caller
    """
    # Capture exception and report, but allow sync
    # to complete properly.
    report_exception(e)

    if getattr(settings, "DEBUG", False) or getattr(settings, "TEST_ENV",
                                                    False):
        # BUGFIX: re-raise the passed-in exception explicitly. A bare
        # `raise` only works while an exception is actively being handled
        # and would fail with a RuntimeError if this helper were ever
        # called outside of an `except` block.
        raise e
    else:
        # make sure we leave a record in the logs just in case.
        logging.error(e)
Example #6
0
def calculate_resource_size(node, force=False):
    """
    Compute the combined file size of the given node and its complete
    descendants, reusing the cached value whenever it is still fresh.

    :param node: The ContentNode for which to calculate resource size.
    :param force: A boolean to force calculation if node is too big and would otherwise do so async
    :return: A tuple of (size, stale)
    :rtype: (int, bool)
    """
    cache = ResourceSizeCache(node)
    helper = ResourceSizeHelper(node)

    if force:
        # Forced recalculation ignores any cached values entirely.
        cached_size = None
        cached_modified = None
    else:
        cached_size = cache.get_size()
        cached_modified = cache.get_modified()

    # file.modified is nullable, so modified_since may return None; only an
    # explicit False (definitely not modified since the cached timestamp)
    # lets us trust the cached size.
    cache_is_fresh = (
        cached_size is not None
        and cached_modified is not None
        and helper.modified_since(cached_modified) is False
    )
    if cache_is_fresh:
        return cached_size, False

    # Trees above this size are too expensive to total synchronously: hand
    # back the (possibly stale) cached value and flag it as stale.
    if not force and node.get_descendant_count() > STALE_MAX_CALCULATION_SIZE:
        return cached_size, True

    recalc_started = time.time()

    # Capture the modified timestamp before running the size query, then
    # recalculate and refresh the cache.
    now = timezone.now()
    fresh_size = helper.get_size()
    cache.set_size(fresh_size)
    cache.set_modified(now)
    recalc_elapsed = time.time() - recalc_started

    if not force and recalc_elapsed > SLOW_UNFORCED_CALC_THRESHOLD:
        # warn us in Sentry if an unforced recalculation took too long
        try:
            # raising (and catching) fills in the stack trace for the report
            raise SlowCalculationError(node.pk, recalc_elapsed)
        except SlowCalculationError as slow_error:
            report_exception(slow_error)

    return fresh_size, False
Example #7
0
    def delete_from_changes(self, changes):
        """
        Reset the cached resource-size modified marker for every channel
        tree whose files are being deleted, then delegate the deletion.
        """
        try:
            change_keys = [change["key"] for change in changes]
            files = self.filter_queryset_from_keys(
                self.get_edit_queryset(), change_keys
            ).order_by()
            # The root node of each affected tree carries the size cache;
            # clearing its modified marker forces a recalculation on read.
            tree_ids = files.values_list('contentnode__tree_id', flat=True).distinct()
            roots = ContentNode.objects.filter(
                parent__isnull=True,
                tree_id__in=tree_ids,
            )
            for root in roots:
                ResourceSizeCache(root).reset_modified(None)
        except Exception as e:
            # Cache invalidation is best-effort: report, but never block
            # the actual deletion below.
            report_exception(e)

        return super(FileViewSet, self).delete_from_changes(changes)
Example #8
0
def delete_node_task(
    self,
    user_id,
    channel_id,
    node_id,
):
    """
    Delete a content node, retrying if a database deadlock occurs.

    :param user_id: id of the requesting user (kept for task signature
        compatibility — not used in the body)
    :param channel_id: id of the channel being edited (kept for task
        signature compatibility — not used in the body)
    :param node_id: id of the ContentNode to delete
    """
    node = ContentNode.objects.get(id=node_id)

    deleted = False
    attempts = 0
    try:
        # Concurrent edits on the same MPTT tree can deadlock; retry the
        # delete a bounded number of times before giving up.
        # BUGFIX: attempts was previously never incremented, so a
        # persistent deadlock would loop forever instead of stopping at 10.
        while not deleted and attempts < 10:
            attempts += 1
            try:
                node.delete()
                deleted = True
            except OperationalError as e:
                if "deadlock detected" not in e.args[0]:
                    raise
                # else: deadlock — fall through and retry
    except Exception as e:
        report_exception(e)