Example #1
0
def auto_delete_older_than():
    """Periodic task: delete Crash and Feedback records older than their
    configured storage period.

    For every model that had rows removed, sends a summary event to Sentry
    (raven) and writes one meta log line plus one log line per deleted
    element, all correlated by a freshly generated log_id.
    """
    logger = logging.getLogger('limitation')
    for field_name, verbose_name in (('crash', 'Crash'), ('feedback', 'Feedback')):
        outcome = delete_older_than(field_name, verbose_name)
        if not outcome.get('count', 0):
            continue  # nothing removed for this model
        outcome['size'] /= 1024.0 * 1024  # bytes -> megabytes for the report
        log_id = str(uuid.uuid4())  # ties the Sentry event to the log lines
        splunk_url = get_splunk_url(dict(log_id=log_id))
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        raven.captureMessage(
            "[Limitation]Periodic task 'Older than' cleaned up %d %s, total size of cleaned space is %.2f Mb[%d]"
            % (outcome['count'], verbose_name, outcome['size'], time.time()),
            data=dict(level=20, logger='limitation'),
            extra=dict(id=log_id,
                       splunk_url=splunk_url,
                       splunk_filter=splunk_filter))
        logger.info(add_extra_to_log_message(
            'Automatic cleanup',
            extra=dict(log_id=log_id,
                       meta=True,
                       count=outcome['count'],
                       size=outcome['size'],
                       model=verbose_name,
                       reason='old')))
        for element in outcome['elements']:
            element['log_id'] = log_id
            logger.info(add_extra_to_log_message('Automatic cleanup element',
                                                 extra=element))
Example #2
0
def deferred_manual_cleanup(model, limit_size=None, limit_days=None, limit_duplicated=None):
    """Run the enabled cleanup passes for *model* and report the combined
    result to Sentry.

    model: ('app_label', 'ModelName') pair, unpacked into the delete_* helpers.
    limit_duplicated: when truthy, prune duplicate crashes up to this limit.
    limit_days: when truthy, drop rows older than this many days.
    limit_size: when truthy, drop rows until storage fits this size limit.
    Each pass is skipped when its limit is None/0.
    """
    full_result = dict(count=0, size=0, elements=[], signatures={})

    def _merge(result):
        # Fold one cleanup pass into the running totals; an empty pass
        # (count == 0 or missing) contributes nothing.
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']
            # Only delete_duplicate_crashes reports signatures; the default
            # keeps the other passes a no-op here.
            full_result['signatures'].update(result.get('signatures', {}))

    if limit_duplicated:
        _merge(delete_duplicate_crashes(limit=limit_duplicated))
    if limit_days:
        _merge(delete_older_than(*model, limit=limit_days))
    if limit_size:
        _merge(delete_size_is_exceeded(*model, limit=limit_size))

    full_result['size'] /= 1024.0 * 1024  # bytes -> megabytes for the report
    extra = dict(elements=full_result['elements'])
    extra.update(full_result['signatures'])
    raven.captureMessage("[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %.2f Mb[%d]" %
                         (full_result['count'], model[1], full_result['size'], time.time()),
                         data=dict(level=20, logger='limitation'), extra=extra)
Example #3
0
def deferred_manual_cleanup(model,
                            limit_size=None,
                            limit_days=None,
                            limit_duplicated=None):
    """Run the enabled cleanup passes for *model*, then report the outcome
    to Sentry and to the 'limitation' logger under one shared log_id.

    model: ('app_label', 'ModelName') pair, unpacked into the delete_* helpers.
    limit_size / limit_days / limit_duplicated: each truthy value enables the
    corresponding cleanup pass; None skips it.
    """
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])

    def _merge(result):
        # Fold one cleanup pass into the running totals; empty passes add nothing.
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_duplicated:
        _merge(delete_duplicate_crashes(limit=limit_duplicated))
    if limit_days:
        _merge(delete_older_than(*model, limit=limit_days))
    if limit_size:
        _merge(delete_size_is_exceeded(*model, limit=limit_size))

    full_result['size'] /= 1024.0 * 1024  # bytes -> megabytes for the report
    log_id = str(uuid.uuid4())  # correlates the Sentry event with log lines
    splunk_url = get_splunk_url(dict(log_id=log_id))
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    raven_extra = dict(id=log_id,
                       splunk_url=splunk_url,
                       splunk_filter=splunk_filter)
    raven.captureMessage(
        "[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %.2f Mb[%s]"
        % (full_result['count'], model[1], full_result['size'], log_id),
        data=dict(level=20, logger='limitation'),
        extra=raven_extra)

    extra = dict(log_id=log_id,
                 meta=True,
                 count=full_result['count'],
                 size=full_result['size'],
                 model=model[1],
                 limit_duplicated=limit_duplicated,
                 limit_size=limit_size,
                 limit_days=limit_days,
                 reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        element.update(dict(log_id=log_id))
        logger.info(
            add_extra_to_log_message('Manual cleanup element', extra=element))
    def test_feedbacks(self):
        """delete_older_than removes every Feedback older than the configured
        storage period and reports each deleted row."""
        old_date = timezone.now() - timezone.timedelta(days=5)
        gpm['Feedback__limit_storage_days'] = 2
        FeedbackFactory.create_batch(10, created=old_date)
        Feedback.objects.update(created=old_date)
        self.assertEqual(Feedback.objects.all().count(), 10)

        # Build the expected payload as a real list: under Python 3 `map`
        # returns a lazy iterator, which never compares equal to the list
        # inside the result dict, so assertDictEqual would always fail.
        deleted = [dict(id=pk,
                        element_created=created.strftime("%d. %B %Y %I:%M%p"))
                   for pk, created in Feedback.objects.values_list('id', 'created')]

        result = delete_older_than('feedback', 'Feedback')

        self.assertDictEqual(result, dict(count=10, size=0, elements=deleted))
        self.assertEqual(Feedback.objects.all().count(), 0)
    def test_feedbacks(self):
        """delete_older_than removes every over-age Feedback and reports each
        deleted row as an (id, 'Created: ...') tuple."""
        old_date = timezone.now() - timezone.timedelta(days=5)
        gpm["Feedback__limit_storage_days"] = 2
        FeedbackFactory.create_batch(10, created=old_date)
        Feedback.objects.update(created=old_date)
        self.assertEqual(Feedback.objects.all().count(), 10)

        created_to_string = lambda x: "Created: %s" % x.strftime("%d. %B %Y %I:%M%p")
        # Materialize as a list: in Python 3 `map` yields a lazy iterator that
        # never compares equal to a list, breaking assertDictEqual below.
        deleted = [(pk, created_to_string(created))
                   for pk, created in Feedback.objects.values_list("id", "created")]

        result = delete_older_than("feedback", "Feedback")
        self.assertDictEqual(result, dict(count=10, size=0, elements=deleted))
        self.assertEqual(Feedback.objects.all().count(), 0)
Example #6
0
def auto_delete_older_than():
    """Periodic task: delete Crash and Feedback rows older than the configured
    storage period and send one summary event per model to Sentry (raven)."""
    for app_field, verbose_name in (('crash', 'Crash'), ('feedback', 'Feedback')):
        outcome = delete_older_than(app_field, verbose_name)
        if not outcome.get('count', 0):
            continue  # nothing removed for this model
        outcome['size'] /= 1024.0 * 1024  # bytes -> megabytes for the report
        raven.captureMessage(
            "[Limitation]Periodic task 'Older than' cleaned up %d %s, total size of cleaned space is %.2f Mb[%d]" %
            (outcome['count'], verbose_name, outcome['size'], time.time()),
            data=dict(level=20, logger='limitation'),
            extra=dict(elements=outcome['elements']))
    def test_crashes(self):
        """delete_older_than removes every Crash older than the configured
        storage period and reports each deleted row's fields."""
        old_date = timezone.now() - timezone.timedelta(days=5)
        gpm['Crash__limit_storage_days'] = 2
        CrashFactory.create_batch(10, created=old_date)
        Crash.objects.update(created=old_date)
        self.assertEqual(Crash.objects.all().count(), 10)

        # Build the expected payload as a real list: under Python 3 `map`
        # returns a lazy iterator, which never compares equal to the list
        # inside the result dict, so assertDictEqual would always fail.
        deleted = [dict(id=row[0],
                        element_created=row[1].strftime("%d. %B %Y %I:%M%p"),
                        signature=row[2], userid=row[3], appid=row[4])
                   for row in Crash.objects.values_list('id', 'created', 'signature', 'userid', 'appid')]

        result = delete_older_than('crash', 'Crash')

        self.assertDictEqual(result, dict(count=10, size=0, elements=deleted))
        self.assertEqual(Crash.objects.all().count(), 0)
    def test_crashes(self):
        """delete_older_than removes every over-age Crash and reports each
        deleted row as an (id, 'Created: ...', 'Signature: ...') tuple."""
        old_date = timezone.now() - timezone.timedelta(days=5)
        gpm["Crash__limit_storage_days"] = 2
        CrashFactory.create_batch(10, created=old_date)
        Crash.objects.update(created=old_date)
        self.assertEqual(Crash.objects.all().count(), 10)

        created_to_string = lambda x: "Created: %s" % x.strftime("%d. %B %Y %I:%M%p")
        signature_to_string = lambda x: "Signature: %s" % x
        # Materialize as a list: in Python 3 `map` yields a lazy iterator that
        # never compares equal to a list, breaking assertDictEqual below.
        deleted = [(pk, created_to_string(created), signature_to_string(sig))
                   for pk, created, sig in Crash.objects.values_list("id", "created", "signature")]

        result = delete_older_than("crash", "Crash")
        self.assertDictEqual(result, dict(count=10, size=0, elements=deleted))
        self.assertEqual(Crash.objects.all().count(), 0)
Example #9
0
    def test_feedbacks(self):
        """delete_older_than removes every Feedback older than the configured
        storage period and reports each deleted row."""
        old_date = timezone.now() - timezone.timedelta(days=5)
        gpm['Feedback__limit_storage_days'] = 2
        FeedbackFactory.create_batch(10, created=old_date)
        Feedback.objects.update(created=old_date)
        self.assertEqual(Feedback.objects.all().count(), 10)

        # Build the expected payload as a real list: under Python 3 `map`
        # returns a lazy iterator, which never compares equal to the list
        # inside the result dict, so assertDictEqual would always fail.
        deleted = [dict(id=pk,
                        element_created=created.strftime("%d. %B %Y %I:%M%p"))
                   for pk, created in Feedback.objects.values_list('id', 'created')]

        result = delete_older_than('feedback', 'Feedback')

        self.assertDictEqual(result, dict(count=10, size=0, elements=deleted))
        self.assertEqual(Feedback.objects.all().count(), 0)
Example #10
0
def auto_delete_older_than():
    """Periodic task: purge Crash and Feedback records past their configured
    storage period, reporting every cleanup to Sentry (raven) and to the
    'limitation' logger under a shared log_id."""
    logger = logging.getLogger('limitation')
    for pair in (('crash', 'Crash'), ('feedback', 'Feedback')):
        outcome = delete_older_than(*pair)
        if not outcome.get('count', 0):
            continue  # nothing removed for this model
        name = pair[1]
        log_id = str(uuid.uuid4())  # ties the Sentry event to the log lines
        splunk_url = get_splunk_url(dict(log_id=log_id))
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        removed_ids = sorted(element['id'] for element in outcome['elements'])
        # filesizeformat uses a non-breaking space; normalize for plain logs.
        size_text = filters.filesizeformat(outcome['size']).replace(
            u'\xa0', u' ')
        raven.captureMessage(
            "[Limitation]Periodic task 'Older than' cleaned up %d %s, total size of cleaned space is %s [%d]"
            % (outcome['count'], name, size_text, time.time()),
            data=dict(level=20, logger='limitation'),
            extra={
                "id": log_id,
                "splunk_url": splunk_url,
                "splunk_filter": splunk_filter,
                "%s_list" % name: removed_ids
            })
        logger.info(add_extra_to_log_message(
            'Automatic cleanup',
            extra=dict(log_id=log_id,
                       meta=True,
                       count=outcome['count'],
                       size=size_text,
                       model=name,
                       reason='old')))
        for element in outcome['elements']:
            # Rename the generic 'id' key to e.g. 'Crash_id' for the log line.
            element.update({
                "log_id": log_id,
                "%s_id" % name: element.pop('id')
            })
            logger.info(add_extra_to_log_message('Automatic cleanup element',
                                                 extra=element))
Example #11
0
    def test_crashes(self):
        """delete_older_than removes every Crash older than the configured
        storage period and reports each deleted row's fields."""
        old_date = timezone.now() - timezone.timedelta(days=5)
        gpm['Crash__limit_storage_days'] = 2
        CrashFactory.create_batch(10, created=old_date)
        Crash.objects.update(created=old_date)
        self.assertEqual(Crash.objects.all().count(), 10)

        # Build the expected payload as a real list: under Python 3 `map`
        # returns a lazy iterator, which never compares equal to the list
        # inside the result dict, so assertDictEqual would always fail.
        deleted = [dict(id=row[0],
                        element_created=row[1].strftime("%d. %B %Y %I:%M%p"),
                        signature=row[2],
                        userid=row[3],
                        appid=row[4])
                   for row in Crash.objects.values_list(
                       'id', 'created', 'signature', 'userid', 'appid')]

        result = delete_older_than('crash', 'Crash')

        self.assertDictEqual(result, dict(count=10, size=0, elements=deleted))
        self.assertEqual(Crash.objects.all().count(), 0)
def deferred_manual_cleanup(model, limit_size=None, limit_days=None, limit_duplicated=None):
    """Run the enabled cleanup passes for *model* and report the combined
    result to Sentry and the 'limitation' logger under one shared log_id.

    model: ('app_label', 'ModelName') pair, unpacked into the delete_* helpers.
    limit_size / limit_days / limit_duplicated: each truthy value enables the
    corresponding cleanup pass; None skips it.
    """
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])

    def _merge(result):
        # Fold one cleanup pass into the running totals; empty passes add nothing.
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_duplicated:
        _merge(delete_duplicate_crashes(limit=limit_duplicated))
    if limit_days:
        _merge(delete_older_than(*model, limit=limit_days))
    if limit_size:
        _merge(delete_size_is_exceeded(*model, limit=limit_size))

    full_result['size'] /= 1024.0 * 1024  # bytes -> megabytes for the report
    log_id = str(uuid.uuid4())  # correlates the Sentry event with log lines
    splunk_url = get_splunk_url(dict(log_id=log_id))
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    raven_extra = dict(id=log_id, splunk_url=splunk_url, splunk_filter=splunk_filter)
    raven.captureMessage("[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %.2f Mb[%s]" %
                         (full_result['count'], model[1], full_result['size'], log_id),
                         data=dict(level=20, logger='limitation'), extra=raven_extra)

    extra = dict(log_id=log_id, meta=True, count=full_result['count'], size=full_result['size'], model=model[1],
                 limit_duplicated=limit_duplicated, limit_size=limit_size, limit_days=limit_days, reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        element.update(dict(log_id=log_id))
        logger.info(add_extra_to_log_message('Manual cleanup element', extra=element))
Example #13
0
def deferred_manual_cleanup(model, limit_size=None, limit_days=None, limit_duplicated=None):
    """Run the enabled cleanup passes for *model* and report the combined
    result to Sentry and the 'limitation' logger under one shared log_id.

    model: ('app_label', 'ModelName') pair, unpacked into the delete_* helpers.
    limit_size / limit_days / limit_duplicated: each truthy value enables the
    corresponding cleanup pass; None skips it.
    """
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])

    def _merge(result):
        # Fold one cleanup pass into the running totals; empty passes add nothing.
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_duplicated:
        _merge(delete_duplicate_crashes(limit=limit_duplicated))
    if limit_days:
        _merge(delete_older_than(*model, limit=limit_days))
    if limit_size:
        _merge(delete_size_is_exceeded(*model, limit=limit_size))

    log_id = str(uuid.uuid4())  # correlates the Sentry event with log lines
    splunk_url = get_splunk_url(dict(log_id=log_id))
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    ids_list = sorted(element['id'] for element in full_result['elements'])
    # Human-readable size; computed once (it was formatted twice before), and
    # filesizeformat's non-breaking space is normalized for plain-text logs.
    size_text = filters.filesizeformat(full_result['size']).replace(u'\xa0', u' ')
    raven_extra = {"id": log_id, "splunk_url": splunk_url, "splunk_filter": splunk_filter, "%s_list" % (model[1]): ids_list}
    raven.captureMessage("[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %s [%s]" %
                         (full_result['count'], model[1], size_text, log_id),
                         data=dict(level=20, logger='limitation'), extra=raven_extra)

    extra = dict(log_id=log_id, meta=True, count=full_result['count'], size=size_text, model=model[1],
                 limit_duplicated=limit_duplicated, limit_size=limit_size, limit_days=limit_days, reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        # Rename the generic 'id' key to e.g. 'Crash_id' for the log line.
        element.update({"log_id": log_id, "%s_id" % (model[1]): element.pop('id')})
        logger.info(add_extra_to_log_message('Manual cleanup element', extra=element))
Example #14
0
def auto_delete_older_than():
    """Periodic task: remove Crash and Feedback entries past their configured
    storage period and report each cleanup to Sentry (raven) and to the
    'limitation' logger, correlated by a fresh log_id."""
    logger = logging.getLogger('limitation')
    targets = (('crash', 'Crash'), ('feedback', 'Feedback'))
    for field_name, verbose_name in targets:
        outcome = delete_older_than(field_name, verbose_name)
        if not outcome.get('count', 0):
            continue  # nothing removed for this model
        log_id = str(uuid.uuid4())  # ties the Sentry event to the log lines
        splunk_url = get_splunk_url(dict(log_id=log_id))
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        # filesizeformat uses a non-breaking space; normalize for plain logs.
        readable_size = filters.filesizeformat(outcome['size']).replace(u'\xa0', u' ')
        raven.captureMessage(
            "[Limitation]Periodic task 'Older than' cleaned up %d %s, total size of cleaned space is %s [%d]" %
            (outcome['count'], verbose_name, readable_size, time.time()),
            data=dict(level=20, logger='limitation'),
            extra=dict(id=log_id, splunk_url=splunk_url, splunk_filter=splunk_filter))
        logger.info(add_extra_to_log_message(
            'Automatic cleanup',
            extra=dict(log_id=log_id, meta=True, count=outcome['count'],
                       size=readable_size, model=verbose_name, reason='old')))
        for element in outcome['elements']:
            element['log_id'] = log_id
            logger.info(add_extra_to_log_message('Automatic cleanup element', extra=element))