Example #1
def auto_delete_duplicate_crashes():
    logger = logging.getLogger('limitation')
    result = delete_duplicate_crashes()
    if result.get('count', 0):
        result['size'] /= 1024.0 * 1024  # convert the reported size from bytes to megabytes
        log_id = str(uuid.uuid4())
        params = dict(log_id=log_id)
        splunk_url = get_splunk_url(params)
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        raven_extra = dict(id=log_id,
                           splunk_url=splunk_url,
                           splunk_filter=splunk_filter)
        raven.captureMessage(
            "[Limitation]Periodic task 'Duplicated' cleaned up %d crashes, total size of cleaned space is %.2f Mb[%d]"
            % (result['count'], result['size'], time.time()),
            data=dict(level=20, logger='limitation'),  # 20 == logging.INFO
            extra=raven_extra)
        extra = dict(log_id=log_id,
                     meta=True,
                     count=result['count'],
                     size=result['size'],
                     reason='duplicated',
                     model='Crash')
        logger.info(add_extra_to_log_message('Automatic cleanup', extra=extra))
        for element in result['elements']:
            element.update(dict(log_id=log_id))
            logger.info(
                add_extra_to_log_message('Automatic cleanup element',
                                         extra=element))
Example #2
def auto_delete_duplicate_crashes():
    logger = logging.getLogger('limitation')
    result = delete_duplicate_crashes()
    if result.get('count', 0):
        log_id = str(uuid.uuid4())
        params = dict(log_id=log_id)
        splunk_url = get_splunk_url(params)
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        ids_list = sorted([element['id'] for element in result['elements']])
        raven_extra = {
            "id": log_id,
            "splunk_url": splunk_url,
            "splunk_filter": splunk_filter,
            "crash_list": ids_list
        }
        raven.captureMessage(
            "[Limitation]Periodic task 'Duplicated' cleaned up %d crashes, total size of cleaned space is %s [%d]"
            % (result['count'], filters.filesizeformat(result['size']).replace(
                u'\xa0', u' '), time.time()),  # filesizeformat separates number and unit with a non-breaking space
            data=dict(level=20, logger='limitation'),
            extra=raven_extra)
        extra = dict(log_id=log_id,
                     meta=True,
                     count=result['count'],
                     size=filters.filesizeformat(result['size']).replace(
                         u'\xa0', u' '),
                     reason='duplicated',
                     model='Crash')
        logger.info(add_extra_to_log_message('Automatic cleanup', extra=extra))
        for element in result['elements']:
            element.update({"log_id": log_id, "Crash_id": element.pop('id')})
            logger.info(
                add_extra_to_log_message('Automatic cleanup element',
                                         extra=element))
Example #3
def deferred_manual_cleanup(model, limit_size=None, limit_days=None, limit_duplicated=None):
    full_result = dict(count=0, size=0, elements=[], signatures={})
    if limit_duplicated:
        result = delete_duplicate_crashes(limit=limit_duplicated)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']
            full_result['signatures'].update(result['signatures'])

    if limit_days:
        result = delete_older_than(*model, limit=limit_days)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_size:
        result = delete_size_is_exceeded(*model, limit=limit_size)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    full_result['size'] /= 1024.0 * 1024
    extra = dict(elements=full_result['elements'])
    extra.update(full_result['signatures'])
    raven.captureMessage("[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %.2f Mb[%d]" %
                         (full_result['count'], model[1], full_result['size'], time.time()),
                         data=dict(level=20, logger='limitation'), extra=extra)
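A note on the model argument used here and in the later variants: the call sites delete_older_than(*model, limit=limit_days) and the %s formatted with model[1] imply that model is a pair along the lines of (Crash, 'Crash'), i.e. the model class plus its display name. That shape is an inference from these snippets, not something they show directly.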
Example #4
def auto_delete_duplicate_crashes():
    result = delete_duplicate_crashes()
    if result.get('count', 0):
        result['size'] /= 1024.0 * 1024
        raven.captureMessage("[Limitation]Periodic task 'Duplicated' cleaned up %d crashes, total size of cleaned space is %.2f Mb[%d]" %
                             (result['count'], result['size'], time.time()),
                             data=dict(level=20, logger='limitation'), extra=result['signatures'])
Example #5
    def test_crashes(self):
        gpm['Crash__duplicate_number'] = 10
        CrashFactory.create_batch(25, signature='test1')
        self.assertEqual(Crash.objects.filter(signature='test1').count(), 25)
        CrashFactory.create_batch(9, signature='test2')
        self.assertEqual(Crash.objects.filter(signature='test2').count(), 9)

        deleted = list(
            Crash.objects.filter(
                signature='test1').order_by('created').values_list(
                    'id', 'created', 'signature', 'userid', 'appid'))[:15]
        deleted = [
            dict(id=x[0],
                 element_created=x[1].strftime("%d. %B %Y %I:%M%p"),
                 signature=x[2],
                 userid=x[3],
                 appid=x[4]) for x in deleted
        ]

        result = delete_duplicate_crashes()

        self.assertDictEqual(result, dict(count=15, size=0, elements=deleted))
        self.assertEqual(
            Crash.objects.filter(signature='test1').count(),
            gpm['Crash__duplicate_number'])
        self.assertEqual(Crash.objects.filter(signature='test2').count(), 9)
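The test above pins down the contract of delete_duplicate_crashes(): for each signature it deletes the oldest crashes beyond gpm['Crash__duplicate_number'] (or an explicit limit) and returns dict(count=..., size=..., elements=[...]), where each element records id, element_created, signature, userid and appid. A minimal sketch consistent with that contract follows; it is an illustration only, and the archive_size attribute used for the size accounting is a hypothetical stand-in for however the real project measures freed space.

from django.db.models import Count

def delete_duplicate_crashes(limit=None):
    # Keep only the newest `limit` crashes per signature and report
    # what was deleted. Sketch only -- not the project's implementation.
    limit = limit or gpm['Crash__duplicate_number']
    result = dict(count=0, size=0, elements=[])
    duplicated = (Crash.objects.values('signature')
                  .annotate(dups=Count('signature'))
                  .filter(dups__gt=limit))
    for group in duplicated:
        victims = (Crash.objects.filter(signature=group['signature'])
                   .order_by('created')[:group['dups'] - limit])
        for crash in victims:
            result['count'] += 1
            result['size'] += getattr(crash, 'archive_size', 0) or 0  # hypothetical field
            result['elements'].append(dict(
                id=crash.id,
                element_created=crash.created.strftime("%d. %B %Y %I:%M%p"),
                signature=crash.signature,
                userid=crash.userid,
                appid=crash.appid))
            crash.delete()
    return result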
Example #6
def deferred_manual_cleanup(model,
                            limit_size=None,
                            limit_days=None,
                            limit_duplicated=None):
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])
    if limit_duplicated:
        result = delete_duplicate_crashes(limit=limit_duplicated)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_days:
        result = delete_older_than(*model, limit=limit_days)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_size:
        result = delete_size_is_exceeded(*model, limit=limit_size)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    full_result['size'] /= 1024.0 * 1024
    log_id = str(uuid.uuid4())
    params = dict(log_id=log_id)
    splunk_url = get_splunk_url(params)
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    raven_extra = dict(id=log_id,
                       splunk_url=splunk_url,
                       splunk_filter=splunk_filter)
    raven.captureMessage(
        "[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %.2f Mb[%s]"
        % (full_result['count'], model[1], full_result['size'], log_id),
        data=dict(level=20, logger='limitation'),
        extra=raven_extra)

    extra = dict(log_id=log_id,
                 meta=True,
                 count=full_result['count'],
                 size=full_result['size'],
                 model=model[1],
                 limit_duplicated=limit_duplicated,
                 limit_size=limit_size,
                 limit_days=limit_days,
                 reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        element.update(dict(log_id=log_id))
        logger.info(
            add_extra_to_log_message('Manual cleanup element', extra=element))
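Every variant that emits a log_id also asks get_splunk_url(params) for a deep link and degrades gracefully when it returns None. A plausible sketch of such a helper is below; the SPLUNK_HOST setting name and the URL layout are assumptions for illustration, not the project's actual configuration.

from urllib import quote  # Python 2; urllib.parse.quote on Python 3

from django.conf import settings

def get_splunk_url(params):
    # Build a Splunk search URL for the given key=value filters, or
    # return None when no Splunk host is configured -- the callers
    # above treat None as "Splunk is unavailable".
    host = getattr(settings, 'SPLUNK_HOST', None)  # assumed setting name
    if not host:
        return None
    query = ' '.join('%s=%s' % item for item in sorted(params.items()))
    return 'https://%s/en-US/app/search/search?q=%s' % (
        host, quote('search %s' % query))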
Example #7
    def test_crashes(self):
        gpm['Crash__duplicate_number'] = 10
        CrashFactory.create_batch(25, signature='test1')
        self.assertEqual(Crash.objects.filter(signature='test1').count(), 25)
        CrashFactory.create_batch(9, signature='test2')
        self.assertEqual(Crash.objects.filter(signature='test2').count(), 9)

        deleted = list(
            Crash.objects.filter(signature='test1').values_list(
                'id', 'created', 'signature', 'userid', 'appid'))[:15]
        # list comprehension instead of map() so `deleted` stays a list
        # on Python 3 too, where map() returns an iterator
        deleted = [dict(id=x[0],
                        element_created=x[1].strftime("%d. %B %Y %I:%M%p"),
                        signature=x[2],
                        userid=x[3],
                        appid=x[4]) for x in deleted]

        result = delete_duplicate_crashes()

        self.assertDictEqual(result, dict(count=15, size=0, elements=deleted))
        self.assertEqual(Crash.objects.filter(signature='test1').count(), gpm['Crash__duplicate_number'])
        self.assertEqual(Crash.objects.filter(signature='test2').count(), 9)
Example #8
def auto_delete_duplicate_crashes():
    logger = logging.getLogger('limitation')
    result = delete_duplicate_crashes()
    if result.get('count', 0):
        log_id = str(uuid.uuid4())
        params = dict(log_id=log_id)
        splunk_url = get_splunk_url(params)
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        raven_extra = dict(id=log_id, splunk_url=splunk_url,
                           splunk_filter=splunk_filter)
        raven.captureMessage(
            "[Limitation]Periodic task 'Duplicated' cleaned up %d crashes, total size of cleaned space is %s [%d]" %
            (result['count'], filters.filesizeformat(result['size']).replace(
                u'\xa0', u' '), time.time()),
            data=dict(level=20, logger='limitation'), extra=raven_extra)
        extra = dict(log_id=log_id, meta=True, count=result['count'],
                     size=filters.filesizeformat(result['size']).replace(
                         u'\xa0', u' '),
                     reason='duplicated', model='Crash')
        logger.info(add_extra_to_log_message('Automatic cleanup', extra=extra))
        for element in result['elements']:
            element.update(dict(log_id=log_id))
            logger.info(add_extra_to_log_message('Automatic cleanup element', extra=element))
Example #9
    def test_crashes(self):
        gpm["Crash__duplicate_number"] = 10
        CrashFactory.create_batch(20, signature="test1")
        self.assertEqual(Crash.objects.filter(signature="test1").count(), 20)
        CrashFactory.create_batch(9, signature="test2")
        self.assertEqual(Crash.objects.filter(signature="test2").count(), 9)

        created_to_string = lambda x: "Created: %s" % x.strftime("%d. %B %Y %I:%M%p")
        signature_to_string = lambda x: "Signature: %s" % x
        deleted = list(Crash.objects.filter(signature="test1").values_list(
            "id", "created", "signature"))[:10]
        # list comprehension instead of map() so `deleted` is a list on
        # Python 3 too, which the assertDictEqual below relies on
        deleted = [(x[0], created_to_string(x[1]), signature_to_string(x[2]))
                   for x in deleted]
        signatures = dict(test1=deleted)

        result = delete_duplicate_crashes()
        self.assertDictEqual(
            result,
            dict(count=10, size=0, elements=deleted, signatures=signatures))
        self.assertEqual(Crash.objects.filter(signature="test1").count(),
                         gpm["Crash__duplicate_number"])
        self.assertEqual(Crash.objects.filter(signature="test2").count(), 9)
Example #10
def deferred_manual_cleanup(model, limit_size=None, limit_days=None, limit_duplicated=None):
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])
    if limit_duplicated:
        result = delete_duplicate_crashes(limit=limit_duplicated)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_days:
        result = delete_older_than(*model, limit=limit_days)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_size:
        result = delete_size_is_exceeded(*model, limit=limit_size)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    full_result['size'] /= 1024.0 * 1024
    log_id = str(uuid.uuid4())
    params = dict(log_id=log_id)
    splunk_url = get_splunk_url(params)
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    raven_extra = dict(id=log_id, splunk_url=splunk_url, splunk_filter=splunk_filter)
    raven.captureMessage("[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %.2f Mb[%s]" %
                         (full_result['count'], model[1], full_result['size'], log_id),
                         data=dict(level=20, logger='limitation'), extra=raven_extra)

    extra = dict(log_id=log_id, meta=True, count=full_result['count'], size=full_result['size'], model=model[1],
                 limit_duplicated=limit_duplicated, limit_size=limit_size, limit_days=limit_days, reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        element.update(dict(log_id=log_id))
        logger.info(add_extra_to_log_message('Manual cleanup element', extra=element))
Example #11
def deferred_manual_cleanup(model, limit_size=None, limit_days=None, limit_duplicated=None):
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])
    if limit_duplicated:
        result = delete_duplicate_crashes(limit=limit_duplicated)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_days:
        result = delete_older_than(*model, limit=limit_days)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_size:
        result = delete_size_is_exceeded(*model, limit=limit_size)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    log_id = str(uuid.uuid4())
    params = dict(log_id=log_id)
    splunk_url = get_splunk_url(params)
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    ids_list = sorted([element['id'] for element in full_result['elements']])
    raven_extra = {"id": log_id, "splunk_url": splunk_url, "splunk_filter": splunk_filter, "%s_list" % (model[1]): ids_list}
    raven.captureMessage("[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %s [%s]" %
                         (full_result['count'], model[1], filters.filesizeformat(full_result['size']).replace(u'\xa0', u' '), log_id),
                         data=dict(level=20, logger='limitation'), extra=raven_extra)

    extra = dict(log_id=log_id, meta=True, count=full_result['count'], size=filters.filesizeformat(full_result['size']).replace(u'\xa0', u' '), model=model[1],
                 limit_duplicated=limit_duplicated, limit_size=limit_size, limit_days=limit_days, reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        element.update({"log_id": log_id, "%s_id" % (model[1]): element.pop('id')})
        logger.info(add_extra_to_log_message('Manual cleanup element', extra=element))