Example #1
0
def auto_delete_duplicate_crashes():
    """Periodic task: delete duplicated Crash rows and report the cleanup.

    Sends one summary event to Raven/Sentry and writes one structured log
    record per deleted element; all records share a ``log_id`` so they can
    be correlated in Splunk.
    """
    logger = logging.getLogger('limitation')
    result = delete_duplicate_crashes()
    if result.get('count', 0):
        log_id = str(uuid.uuid4())
        params = dict(log_id=log_id)
        splunk_url = get_splunk_url(params)
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        ids_list = sorted([element['id'] for element in result['elements']])
        # Human-readable size, computed once: it is used both in the Raven
        # message and in the structured log record below.
        size_human = filters.filesizeformat(result['size']).replace(
            u'\xa0', u' ')
        raven_extra = {
            "id": log_id,
            "splunk_url": splunk_url,
            "splunk_filter": splunk_filter,
            "crash_list": ids_list
        }
        raven.captureMessage(
            "[Limitation]Periodic task 'Duplicated' cleaned up %d crashes, total size of cleaned space is %s [%d]"
            % (result['count'], size_human, time.time()),
            data=dict(level=20, logger='limitation'),
            extra=raven_extra)
        extra = dict(log_id=log_id,
                     meta=True,
                     count=result['count'],
                     size=size_human,
                     reason='duplicated',
                     model='Crash')
        logger.info(add_extra_to_log_message('Automatic cleanup', extra=extra))
        for element in result['elements']:
            # Rename the primary key to "Crash_id" in the per-element record.
            element.update({"log_id": log_id, "Crash_id": element.pop('id')})
            logger.info(
                add_extra_to_log_message('Automatic cleanup element',
                                         extra=element))
Example #2
0
def auto_delete_size_is_exceeded():
    """Periodic task: clean up Crash/Feedback objects once storage size is exceeded."""
    log = logging.getLogger('limitation')
    for field_name, model_name in [('crash', 'Crash'), ('feedback', 'Feedback')]:
        outcome = delete_size_is_exceeded(field_name, model_name)
        if not outcome.get('count', 0):
            continue
        # Bytes -> megabytes for reporting.
        outcome['size'] /= 1024.0 * 1024
        log_id = str(uuid.uuid4())
        splunk_url = get_splunk_url({'log_id': log_id})
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        raven.captureMessage(
            "[Limitation]Periodic task 'Size is exceeded' cleaned up %d %s, total size of cleaned space is %.2f Mb[%d]"
            % (outcome['count'], model_name, outcome['size'], time.time()),
            data={'level': 20, 'logger': 'limitation'},
            extra={'id': log_id,
                   'splunk_url': splunk_url,
                   'splunk_filter': splunk_filter})
        summary = {
            'log_id': log_id,
            'meta': True,
            'count': outcome['count'],
            'size': outcome['size'],
            'model': model_name,
            'reason': 'size_is_exceeded',
        }
        log.info(add_extra_to_log_message('Automatic cleanup', extra=summary))
        for element in outcome['elements']:
            element['log_id'] = log_id
            log.info(add_extra_to_log_message('Automatic cleanup element',
                                              extra=element))
Example #3
0
def auto_delete_duplicate_crashes():
    """Periodic task: delete duplicated Crash objects; report to Raven and logs."""
    log = logging.getLogger('limitation')
    outcome = delete_duplicate_crashes()
    if not outcome.get('count', 0):
        return
    # Bytes -> megabytes for reporting.
    outcome['size'] /= 1024.0 * 1024
    log_id = str(uuid.uuid4())
    splunk_url = get_splunk_url({'log_id': log_id})
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    raven.captureMessage(
        "[Limitation]Periodic task 'Duplicated' cleaned up %d crashes, total size of cleaned space is %.2f Mb[%d]"
        % (outcome['count'], outcome['size'], time.time()),
        data={'level': 20, 'logger': 'limitation'},
        extra={'id': log_id,
               'splunk_url': splunk_url,
               'splunk_filter': splunk_filter})
    summary = {
        'log_id': log_id,
        'meta': True,
        'count': outcome['count'],
        'size': outcome['size'],
        'reason': 'duplicated',
        'model': 'Crash',
    }
    log.info(add_extra_to_log_message('Automatic cleanup', extra=summary))
    for element in outcome['elements']:
        element['log_id'] = log_id
        log.info(add_extra_to_log_message('Automatic cleanup element',
                                          extra=element))
Example #4
0
def _merge_cleanup_result(full_result, result):
    """Fold one cleanup step's count/size/elements into the running totals.

    Mutates ``full_result`` in place; does nothing when ``result`` is empty.
    """
    if result.get('count', 0):
        full_result['count'] += result['count']
        full_result['size'] += result['size']
        full_result['elements'] += result['elements']


def deferred_manual_cleanup(model,
                            limit_size=None,
                            limit_days=None,
                            limit_duplicated=None):
    """Manually triggered cleanup for one model.

    Args:
        model: (field_name, model_name) pair, e.g. ('crash', 'Crash').
        limit_size: if set, delete objects exceeding this size limit.
        limit_days: if set, delete objects older than this limit.
        limit_duplicated: if set, delete duplicated crashes beyond this limit.

    Sends a summary event to Raven/Sentry and writes one structured log
    record per deleted element, all correlated by a shared ``log_id``.
    """
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])
    if limit_duplicated:
        _merge_cleanup_result(full_result,
                              delete_duplicate_crashes(limit=limit_duplicated))

    if limit_days:
        _merge_cleanup_result(full_result,
                              delete_older_than(*model, limit=limit_days))

    if limit_size:
        _merge_cleanup_result(full_result,
                              delete_size_is_exceeded(*model, limit=limit_size))

    # Accumulated bytes -> megabytes for reporting.
    full_result['size'] /= 1024.0 * 1024
    log_id = str(uuid.uuid4())
    params = dict(log_id=log_id)
    splunk_url = get_splunk_url(params)
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    raven_extra = dict(id=log_id,
                       splunk_url=splunk_url,
                       splunk_filter=splunk_filter)
    raven.captureMessage(
        "[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %.2f Mb[%s]"
        % (full_result['count'], model[1], full_result['size'], log_id),
        data=dict(level=20, logger='limitation'),
        extra=raven_extra)

    extra = dict(log_id=log_id,
                 meta=True,
                 count=full_result['count'],
                 size=full_result['size'],
                 model=model[1],
                 limit_duplicated=limit_duplicated,
                 limit_size=limit_size,
                 limit_days=limit_days,
                 reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        element.update(dict(log_id=log_id))
        logger.info(
            add_extra_to_log_message('Manual cleanup element', extra=element))
Example #5
0
def auto_delete_duplicate_crashes():
    """Periodic task: delete duplicated crashes and report the cleanup to Raven and the 'limitation' log."""
    logger = logging.getLogger('limitation')
    result = delete_duplicate_crashes()
    if result.get('count', 0):
        log_id = str(uuid.uuid4())
        params = dict(log_id=log_id)
        splunk_url = get_splunk_url(params)
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        # Human-readable size, computed once (used by both the Raven message
        # and the structured log record).
        size_human = filters.filesizeformat(result['size']).replace(u'\xa0', u' ')
        raven_extra = dict(id=log_id, splunk_url=splunk_url, splunk_filter=splunk_filter)
        raven.captureMessage("[Limitation]Periodic task 'Duplicated' cleaned up %d crashes, total size of cleaned space is %s [%d]" %
                             (result['count'], size_human, time.time()),
                             data=dict(level=20, logger='limitation'), extra=raven_extra)
        extra = dict(log_id=log_id, meta=True, count=result['count'], size=size_human, reason='duplicated', model='Crash')
        logger.info(add_extra_to_log_message('Automatic cleanup', extra=extra))
        for element in result['elements']:
            element.update(dict(log_id=log_id))
            logger.info(add_extra_to_log_message('Automatic cleanup element', extra=element))
Example #6
0
def auto_delete_size_is_exceeded():
    """Periodic task: clean up Crash/Feedback objects when storage size is exceeded.

    For each model, sends a summary event to Raven/Sentry and writes one
    structured log record per deleted element, correlated by a shared log_id.
    """
    logger = logging.getLogger('limitation')
    model_list = [('crash', 'Crash'), ('feedback', 'Feedback')]
    for model in model_list:
        result = delete_size_is_exceeded(*model)
        if result.get('count', 0):
            log_id = str(uuid.uuid4())
            params = dict(log_id=log_id)
            splunk_url = get_splunk_url(params)
            splunk_filter = 'log_id=%s' % log_id if splunk_url else None
            ids_list = sorted(
                [element['id'] for element in result['elements']])
            # Human-readable size, computed once for both the Raven message
            # and the structured log record.
            size_human = filters.filesizeformat(result['size']).replace(
                u'\xa0', u' ')
            raven_extra = {
                "id": log_id,
                "splunk_url": splunk_url,
                "splunk_filter": splunk_filter,
                "%s_list" % (model[1]): ids_list
            }
            raven.captureMessage(
                "[Limitation]Periodic task 'Size is exceeded' cleaned up %d %s, total size of cleaned space is %s [%d]"
                % (result['count'], model[1], size_human, time.time()),
                data=dict(level=20, logger='limitation'),
                extra=raven_extra)
            extra = dict(log_id=log_id,
                         meta=True,
                         count=result['count'],
                         size=size_human,
                         model=model[1],
                         reason='size_is_exceeded')
            logger.info(
                add_extra_to_log_message('Automatic cleanup', extra=extra))
            for element in result['elements']:
                # Rename the primary key to e.g. "Crash_id" per element.
                element.update({
                    "log_id": log_id,
                    "%s_id" % (model[1]): element.pop('id')
                })
                logger.info(
                    add_extra_to_log_message('Automatic cleanup element',
                                             extra=element))
def deferred_manual_cleanup(model, limit_size=None, limit_days=None, limit_duplicated=None):
    """Run the requested manual cleanup steps for one (field, Model) pair and report totals."""
    log = logging.getLogger('limitation')
    totals = {'count': 0, 'size': 0, 'elements': []}

    partial_results = []
    if limit_duplicated:
        partial_results.append(delete_duplicate_crashes(limit=limit_duplicated))
    if limit_days:
        partial_results.append(delete_older_than(*model, limit=limit_days))
    if limit_size:
        partial_results.append(delete_size_is_exceeded(*model, limit=limit_size))
    for partial in partial_results:
        if partial.get('count', 0):
            totals['count'] += partial['count']
            totals['size'] += partial['size']
            totals['elements'] += partial['elements']

    # Bytes -> megabytes for reporting.
    totals['size'] /= 1024.0 * 1024
    log_id = str(uuid.uuid4())
    splunk_url = get_splunk_url({'log_id': log_id})
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    raven.captureMessage("[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %.2f Mb[%s]" %
                         (totals['count'], model[1], totals['size'], log_id),
                         data={'level': 20, 'logger': 'limitation'},
                         extra={'id': log_id, 'splunk_url': splunk_url,
                                'splunk_filter': splunk_filter})

    summary = {
        'log_id': log_id,
        'meta': True,
        'count': totals['count'],
        'size': totals['size'],
        'model': model[1],
        'limit_duplicated': limit_duplicated,
        'limit_size': limit_size,
        'limit_days': limit_days,
        'reason': 'manual',
    }
    log.info(add_extra_to_log_message('Manual cleanup', extra=summary))
    for element in totals['elements']:
        element['log_id'] = log_id
        log.info(add_extra_to_log_message('Manual cleanup element', extra=element))
Example #8
0
def deferred_manual_cleanup(model, limit_size=None, limit_days=None, limit_duplicated=None):
    """Manually triggered cleanup for one model.

    Args:
        model: (field_name, model_name) pair, e.g. ('crash', 'Crash').
        limit_size: if set, delete objects exceeding this size limit.
        limit_days: if set, delete objects older than this limit.
        limit_duplicated: if set, delete duplicated crashes beyond this limit.

    Sends a summary event to Raven/Sentry and writes one structured log
    record per deleted element, all correlated by a shared ``log_id``.
    """
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])
    if limit_duplicated:
        result = delete_duplicate_crashes(limit=limit_duplicated)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_days:
        result = delete_older_than(*model, limit=limit_days)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    if limit_size:
        result = delete_size_is_exceeded(*model, limit=limit_size)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']

    log_id = str(uuid.uuid4())
    params = dict(log_id=log_id)
    splunk_url = get_splunk_url(params)
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    ids_list = sorted([element['id'] for element in full_result['elements']])
    # Human-readable size, computed once: used by both the Raven message and
    # the structured log record.
    size_human = filters.filesizeformat(full_result['size']).replace(u'\xa0', u' ')
    raven_extra = {"id": log_id, "splunk_url": splunk_url, "splunk_filter": splunk_filter, "%s_list" % (model[1]): ids_list}
    raven.captureMessage("[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %s [%s]" %
                         (full_result['count'], model[1], size_human, log_id),
                         data=dict(level=20, logger='limitation'), extra=raven_extra)

    extra = dict(log_id=log_id, meta=True, count=full_result['count'], size=size_human, model=model[1],
                 limit_duplicated=limit_duplicated, limit_size=limit_size, limit_days=limit_days, reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        # Rename the primary key to e.g. "Crash_id" in the per-element record.
        element.update({"log_id": log_id, "%s_id" % (model[1]): element.pop('id')})
        logger.info(add_extra_to_log_message('Manual cleanup element', extra=element))
Example #9
0
def auto_delete_older_than():
    """Periodic task: delete Crash/Feedback objects older than the retention limit and report the cleanup."""
    logger = logging.getLogger('limitation')
    model_list = [
        ('crash', 'Crash'),
        ('feedback', 'Feedback')
    ]
    for model in model_list:
        result = delete_older_than(*model)
        if result.get('count', 0):
            log_id = str(uuid.uuid4())
            params = dict(log_id=log_id)
            splunk_url = get_splunk_url(params)
            splunk_filter = 'log_id=%s' % log_id if splunk_url else None
            # Human-readable size, computed once (used by both the Raven
            # message and the structured log record).
            size_human = filters.filesizeformat(result['size']).replace(u'\xa0', u' ')
            raven_extra = dict(id=log_id, splunk_url=splunk_url, splunk_filter=splunk_filter)
            raven.captureMessage("[Limitation]Periodic task 'Older than' cleaned up %d %s, total size of cleaned space is %s [%d]" %
                                 (result['count'], model[1], size_human, time.time()),
                                 data=dict(level=20, logger='limitation'), extra=raven_extra)
            extra = dict(log_id=log_id, meta=True, count=result['count'], size=size_human, model=model[1], reason='old')
            logger.info(add_extra_to_log_message('Automatic cleanup', extra=extra))
            for element in result['elements']:
                element.update(dict(log_id=log_id))
                logger.info(add_extra_to_log_message('Automatic cleanup element', extra=element))
Example #10
0
def auto_delete_size_is_exceeded():
    """Periodic task: clean up Crash/Feedback objects when storage size is exceeded, reporting each deletion."""
    logger = logging.getLogger('limitation')
    model_list = [
        ('crash', 'Crash'),
        ('feedback', 'Feedback')
    ]
    for model in model_list:
        result = delete_size_is_exceeded(*model)
        if result.get('count', 0):
            log_id = str(uuid.uuid4())
            params = dict(log_id=log_id)
            splunk_url = get_splunk_url(params)
            splunk_filter = 'log_id=%s' % log_id if splunk_url else None
            ids_list = sorted([element['id'] for element in result['elements']])
            # Human-readable size, computed once (used by both the Raven
            # message and the structured log record).
            size_human = filters.filesizeformat(result['size']).replace(u'\xa0', u' ')
            raven_extra = {"id": log_id, "splunk_url": splunk_url, "splunk_filter": splunk_filter, "%s_list" % (model[1]): ids_list}
            raven.captureMessage("[Limitation]Periodic task 'Size is exceeded' cleaned up %d %s, total size of cleaned space is %s [%d]" %
                                 (result['count'], model[1], size_human, time.time()),
                                 data=dict(level=20, logger='limitation'), extra=raven_extra)
            extra = dict(log_id=log_id, meta=True, count=result['count'], size=size_human, model=model[1], reason='size_is_exceeded')
            logger.info(add_extra_to_log_message('Automatic cleanup', extra=extra))
            for element in result['elements']:
                # Rename the primary key to e.g. "Crash_id" per element.
                element.update({"log_id": log_id, "%s_id" % (model[1]): element.pop('id')})
                logger.info(add_extra_to_log_message('Automatic cleanup element', extra=element))
Example #11
0
 def test_add_extra_to_log_message(self):
     """get_splunk_url renders the search params sorted alphabetically."""
     # NOTE(review): the method name says add_extra_to_log_message but the
     # body exercises get_splunk_url — the name looks stale; confirm intent.
     query_params = dict(a=1, c=3, b=2, d=4)
     expected = 'http://splunk.example.com/en-US/app/search/search?q=search a=1 b=2 c=3 d=4'
     self.assertEqual(get_splunk_url(query_params), expected)
Example #12
0
 def test_add_extra_to_log_message(self):
     """Splunk URL builder sorts the query params regardless of insertion order."""
     # NOTE(review): name/body mismatch — this tests get_splunk_url, not
     # add_extra_to_log_message; verify the test name is intentional.
     unordered = dict(a=1, c=3, b=2, d=4)
     built_url = get_splunk_url(unordered)
     self.assertEqual(
         built_url,
         'http://splunk.example.com/en-US/app/search/search?q=search a=1 b=2 c=3 d=4')