def test_symbols(self, mocked_get_logger):
    """Manual cleanup by size limit removes enough Symbols to fit 1 GB.

    20 symbols of ~100 MB each are created; cleaning up to a 1 GB limit
    should delete 10 of them (freeing '999.0 MB') and emit one meta log
    line plus one line per deleted element (11 logger.info calls total).
    """
    # NOTE(review): sets the Feedback size limit although this test targets
    # Symbols — appears redundant here since limit_size=1 is passed to
    # deferred_manual_cleanup() explicitly below; confirm intent.
    gpm['Feedback__limit_size'] = 1
    symbols_size = 100 * 1024 * 1023
    symbols = SymbolsFactory.create_batch(20, file_size=symbols_size)
    deleted_symbols = symbols[7]
    self.assertEqual(Symbols.objects.count(), 20)
    extra_meta = dict(count=10, reason='manual', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Symbols', limit_duplicated=None, limit_size=1,
                      limit_days=None, size='999.0 MB')
    log_extra_msg = add_extra_to_log_message('Manual cleanup',
                                             extra=extra_meta)
    # NOTE(review): the per-element key here is plain 'id'; other cleanup
    # code paths rename it to '<Model>_id' — confirm which version of
    # deferred_manual_cleanup this test targets.
    extra = dict(id=deleted_symbols.id,
                 element_created=deleted_symbols.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Manual cleanup element', extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        deferred_manual_cleanup(['crash', 'Symbols'], limit_size=1)
    # 1 meta line + 10 per-element lines.
    self.assertEqual(mocked_logger.info.call_count, 11)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def auto_delete_duplicate_crashes():
    """Periodic task: delete duplicated crash reports and log the cleanup.

    When anything was removed, sends one Raven message, one 'Automatic
    cleanup' meta log line, and one 'Automatic cleanup element' line per
    deleted crash — all correlated by a freshly generated log_id.
    """
    logger = logging.getLogger('limitation')
    result = delete_duplicate_crashes()
    if not result.get('count', 0):
        return
    # Convert the freed size from bytes to megabytes, in place.
    result['size'] /= 1024.0 * 1024
    cleanup_id = str(uuid.uuid4())
    splunk_url = get_splunk_url(dict(log_id=cleanup_id))
    # Only advertise a Splunk filter when a Splunk URL is configured.
    splunk_filter = 'log_id=%s' % cleanup_id if splunk_url else None
    raven.captureMessage(
        "[Limitation]Periodic task 'Duplicated' cleaned up %d crashes, total size of cleaned space is %.2f Mb[%d]"
        % (result['count'], result['size'], time.time()),
        data=dict(level=20, logger='limitation'),
        extra=dict(id=cleanup_id, splunk_url=splunk_url,
                   splunk_filter=splunk_filter))
    meta_extra = dict(log_id=cleanup_id, meta=True, count=result['count'],
                      size=result['size'], reason='duplicated', model='Crash')
    logger.info(add_extra_to_log_message('Automatic cleanup',
                                         extra=meta_extra))
    # One detail line per deleted element, tagged with the same log_id.
    for element in result['elements']:
        element.update(dict(log_id=cleanup_id))
        logger.info(add_extra_to_log_message('Automatic cleanup element',
                                             extra=element))
def test_omaha_versions(self, mocked_get_logger):
    """Manual cleanup by size limit removes one omaha Version.

    Two ~1000 MB versions exceed the 1 GB limit; cleanup should delete one
    ('999.0 MB') and emit 1 meta + 1 element log line (2 info calls).
    """
    gpm['Version__limit_size'] = 1
    version_size = 1000 * 1024 * 1023
    versions = VersionFactory.create_batch(2, file_size=version_size)
    deleted_version = versions[0]
    self.assertEqual(Version.objects.count(), 2)
    extra_meta = dict(count=1, reason='manual', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Version', limit_duplicated=None, limit_size=1,
                      limit_days=None, size='999.0 MB')
    log_extra_msg = add_extra_to_log_message('Manual cleanup',
                                             extra=extra_meta)
    extra = dict(Version_id=deleted_version.id,
                 element_created=deleted_version.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Manual cleanup element', extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        deferred_manual_cleanup(['omaha', 'Version'], limit_size=1)
    self.assertEqual(mocked_logger.info.call_count, 2)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def test_crashes(self, mocked_get_logger):
    """Duplicate-crash cleanup keeps 2 of 10 same-signature crashes.

    With the duplicate limit set to 2, eight of the ten 'test'-signature
    crashes should be deleted and logged.
    """
    gpm['Crash__duplicate_number'] = 2
    crashes = CrashFactory.create_batch(10, signature='test')
    deleted_crash = crashes[7]
    self.assertEqual(Crash.objects.all().count(), 10)
    extra_meta = dict(count=8, reason='duplicated', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Crash', size='0 bytes')
    log_extra_msg = add_extra_to_log_message('Automatic cleanup',
                                             extra=extra_meta)
    extra = dict(Crash_id=deleted_crash.id,
                 element_created=deleted_crash.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 signature=deleted_crash.signature,
                 userid=deleted_crash.userid, appid=deleted_crash.appid,
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Automatic cleanup element',
                                       extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        auto_delete_duplicate_crashes()
    # NOTE(review): 1 meta + 8 element lines would be 9; the asserted 10
    # implies one more info call elsewhere in the task — confirm.
    self.assertEqual(mocked_logger.info.call_count, 10)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def test_feedbacks(self, mocked_get_logger):
    """Older-than cleanup removes all feedbacks past the 2-day limit.

    All 10 feedbacks are created at a frozen 2012 date, so every one is
    older than the limit: 1 meta + 10 element lines (11 info calls).
    """
    gpm['Feedback__limit_storage_days'] = 2
    with freeze_time("2012-12-21 12:00:00"):
        feedbacks = FeedbackFactory.create_batch(10)
    deleted_feedback = feedbacks[-1]
    self.assertEqual(Feedback.objects.all().count(), 10)
    extra_meta = dict(count=10, reason='old', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Feedback', size='0 bytes')
    log_extra_msg = add_extra_to_log_message('Automatic cleanup',
                                             extra=extra_meta)
    extra = dict(Feedback_id=deleted_feedback.id,
                 element_created=deleted_feedback.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Automatic cleanup element',
                                       extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        auto_delete_older_than()
    self.assertEqual(mocked_logger.info.call_count, 11)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def auto_delete_size_is_exceeded():
    """Periodic task: trim Crash and Feedback storage back under the limit.

    For each model that had rows removed, sends one Raven message, one
    'Automatic cleanup' meta log line, and one per-element line, all
    correlated by a freshly generated log_id.
    """
    logger = logging.getLogger('limitation')
    model_list = [('crash', 'Crash'), ('feedback', 'Feedback')]
    for model in model_list:
        result = delete_size_is_exceeded(*model)
        if not result.get('count', 0):
            continue
        # Convert the freed size from bytes to megabytes, in place.
        result['size'] /= 1024.0 * 1024
        cleanup_id = str(uuid.uuid4())
        splunk_url = get_splunk_url(dict(log_id=cleanup_id))
        # Only advertise a Splunk filter when a Splunk URL is configured.
        splunk_filter = 'log_id=%s' % cleanup_id if splunk_url else None
        raven.captureMessage(
            "[Limitation]Periodic task 'Size is exceeded' cleaned up %d %s, total size of cleaned space is %.2f Mb[%d]"
            % (result['count'], model[1], result['size'], time.time()),
            data=dict(level=20, logger='limitation'),
            extra=dict(id=cleanup_id, splunk_url=splunk_url,
                       splunk_filter=splunk_filter))
        meta_extra = dict(log_id=cleanup_id, meta=True,
                          count=result['count'], size=result['size'],
                          model=model[1], reason='size_is_exceeded')
        logger.info(add_extra_to_log_message('Automatic cleanup',
                                             extra=meta_extra))
        # One detail line per deleted element, tagged with the same log_id.
        for element in result['elements']:
            element.update(dict(log_id=cleanup_id))
            logger.info(add_extra_to_log_message('Automatic cleanup element',
                                                 extra=element))
def auto_delete_duplicate_crashes():
    """Periodic task: delete duplicated crash reports and log the cleanup.

    Sends one Raven message (including the sorted list of deleted crash
    ids), one 'Automatic cleanup' meta log line, and one per-element line,
    all correlated by a freshly generated ``log_id``.
    """
    logger = logging.getLogger('limitation')
    result = delete_duplicate_crashes()
    if result.get('count', 0):
        log_id = str(uuid.uuid4())
        params = dict(log_id=log_id)
        splunk_url = get_splunk_url(params)
        # Only advertise a Splunk filter when a Splunk URL is configured.
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        ids_list = sorted([element['id'] for element in result['elements']])
        raven_extra = {
            "id": log_id,
            "splunk_url": splunk_url,
            "splunk_filter": splunk_filter,
            "crash_list": ids_list
        }
        # filesizeformat uses a non-breaking space; normalise it for logs.
        raven.captureMessage(
            "[Limitation]Periodic task 'Duplicated' cleaned up %d crashes, total size of cleaned space is %s [%d]"
            % (result['count'],
               filters.filesizeformat(result['size']).replace(
                   u'\xa0', u' '), time.time()),
            data=dict(level=20, logger='limitation'),
            extra=raven_extra)
        extra = dict(log_id=log_id, meta=True, count=result['count'],
                     size=filters.filesizeformat(result['size']).replace(
                         u'\xa0', u' '),
                     reason='duplicated', model='Crash')
        logger.info(add_extra_to_log_message('Automatic cleanup',
                                             extra=extra))
        for element in result['elements']:
            # Rename the raw 'id' key to 'Crash_id' for the element log.
            element.update({"log_id": log_id, "Crash_id": element.pop('id')})
            logger.info(
                add_extra_to_log_message('Automatic cleanup element',
                                         extra=element))
def deferred_manual_cleanup(model, limit_size=None, limit_days=None,
                            limit_duplicated=None):
    """Manually triggered cleanup for one model, by any mix of limits.

    :param model: two-item sequence (app label, model name), e.g.
        ``['crash', 'Symbols']``.
    :param limit_size: size limit passed to delete_size_is_exceeded.
    :param limit_days: age limit passed to delete_older_than.
    :param limit_duplicated: duplicate limit for delete_duplicate_crashes.

    Aggregates the results of each enabled pass, then emits one Raven
    message, one 'Manual cleanup' meta log line, and one per-element line,
    all correlated by a fresh ``log_id``.
    """
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])
    if limit_duplicated:
        result = delete_duplicate_crashes(limit=limit_duplicated)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']
    if limit_days:
        result = delete_older_than(*model, limit=limit_days)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']
    if limit_size:
        result = delete_size_is_exceeded(*model, limit=limit_size)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']
    # Convert the aggregated size from bytes to megabytes, in place.
    full_result['size'] /= 1024.0 * 1024
    log_id = str(uuid.uuid4())
    params = dict(log_id=log_id)
    splunk_url = get_splunk_url(params)
    # Only advertise a Splunk filter when a Splunk URL is configured.
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    raven_extra = dict(id=log_id, splunk_url=splunk_url,
                       splunk_filter=splunk_filter)
    raven.captureMessage(
        "[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %.2f Mb[%s]"
        % (full_result['count'], model[1], full_result['size'], log_id),
        data=dict(level=20, logger='limitation'), extra=raven_extra)
    extra = dict(log_id=log_id, meta=True, count=full_result['count'],
                 size=full_result['size'], model=model[1],
                 limit_duplicated=limit_duplicated, limit_size=limit_size,
                 limit_days=limit_days, reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        element.update(dict(log_id=log_id))
        logger.info(
            add_extra_to_log_message('Manual cleanup element',
                                     extra=element))
def auto_delete_duplicate_crashes():
    """Periodic task: delete duplicated crash reports and log the cleanup.

    Sends one Raven message, one 'Automatic cleanup' meta log line, and
    one 'Automatic cleanup element' line per deleted crash, all correlated
    by a freshly generated ``log_id``.
    """
    logger = logging.getLogger('limitation')
    result = delete_duplicate_crashes()
    if result.get('count', 0):
        log_id = str(uuid.uuid4())
        params = dict(log_id=log_id)
        splunk_url = get_splunk_url(params)
        # Only advertise a Splunk filter when a Splunk URL is configured.
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        raven_extra = dict(id=log_id, splunk_url=splunk_url,
                           splunk_filter=splunk_filter)
        # filesizeformat uses a non-breaking space; normalise it for logs.
        raven.captureMessage(
            "[Limitation]Periodic task 'Duplicated' cleaned up %d crashes, total size of cleaned space is %s [%d]"
            % (result['count'],
               filters.filesizeformat(result['size']).replace(u'\xa0', u' '),
               time.time()),
            data=dict(level=20, logger='limitation'),
            extra=raven_extra)
        extra = dict(log_id=log_id, meta=True, count=result['count'],
                     size=filters.filesizeformat(result['size']).replace(
                         u'\xa0', u' '),
                     reason='duplicated', model='Crash')
        logger.info(add_extra_to_log_message('Automatic cleanup',
                                             extra=extra))
        for element in result['elements']:
            element.update(dict(log_id=log_id))
            logger.info(add_extra_to_log_message('Automatic cleanup element',
                                                 extra=element))
def auto_delete_size_is_exceeded():
    """Periodic task: trim Crash and Feedback storage back under the limit.

    For each model that had rows removed, sends one Raven message
    (including the sorted deleted ids), one 'Automatic cleanup' meta log
    line, and one per-element line, all correlated by a fresh ``log_id``.
    """
    logger = logging.getLogger('limitation')
    model_list = [('crash', 'Crash'), ('feedback', 'Feedback')]
    for model in model_list:
        result = delete_size_is_exceeded(*model)
        if result.get('count', 0):
            log_id = str(uuid.uuid4())
            params = dict(log_id=log_id)
            splunk_url = get_splunk_url(params)
            # Only advertise a Splunk filter when a Splunk URL exists.
            splunk_filter = 'log_id=%s' % log_id if splunk_url else None
            ids_list = sorted(
                [element['id'] for element in result['elements']])
            raven_extra = {
                "id": log_id,
                "splunk_url": splunk_url,
                "splunk_filter": splunk_filter,
                "%s_list" % (model[1]): ids_list
            }
            # filesizeformat uses a non-breaking space; normalise it.
            raven.captureMessage(
                "[Limitation]Periodic task 'Size is exceeded' cleaned up %d %s, total size of cleaned space is %s [%d]"
                % (result['count'], model[1],
                   filters.filesizeformat(result['size']).replace(
                       u'\xa0', u' '), time.time()),
                data=dict(level=20, logger='limitation'),
                extra=raven_extra)
            extra = dict(log_id=log_id, meta=True, count=result['count'],
                         size=filters.filesizeformat(result['size']).replace(
                             u'\xa0', u' '),
                         model=model[1], reason='size_is_exceeded')
            logger.info(
                add_extra_to_log_message('Automatic cleanup', extra=extra))
            for element in result['elements']:
                # Rename the raw 'id' key to '<Model>_id' for the log.
                element.update({
                    "log_id": log_id,
                    "%s_id" % (model[1]): element.pop('id')
                })
                logger.info(
                    add_extra_to_log_message('Automatic cleanup element',
                                             extra=element))
def deferred_manual_cleanup(model, limit_size=None, limit_days=None,
                            limit_duplicated=None):
    """Manually triggered cleanup for one model, by any mix of limits.

    :param model: two-item sequence (app label, model name), e.g.
        ``['crash', 'Symbols']``.
    :param limit_size: size limit passed to delete_size_is_exceeded.
    :param limit_days: age limit passed to delete_older_than.
    :param limit_duplicated: duplicate limit for delete_duplicate_crashes.

    Aggregates the results of each enabled pass, then emits one Raven
    message (including the sorted deleted ids), one 'Manual cleanup' meta
    log line, and one per-element line, all correlated by a fresh
    ``log_id``.
    """
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])
    if limit_duplicated:
        result = delete_duplicate_crashes(limit=limit_duplicated)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']
    if limit_days:
        result = delete_older_than(*model, limit=limit_days)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']
    if limit_size:
        result = delete_size_is_exceeded(*model, limit=limit_size)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']
    log_id = str(uuid.uuid4())
    params = dict(log_id=log_id)
    splunk_url = get_splunk_url(params)
    # Only advertise a Splunk filter when a Splunk URL is configured.
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    ids_list = sorted([element['id'] for element in full_result['elements']])
    raven_extra = {"id": log_id, "splunk_url": splunk_url,
                   "splunk_filter": splunk_filter,
                   "%s_list" % (model[1]): ids_list}
    # filesizeformat uses a non-breaking space; normalise it for logs.
    raven.captureMessage(
        "[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %s [%s]"
        % (full_result['count'], model[1],
           filters.filesizeformat(full_result['size']).replace(
               u'\xa0', u' '), log_id),
        data=dict(level=20, logger='limitation'), extra=raven_extra)
    extra = dict(log_id=log_id, meta=True, count=full_result['count'],
                 size=filters.filesizeformat(full_result['size']).replace(
                     u'\xa0', u' '),
                 model=model[1], limit_duplicated=limit_duplicated,
                 limit_size=limit_size, limit_days=limit_days,
                 reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        # Rename the raw 'id' key to '<Model>_id' for the element log.
        element.update({"log_id": log_id,
                        "%s_id" % (model[1]): element.pop('id')})
        logger.info(add_extra_to_log_message('Manual cleanup element',
                                             extra=element))
def deferred_manual_cleanup(model, limit_size=None, limit_days=None,
                            limit_duplicated=None):
    """Manually triggered cleanup for one model, by any mix of limits.

    :param model: two-item sequence (app label, model name).
    :param limit_size: size limit passed to delete_size_is_exceeded.
    :param limit_days: age limit passed to delete_older_than.
    :param limit_duplicated: duplicate limit for delete_duplicate_crashes.

    Aggregates the results of each enabled pass, then emits one Raven
    message, one 'Manual cleanup' meta log line, and one per-element line,
    all correlated by a fresh ``log_id``.
    """
    logger = logging.getLogger('limitation')
    full_result = dict(count=0, size=0, elements=[])
    if limit_duplicated:
        result = delete_duplicate_crashes(limit=limit_duplicated)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']
    if limit_days:
        result = delete_older_than(*model, limit=limit_days)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']
    if limit_size:
        result = delete_size_is_exceeded(*model, limit=limit_size)
        if result.get('count', 0):
            full_result['count'] += result['count']
            full_result['size'] += result['size']
            full_result['elements'] += result['elements']
    # Convert the aggregated size from bytes to megabytes, in place.
    full_result['size'] /= 1024.0 * 1024
    log_id = str(uuid.uuid4())
    params = dict(log_id=log_id)
    splunk_url = get_splunk_url(params)
    # Only advertise a Splunk filter when a Splunk URL is configured.
    splunk_filter = 'log_id=%s' % log_id if splunk_url else None
    raven_extra = dict(id=log_id, splunk_url=splunk_url,
                       splunk_filter=splunk_filter)
    raven.captureMessage(
        "[Limitation]Manual cleanup freed %d %s, total size of cleaned space is %.2f Mb[%s]"
        % (full_result['count'], model[1], full_result['size'], log_id),
        data=dict(level=20, logger='limitation'), extra=raven_extra)
    extra = dict(log_id=log_id, meta=True, count=full_result['count'],
                 size=full_result['size'], model=model[1],
                 limit_duplicated=limit_duplicated, limit_size=limit_size,
                 limit_days=limit_days, reason='manual')
    logger.info(add_extra_to_log_message('Manual cleanup', extra=extra))
    for element in full_result['elements']:
        element.update(dict(log_id=log_id))
        logger.info(add_extra_to_log_message('Manual cleanup element',
                                             extra=element))
def test_feedbacks(self, mocked_get_logger):
    """Manual cleanup by size limit removes enough Feedbacks to fit 1 GB.

    20 feedbacks of ~100 MB each; cleaning up to a 1 GB limit should
    delete 10 ('999.0 MB') and emit 1 meta + 10 element log lines.
    """
    gpm['Feedback__limit_size'] = 1
    feedback_size = 100 * 1024 * 1023
    feedbacks = FeedbackFactory.create_batch(20,
                                             screenshot_size=feedback_size,
                                             system_logs_size=0,
                                             attached_file_size=0,
                                             blackbox_size=0)
    deleted_feedback = feedbacks[7]
    self.assertEqual(Feedback.objects.count(), 20)
    extra_meta = dict(count=10, reason='manual', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Feedback', limit_duplicated=None, limit_size=1,
                      limit_days=None, size='999.0 MB')
    log_extra_msg = add_extra_to_log_message('Manual cleanup',
                                             extra=extra_meta)
    extra = dict(Feedback_id=deleted_feedback.id,
                 element_created=deleted_feedback.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Manual cleanup element', extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        deferred_manual_cleanup(['feedback', 'Feedback'], limit_size=1)
    self.assertEqual(mocked_logger.info.call_count, 11)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def test_sparkle_versions(self, mocked_get_logger):
    """Manual cleanup by size limit removes one SparkleVersion.

    Two ~1000 MB versions exceed the 1 GB limit; cleanup should delete one
    ('999.0 MB') and emit 1 meta + 1 element log line (2 info calls).
    """
    gpm['SparkleVersion__limit_size'] = 1
    version_size = 1000*1024*1023
    versions = SparkleVersionFactory.create_batch(2, file_size=version_size)
    deleted_version = versions[0]
    self.assertEqual(SparkleVersion.objects.count(), 2)
    extra_meta = dict(count=1, reason='manual', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='SparkleVersion', limit_duplicated=None,
                      limit_size=1, limit_days=None, size='999.0 MB')
    log_extra_msg = add_extra_to_log_message('Manual cleanup',
                                             extra=extra_meta)
    extra = dict(SparkleVersion_id=deleted_version.id,
                 element_created=deleted_version.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Manual cleanup element', extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        deferred_manual_cleanup(['sparkle', 'SparkleVersion'], limit_size=1)
    self.assertEqual(mocked_logger.info.call_count, 2)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def test_symbols(self, mocked_get_logger):
    """Manual cleanup by size limit removes enough Symbols to fit 1 GB.

    20 symbols of ~100 MB each; cleaning up to a 1 GB limit should delete
    10 ('999.0 MB') and emit 1 meta + 10 element log lines.
    """
    # NOTE(review): sets the Feedback size limit although this test targets
    # Symbols — appears redundant here since limit_size=1 is passed to
    # deferred_manual_cleanup() explicitly below; confirm intent.
    gpm['Feedback__limit_size'] = 1
    symbols_size = 100*1024*1023
    symbols = SymbolsFactory.create_batch(20, file_size=symbols_size)
    deleted_symbols = symbols[7]
    self.assertEqual(Symbols.objects.count(), 20)
    extra_meta = dict(count=10, reason='manual', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Symbols', limit_duplicated=None, limit_size=1,
                      limit_days=None, size='999.0 MB')
    log_extra_msg = add_extra_to_log_message('Manual cleanup',
                                             extra=extra_meta)
    extra = dict(Symbols_id=deleted_symbols.id,
                 element_created=deleted_symbols.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Manual cleanup element', extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        deferred_manual_cleanup(['crash', 'Symbols'], limit_size=1)
    self.assertEqual(mocked_logger.info.call_count, 11)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def auto_delete_older_than():
    """Periodic task: delete Crash and Feedback rows past the age limit.

    For each model that had rows removed, sends one Raven message, one
    'Automatic cleanup' meta log line, and one per-element line, all
    correlated by a fresh ``log_id``.
    """
    logger = logging.getLogger('limitation')
    model_list = [
        ('crash', 'Crash'),
        ('feedback', 'Feedback')
    ]
    for model in model_list:
        result = delete_older_than(*model)
        if result.get('count', 0):
            log_id = str(uuid.uuid4())
            params = dict(log_id=log_id)
            splunk_url = get_splunk_url(params)
            # Only advertise a Splunk filter when a Splunk URL exists.
            splunk_filter = 'log_id=%s' % log_id if splunk_url else None
            raven_extra = dict(id=log_id, splunk_url=splunk_url,
                               splunk_filter=splunk_filter)
            # filesizeformat uses a non-breaking space; normalise it.
            raven.captureMessage(
                "[Limitation]Periodic task 'Older than' cleaned up %d %s, total size of cleaned space is %s [%d]"
                % (result['count'], model[1],
                   filters.filesizeformat(result['size']).replace(
                       u'\xa0', u' '), time.time()),
                data=dict(level=20, logger='limitation'),
                extra=raven_extra)
            extra = dict(log_id=log_id, meta=True, count=result['count'],
                         size=filters.filesizeformat(result['size']).replace(
                             u'\xa0', u' '),
                         model=model[1], reason='old')
            logger.info(add_extra_to_log_message('Automatic cleanup',
                                                 extra=extra))
            for element in result['elements']:
                element.update(dict(log_id=log_id))
                logger.info(
                    add_extra_to_log_message('Automatic cleanup element',
                                             extra=element))
def test_crashes(self, mocked_get_logger):
    """Size-exceeded cleanup trims Crash storage back under 1 GB.

    200 crashes of ~10 MB each; the task should delete 98 ('979.0 MB')
    and emit 1 meta + 98 element log lines (99 info calls).
    """
    gpm['Crash__limit_size'] = 1
    crash_size = 10 * 1024 * 1023
    crashes = CrashFactory.create_batch(200, archive_size=crash_size,
                                        minidump_size=0)
    deleted_crash = crashes[97]
    self.assertEqual(Crash.objects.all().count(), 200)
    extra_meta = dict(count=98, reason='size_is_exceeded', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Crash', size='979.0 MB')
    log_extra_msg = add_extra_to_log_message('Automatic cleanup',
                                             extra=extra_meta)
    extra = dict(Crash_id=deleted_crash.id,
                 element_created=deleted_crash.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 signature=deleted_crash.signature,
                 userid=deleted_crash.userid, appid=deleted_crash.appid,
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Automatic cleanup element',
                                       extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        auto_delete_size_is_exceeded()
    self.assertEqual(mocked_logger.info.call_count, 99)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def test_feedbacks(self, mocked_get_logger):
    """Older-than cleanup removes all feedbacks past the 2-day limit.

    All 10 feedbacks are created at a frozen 2012 date, so every one is
    older than the limit: 1 meta + 10 element lines (11 info calls).
    """
    gpm['Feedback__limit_storage_days'] = 2
    with freeze_time("2012-12-21 12:00:00"):
        feedbacks = FeedbackFactory.create_batch(10)
    deleted_feedback = feedbacks[-1]
    self.assertEqual(Feedback.objects.all().count(), 10)
    extra_meta = dict(count=10, reason='old', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Feedback', size='0 bytes')
    log_extra_msg = add_extra_to_log_message('Automatic cleanup',
                                             extra=extra_meta)
    extra = dict(Feedback_id=deleted_feedback.id,
                 element_created=deleted_feedback.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Automatic cleanup element',
                                       extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        auto_delete_older_than()
    self.assertEqual(mocked_logger.info.call_count, 11)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def test_feedbacks(self, mocked_get_logger):
    """Size-exceeded cleanup trims Feedback storage back under 1 GB.

    200 feedbacks of ~10 MB each; the task should delete 98 ('979.0 MB')
    and emit 1 meta + 98 element log lines (99 info calls).
    """
    gpm['Feedback__limit_size'] = 1
    feedback_size = 10*1024*1023
    feedbacks = FeedbackFactory.create_batch(200,
                                             screenshot_size=feedback_size,
                                             system_logs_size=0,
                                             attached_file_size=0,
                                             blackbox_size=0)
    deleted_feedback = feedbacks[97]
    self.assertEqual(Feedback.objects.all().count(), 200)
    extra_meta = dict(count=98, reason='size_is_exceeded', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Feedback', size='979.0 MB')
    log_extra_msg = add_extra_to_log_message('Automatic cleanup',
                                             extra=extra_meta)
    extra = dict(Feedback_id=deleted_feedback.id,
                 element_created=deleted_feedback.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Automatic cleanup element',
                                       extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        auto_delete_size_is_exceeded()
    self.assertEqual(mocked_logger.info.call_count, 99)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def test_feedbacks(self, mocked_get_logger):
    """Size-exceeded cleanup trims Feedback storage back under 1 GB.

    200 feedbacks of ~10 MB each; the task should delete 98 ('979.0 MB')
    and emit 1 meta + 98 element log lines (99 info calls).
    """
    gpm['Feedback__limit_size'] = 1
    feedback_size = 10*1024*1023
    feedbacks = FeedbackFactory.create_batch(200,
                                             screenshot_size=feedback_size,
                                             system_logs_size=0,
                                             attached_file_size=0,
                                             blackbox_size=0)
    deleted_feedback = feedbacks[97]
    self.assertEqual(Feedback.objects.all().count(), 200)
    extra_meta = dict(count=98, reason='size_is_exceeded', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Feedback', size='979.0 MB')
    log_extra_msg = add_extra_to_log_message('Automatic cleanup',
                                             extra=extra_meta)
    extra = dict(Feedback_id=deleted_feedback.id,
                 element_created=deleted_feedback.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Automatic cleanup element',
                                       extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        auto_delete_size_is_exceeded()
    self.assertEqual(mocked_logger.info.call_count, 99)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def test_feedbacks(self, mocked_get_logger):
    """Manual cleanup by size limit removes enough Feedbacks to fit 1 GB.

    20 feedbacks of ~100 MB each; cleaning up to a 1 GB limit should
    delete 10 ('999.0 MB') and emit 1 meta + 10 element log lines.
    """
    gpm['Feedback__limit_size'] = 1
    feedback_size = 100*1024*1023
    feedbacks = FeedbackFactory.create_batch(20,
                                             screenshot_size=feedback_size,
                                             system_logs_size=0,
                                             attached_file_size=0,
                                             blackbox_size=0)
    deleted_feedback = feedbacks[7]
    self.assertEqual(Feedback.objects.count(), 20)
    extra_meta = dict(count=10, reason='manual', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Feedback', limit_duplicated=None, limit_size=1,
                      limit_days=None, size='999.0 MB')
    log_extra_msg = add_extra_to_log_message('Manual cleanup',
                                             extra=extra_meta)
    extra = dict(Feedback_id=deleted_feedback.id,
                 element_created=deleted_feedback.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Manual cleanup element', extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        deferred_manual_cleanup(['feedback', 'Feedback'], limit_size=1)
    self.assertEqual(mocked_logger.info.call_count, 11)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def test_crashes(self, mocked_get_logger):
    """Duplicate-crash cleanup keeps 2 of 10 same-signature crashes.

    With the duplicate limit set to 2, eight of the ten 'test'-signature
    crashes should be deleted and logged.
    """
    gpm['Crash__duplicate_number'] = 2
    crashes = CrashFactory.create_batch(10, signature='test')
    deleted_crash = crashes[7]
    self.assertEqual(Crash.objects.all().count(), 10)
    extra_meta = dict(count=8, reason='duplicated', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Crash', size='0 bytes')
    log_extra_msg = add_extra_to_log_message('Automatic cleanup',
                                             extra=extra_meta)
    extra = dict(Crash_id=deleted_crash.id,
                 element_created=deleted_crash.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 signature=deleted_crash.signature,
                 userid=deleted_crash.userid, appid=deleted_crash.appid,
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Automatic cleanup element',
                                       extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        auto_delete_duplicate_crashes()
    # NOTE(review): 1 meta + 8 element lines would be 9; the asserted 10
    # implies one more info call elsewhere in the task — confirm.
    self.assertEqual(mocked_logger.info.call_count, 10)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def auto_delete_size_is_exceeded():
    """Periodic task: trim Crash and Feedback storage back under the limit.

    For each model that had rows removed, sends one Raven message
    (including the sorted deleted ids), one 'Automatic cleanup' meta log
    line, and one per-element line, all correlated by a fresh ``log_id``.
    """
    logger = logging.getLogger('limitation')
    model_list = [
        ('crash', 'Crash'),
        ('feedback', 'Feedback')
    ]
    for model in model_list:
        result = delete_size_is_exceeded(*model)
        if result.get('count', 0):
            log_id = str(uuid.uuid4())
            params = dict(log_id=log_id)
            splunk_url = get_splunk_url(params)
            # Only advertise a Splunk filter when a Splunk URL exists.
            splunk_filter = 'log_id=%s' % log_id if splunk_url else None
            ids_list = sorted(
                [element['id'] for element in result['elements']])
            raven_extra = {"id": log_id, "splunk_url": splunk_url,
                           "splunk_filter": splunk_filter,
                           "%s_list" % (model[1]): ids_list}
            # filesizeformat uses a non-breaking space; normalise it.
            raven.captureMessage(
                "[Limitation]Periodic task 'Size is exceeded' cleaned up %d %s, total size of cleaned space is %s [%d]"
                % (result['count'], model[1],
                   filters.filesizeformat(result['size']).replace(
                       u'\xa0', u' '), time.time()),
                data=dict(level=20, logger='limitation'),
                extra=raven_extra)
            extra = dict(log_id=log_id, meta=True, count=result['count'],
                         size=filters.filesizeformat(result['size']).replace(
                             u'\xa0', u' '),
                         model=model[1], reason='size_is_exceeded')
            logger.info(add_extra_to_log_message('Automatic cleanup',
                                                 extra=extra))
            for element in result['elements']:
                # Rename the raw 'id' key to '<Model>_id' for the log.
                element.update({"log_id": log_id,
                                "%s_id" % (model[1]): element.pop('id')})
                logger.info(
                    add_extra_to_log_message('Automatic cleanup element',
                                             extra=element))
def test_crashes(self, mocked_get_logger):
    """Size-exceeded cleanup trims Crash storage back under 1 GB.

    200 crashes of ~10 MB each; the task should delete 98 ('979.0 MB')
    and emit 1 meta + 98 element log lines (99 info calls).
    """
    gpm['Crash__limit_size'] = 1
    crash_size = 10*1024*1023
    crashes = CrashFactory.create_batch(200, archive_size=crash_size,
                                        minidump_size=0)
    deleted_crash = crashes[97]
    self.assertEqual(Crash.objects.all().count(), 200)
    extra_meta = dict(count=98, reason='size_is_exceeded', meta=True,
                      log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00',
                      model='Crash', size='979.0 MB')
    log_extra_msg = add_extra_to_log_message('Automatic cleanup',
                                             extra=extra_meta)
    extra = dict(Crash_id=deleted_crash.id,
                 element_created=deleted_crash.created.strftime(
                     "%d. %B %Y %I:%M%p"),
                 signature=deleted_crash.signature,
                 userid=deleted_crash.userid, appid=deleted_crash.appid,
                 log_id='36446dc3-ae7c-42ad-ae4e-6a826dcf0a00')
    log_msg = add_extra_to_log_message('Automatic cleanup element',
                                       extra=extra)
    mocked_logger = mocked_get_logger.return_value
    with patch('uuid.uuid4') as mocked_uuid4:
        # Deterministic uuid sequence so log_id values can be asserted.
        mocked_uuid4.side_effect = (uuid.UUID(
            '36446dc3-ae7c-42ad-ae4e-6a826dcf0a%02d' % x)
            for x in range(100))
        auto_delete_size_is_exceeded()
    self.assertEqual(mocked_logger.info.call_count, 99)
    mocked_logger.info.assert_any_call(log_extra_msg)
    mocked_logger.info.assert_any_call(log_msg)
def send(self, message, extra=None, tags=None, sentry_data=None,
         crash_obj=None):
    """Log a received crash report to the 'crashes' logger for ELK.

    :param message: the crash signature (logged as 'signature', not as
        the ELK 'message' field).
    :param extra: optional dict of extra fields; merged with tags and
        enriched in place before logging.
    :param tags: optional dict of tags; 'ver' (if present) becomes
        'app_version'.
    :param sentry_data: optional Sentry payload; exception and user info
        are extracted from it.
    :param crash_obj: unused here; kept for interface compatibility.
    """
    logger = logging.getLogger('crashes')
    # BUG FIX: the previous signature used mutable default arguments
    # (extra={}, tags={}, sentry_data={}) which this method mutates —
    # state leaked across calls. Use None sentinels instead.
    extra = {} if extra is None else extra
    tags = {} if tags is None else tags
    sentry_data = {} if sentry_data is None else sentry_data
    extra.update(tags)
    extra['app_version'] = tags['ver'] if 'ver' in tags else 'unknown'
    # We don't want "sentry.interfaces" or other sentry specific things as
    # part of any field name.
    extra['exception'] = str(sentry_data.get('sentry.interfaces.Exception'))
    # Will be 'None' if no user in sentry_data.
    extra['user'] = sentry_data.get('sentry.interfaces.User')
    # User is passed in as "dict(id=crash.userid)". Unpack.
    if isinstance(extra['user'], dict):
        extra['user'] = extra['user'].get('id')
    # The "message" is actually a crash signature, not appropriate for the
    # ELK "message" field.
    extra['signature'] = message
    # All ELK messages are expected to include logger_name.
    extra['logger_name'] = 'omaha_server'
    # Send message with logger.
    logger.info(add_extra_to_log_message("received crash report",
                                         extra=extra))
def send(self, message, extra=None, tags=None, sentry_data=None,
         crash_obj=None):
    """Log a received crash report to the 'crashes' logger for ELK.

    :param message: the crash signature (logged as 'signature', not as
        the ELK 'message' field).
    :param extra: optional dict of extra fields; merged with tags and
        enriched in place before logging.
    :param tags: optional dict of tags; 'ver' (if present) becomes
        'app_version'.
    :param sentry_data: optional Sentry payload; exception and user info
        are extracted from it.
    :param crash_obj: unused here; kept for interface compatibility.
    """
    logger = logging.getLogger('crashes')
    # BUG FIX: the previous signature used mutable default arguments
    # (extra={}, tags={}, sentry_data={}) which this method mutates —
    # state leaked across calls. Use None sentinels instead.
    extra = {} if extra is None else extra
    tags = {} if tags is None else tags
    sentry_data = {} if sentry_data is None else sentry_data
    extra.update(tags)
    extra['app_version'] = tags['ver'] if 'ver' in tags else 'unknown'
    # We don't want "sentry.interfaces" or other sentry specific things as
    # part of any field name.
    extra['exception'] = str(
        sentry_data.get('sentry.interfaces.Exception'))
    extra['user'] = sentry_data.get(
        'sentry.interfaces.User'
    )  # will be 'None' if no user in sentry_data.
    # User is passed in as "dict(id=crash.userid)". Unpack.
    if isinstance(extra['user'], dict):
        extra['user'] = extra['user'].get('id')
    # The "message" is actually a crash signature, not appropriate for the
    # ELK "message" field.
    extra['signature'] = message
    # All ELK messages are expected to include logger_name.
    extra['logger_name'] = 'omaha_server'
    # Send message with logger.
    logger.info(
        add_extra_to_log_message("received crash report", extra=extra))
def test_add_extra_to_log_message(self):
    """Extra keys are appended to the message in sorted key order."""
    extra = dict(a=1, c=3, b=2, d=4)
    expected_msg = 'test, a=1 , b=2 , c=3 , d=4'
    self.assertEqual(add_extra_to_log_message('test', extra), expected_msg)
def test_add_extra_to_log_message(self):
    """Formats 'msg, k=v , ...' with keys emitted in sorted order."""
    message = 'test'
    payload = dict(a=1, c=3, b=2, d=4)
    result = add_extra_to_log_message(message, payload)
    self.assertEqual(result, 'test, a=1 , b=2 , c=3 , d=4')