def test_wrong_signature(self, mocked_subprocess_module):
    config = self.get_basic_config()

    raw_crash = copy.copy(canonical_standard_raw_crash)
    raw_dumps = {config.dump_field: 'a_fake_dump.dump'}
    processed_crash = DotDict()
    processed_crash.product = 'Firefox'
    processed_crash.os_name = 'Windows NT'
    processed_crash.cpu_name = 'x86'
    processed_crash.signature = 'this-is-not-a-JIT-signature'
    processed_crash['json_dump.crashing_thread.frames'] = [
        DotDict({'not_module': 'not-a-module'}),
        DotDict({'module': 'a-module'})
    ]
    processor_meta = self.get_basic_processor_meta()

    mocked_subprocess_handle = mocked_subprocess_module.Popen.return_value
    mocked_subprocess_handle.stdout.read.return_value = 'EXTRA-SPECIAL'
    mocked_subprocess_handle.wait.return_value = 0

    rule = JitCrashCategorizeRule(config)

    # the call to be tested
    rule.act(raw_crash, raw_dumps, processed_crash, processor_meta)

    ok_('classifications.jit.category' not in processed_crash)
    ok_('classifications.jit.category_return_code' not in processed_crash)
def test_predicate(self):
    jd = copy.deepcopy(cannonical_json_dump)
    processed_crash = DotDict()
    processed_crash.json_dump = jd

    raw_crash = DotDict()
    raw_crash.ProductName = 'Firefox'
    raw_crash.Version = '16'
    raw_dumps = {}

    fake_processor = create_basic_fake_processor()
    fake_processor.config.firefox_out_of_date_version = '17'

    classifier = OutOfDateClassifier()
    ok_(classifier._predicate(
        raw_crash,
        raw_dumps,
        processed_crash,
        fake_processor
    ))

    raw_crash.Version = '19'
    ok_(not classifier._predicate(
        raw_crash,
        raw_dumps,
        processed_crash,
        fake_processor
    ))

    raw_crash.Version = '12'
    raw_crash.ProductName = 'NotFireFox'
    ok_(not classifier._predicate(
        raw_crash,
        raw_dumps,
        processed_crash,
        fake_processor
    ))
def test_blocking_start(self):
    config = DotDict()
    config.logger = self.logger
    config.idle_delay = 1
    config.quit_on_empty_queue = False

    class MyTaskManager(TaskManager):
        def _responsive_sleep(self, seconds, wait_log_interval=0,
                              wait_reason=''):
            try:
                if self.count >= 2:
                    self.quit = True
                self.count += 1
            except AttributeError:
                self.count = 0

    tm = MyTaskManager(config, task_func=Mock())

    waiting_func = Mock()

    tm.blocking_start(waiting_func=waiting_func)

    eq_(tm.task_func.call_count, 10)
    eq_(waiting_func.call_count, 0)
def test_get_iterator(self):
    config = DotDict()
    config.logger = self.logger
    config.quit_on_empty_queue = False

    tm = TaskManager(
        config,
        job_source_iterator=range(1),
    )
    eq_(tm._get_iterator(), [0])

    def an_iter(self):
        for i in range(5):
            yield i

    tm = TaskManager(
        config,
        job_source_iterator=an_iter,
    )
    eq_([x for x in tm._get_iterator()], [0, 1, 2, 3, 4])

    class X(object):
        def __init__(self, config):
            self.config = config

        def __iter__(self):
            for key in self.config:
                yield key

    tm = TaskManager(config, job_source_iterator=X(config))
    eq_([x for x in tm._get_iterator()], [y for y in config.keys()])
def test_basic_get(self, logging_info):
    config_ = DotDict(
        logger=logging,
        web_server=DotDict(
            ip_address='127.0.0.1',
            port='88888'
        )
    )

    # the middleware app creates a class based on another and sets an
    # attribute called `cls`
    class MadeUp(middleware_app.MeasuringImplementationWrapper):
        cls = AuxImplementation1
        all_services = {}
        config = config_

    server = CherryPy(config_, (
        ('/aux/(.*)', MadeUp),
    ))
    testapp = TestApp(server._wsgi_func)
    response = testapp.get('/aux/', params={'add': 1})
    eq_(response.status, 200)

    for call in logging_info.call_args_list:
        # mock calls are funny
        args = call[0]
        arg = args[0]
        if re.findall('measuringmiddleware:[\d\.]+\t/aux/\t\?add=1', arg):
            break
    else:
        raise AssertionError('call never found')
def _add_classification(
    self, processed_crash, classification, classification_data, logger=None
):
    """This method adds a 'support' classification to a processed crash.

    parameters:
        processed_crash - a reference to the processed crash to which the
                          classification is to be added.
        classification - a string that is the classification.
        classification_data - a string of extra data that goes along with a
                              classification
        logger - an optional logger on which to record the classification
    """
    if 'classifications' not in processed_crash:
        processed_crash['classifications'] = DotDict()
    processed_crash['classifications']['support'] = DotDict({
        'classification': classification,
        'classification_data': classification_data,
        'classification_version': self.version()
    })
    if logger:
        logger.debug('Support classification: %s', classification)
    return True
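# A minimal sketch of the effect of _add_classification (it mirrors
# test_add_classification_to_processed_crash below; DotDict and
# SupportClassificationRule as imported by these tests):
#
#     pc = DotDict()
#     rule = SupportClassificationRule()
#     rule._add_classification(pc, 'stupid', 'extra stuff')
#     assert pc.classifications.support.classification == 'stupid'
#     assert pc.classifications.support.classification_data == 'extra stuff'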
def test_action_case_4(self):
    """nothing in 1st dump, sentinel but no secondary in
    upload_file_minidump_flash2 dump"""
    pc = DotDict()
    pc.dump = DotDict()
    pijd = copy.deepcopy(cannonical_json_dump)
    pc.dump.json_dump = pijd
    f2jd = copy.deepcopy(cannonical_json_dump)
    pc.upload_file_minidump_flash2 = DotDict()
    pc.upload_file_minidump_flash2.json_dump = f2jd
    f2jd['crashing_thread']['frames'][2]['function'] = 'NtUserSetWindowPos'

    fake_processor = create_basic_fake_processor()

    rc = DotDict()
    rd = {}
    rule = SetWindowPos()
    action_result = rule.action(rc, rd, pc, fake_processor)

    ok_(action_result)
    ok_('classifications' in pc)
    ok_('skunk_works' in pc.classifications)
    eq_(
        pc.classifications.skunk_works.classification,
        'NtUserSetWindowPos | other'
    )
def setup_mocked_s3_storage(
    self,
    executor=TransactionExecutor,
    executor_for_gets=TransactionExecutor,
    storage_class='BotoS3CrashStorage',
    host='',
    port=0,
    resource_class=S3ConnectionContext,
    **extra
):
    config = DotDict({
        'resource_class': resource_class,
        'logger': mock.Mock(),
        'host': host,
        'port': port,
        'access_key': 'this is the access key',
        'secret_access_key': 'secrets',
        'bucket_name': 'silliness',
        'keybuilder_class': KeyBuilderBase,
        'prefix': 'dev',
        'calling_format': mock.Mock()
    })
    config.update(extra)

    s3_conn = resource_class(config)
    s3_conn._connect_to_endpoint = mock.Mock()
    s3_conn._mocked_connection = s3_conn._connect_to_endpoint.return_value
    s3_conn._calling_format.return_value = mock.Mock()
    s3_conn._CreateError = mock.Mock()
    s3_conn.ResponseError = mock.Mock()
    s3_conn._open = mock.MagicMock()
    return s3_conn
def test_action_case_1(self):
    """sentinel exists in stack, but no secondaries"""
    pc = DotDict()
    pc.process_type = 'plugin'
    pijd = copy.deepcopy(cannonical_json_dump)
    pc.json_dump = pijd
    pc.json_dump['crashing_thread']['frames'][2]['function'] = \
        'NtUserSetWindowPos'
    f2jd = copy.deepcopy(cannonical_json_dump)
    pc.upload_file_minidump_flash2 = DotDict()
    pc.upload_file_minidump_flash2.json_dump = f2jd

    fake_processor = create_basic_fake_processor()

    rc = DotDict()
    rd = {}
    rule = SetWindowPos()
    action_result = rule.action(rc, rd, pc, fake_processor)

    ok_(action_result)
    ok_('classifications' in pc)
    ok_('skunk_works' in pc.classifications)
    eq_(
        pc.classifications.skunk_works.classification,
        'NtUserSetWindowPos | other'
    )
def test_doing_work_with_two_workers_and_generator(self):
    config = DotDict()
    config.logger = self.logger
    config.number_of_threads = 2
    config.maximum_queue_size = 2
    my_list = []

    def insert_into_list(anItem):
        my_list.append(anItem)

    ttm = ThreadedTaskManager(
        config,
        task_func=insert_into_list,
        job_source_iterator=(((x,), {}) for x in xrange(10))
    )
    try:
        ttm.start()
        time.sleep(0.2)
        ok_(
            len(ttm.thread_list) == 2,
            "expected 2 threads, but found %d" % len(ttm.thread_list)
        )
        ok_(
            len(my_list) == 10,
            'expected to do 10 inserts, '
            'but %d were done instead' % len(my_list)
        )
        ok_(
            sorted(my_list) == range(10),
            'expected %s, but got %s' % (range(10), sorted(my_list))
        )
    except Exception:
        # we got threads to join
        ttm.wait_for_completion()
        raise
def test_put_with_data(self, logging_info):
    # the middleware app creates a class based on another and sets an
    # attribute called `cls`
    class MadeUp(middleware_app.ImplementationWrapper):
        cls = AuxImplementation4
        all_services = {}

    config = DotDict(
        logger=logging,
        web_server=DotDict(
            ip_address='127.0.0.1',
            port='88888'
        )
    )
    server = CherryPy(config, (
        ('/aux/(.*)', MadeUp),
    ))
    testapp = TestApp(server._wsgi_func)
    response = testapp.put('/aux/', params={'add': 1})
    eq_(response.status, 200)
    eq_(json.loads(response.body), {'age': 101})
    logging_info.assert_called_with('Running AuxImplementation4')
def test_stuff_missing(self):
    config = self.get_basic_config()

    raw_crash = copy.copy(canonical_standard_raw_crash)
    raw_dumps = {}
    system_info = copy.copy(
        canonical_processed_crash['json_dump']['system_info']
    )
    del system_info['cpu_count']
    processed_crash = DotDict()
    processed_crash.json_dump = {
        'system_info': system_info
    }
    processor_meta = self.get_basic_processor_meta()

    rule = CPUInfoRule(config)

    # the call to be tested
    rule.act(raw_crash, raw_dumps, processed_crash, processor_meta)

    eq_(
        processed_crash.cpu_info,
        "GenuineIntel family 6 model 42 stepping 7"
    )
    eq_(processed_crash.cpu_name, 'x86')

    # raw crash should be unchanged
    eq_(raw_crash, canonical_standard_raw_crash)
def test_no_source(self):
    class FakeStorageDestination(object):
        def __init__(self, config, quit_check_callback):
            self.store = DotDict()
            self.dumps = DotDict()

        def save_raw_crash(self, raw_crash, dump, crash_id):
            self.store[crash_id] = raw_crash
            self.dumps[crash_id] = dump

    logger = SilentFakeLogger()
    config = DotDict({
        'logger': logger,
        'number_of_threads': 2,
        'maximum_queue_size': 2,
        'number_of_submissions': 'all',
        'source': DotDict({'crashstorage_class': None}),
        'destination': DotDict({
            'crashstorage_class': FakeStorageDestination
        }),
        'producer_consumer': DotDict({
            'producer_consumer_class': ThreadedTaskManager,
            'logger': logger,
            'number_of_threads': 1,
            'maximum_queue_size': 1
        })
    })

    fts_app = CrashMoverApp(config)
    assert_raises(TypeError, fts_app.main)
def test_doing_work_with_one_worker(self):
    config = DotDict()
    config.logger = self.logger
    config.number_of_threads = 1
    config.maximum_queue_size = 1
    my_list = []

    def insert_into_list(anItem):
        my_list.append(anItem)

    ttm = ThreadedTaskManager(config, task_func=insert_into_list)
    try:
        ttm.start()
        time.sleep(0.2)
        ok_(
            len(my_list) == 10,
            'expected to do 10 inserts, '
            'but %d were done instead' % len(my_list)
        )
        ok_(
            my_list == range(10),
            'expected %s, but got %s' % (range(10), my_list)
        )
        ttm.stop()
    except Exception:
        # we got threads to join
        ttm.wait_for_completion()
        raise
def test_basic_get_with_parsed_query_string(self, logging_info):
    # the middleware app creates a class based on another and sets an
    # attribute called `cls`
    class MadeUp(middleware_app.ImplementationWrapper):
        cls = AuxImplementation5
        all_services = {}

    config = DotDict(
        logger=logging,
        web_server=DotDict(
            ip_address='127.0.0.1',
            port='88888'
        )
    )
    server = CherryPy(config, (
        ('/aux/(.*)', MadeUp),
    ))
    testapp = TestApp(server._wsgi_func)
    response = testapp.get(
        '/aux/',
        {'foo': 'bar', 'names': ['peter', 'anders']},
    )
    eq_(response.status, 200)
    eq_(
        json.loads(response.body),
        {'foo': 'bar', 'names': ['peter', 'anders']}
    )
    logging_info.assert_called_with('Running AuxImplementation5')
def test_basic_get(self, logging_info):
    # the middleware app creates a class based on another and sets an
    # attribute called `cls`
    class MadeUp(middleware_app.ImplementationWrapper):
        cls = AuxImplementation1
        all_services = {}

    config = DotDict(
        logger=logging,
        web_server=DotDict(
            ip_address='127.0.0.1',
            port='88888'
        )
    )
    server = CherryPy(config, (
        ('/aux/(.*)', MadeUp),
    ))
    testapp = TestApp(server._wsgi_func)
    response = testapp.get('/aux/')
    eq_(response.status, 200)
    eq_(json.loads(response.body), {'age': 100})
    logging_info.assert_called_with('Running AuxImplementation1')

    response = testapp.get('/xxxjunkxxx', expect_errors=True)
    eq_(response.status, 404)
def test_action(self):
    rc = DotDict()
    rd = {}
    pc = DotDict()
    processor = None

    skunk_rule = SkunkClassificationRule()
    ok_(skunk_rule.action(rc, rd, pc, processor))
def create_basic_fake_processor():
    fake_processor = DotDict()
    fake_processor.c_signature_tool = c_signature_tool
    fake_processor.config = DotDict()
    # need help figuring out failures? switch to FakeLogger and read stdout
    fake_processor.config.logger = SilentFakeLogger()
    # fake_processor.config.logger = FakeLogger()
    return fake_processor
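# Usage sketch (as exercised by the classification tests in this section):
# rules receive this fake in place of a real processor object, e.g.
#
#     fake_processor = create_basic_fake_processor()
#     rule = SetWindowPos()
#     rule.action(DotDict(), {}, DotDict(), fake_processor)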
def test_action(self):
    rc = DotDict()
    rd = {}
    pc = DotDict()
    processor = None

    support_rule = SupportClassificationRule()
    ok_(support_rule.action(rc, rd, pc, processor))
def test_executor_identity(self):
    config = DotDict()
    config.logger = self.logger
    tm = TaskManager(
        config,
        job_source_iterator=range(1),
    )
    tm._pid = 666
    eq_(tm.executor_identity(), '666-MainThread')
def test_constructor1(self):
    config = DotDict()
    config.logger = self.logger
    config.quit_on_empty_queue = False

    tm = TaskManager(config)
    ok_(tm.config == config)
    ok_(tm.logger == self.logger)
    ok_(tm.task_func == default_task_func)
    ok_(tm.quit == False)
def test_action_case_2(self):
    """sentinel exists in stack, plus one secondary"""
    pc = DotDict()
    pc.process_type = 'plugin'
    pijd = copy.deepcopy(cannonical_json_dump)
    pc.json_dump = pijd
    pc.json_dump['crashing_thread']['frames'][2]['function'] = \
        'NtUserSetWindowPos'
    pc.json_dump['crashing_thread']['frames'][4]['function'] = \
        'F_1378698112'
    f2jd = copy.deepcopy(cannonical_json_dump)
    pc.upload_file_minidump_flash2 = DotDict()
    pc.upload_file_minidump_flash2.json_dump = f2jd

    fake_processor = create_basic_fake_processor()

    rc = DotDict()
    rd = {}
    rule = SetWindowPos()
    action_result = rule.action(rc, rd, pc, fake_processor)

    ok_(action_result)
    ok_('classifications' in pc)
    ok_('skunk_works' in pc.classifications)
    eq_(
        pc.classifications.skunk_works.classification,
        'NtUserSetWindowPos | F_1378698112'
    )
def test_transaction_ack_crash(self):
    config = self._setup_config()
    connection = Mock()
    ack_token = DotDict()
    ack_token.delivery_tag = 1
    crash_id = 'some-crash-id'

    crash_store = RabbitMQCrashStorage(config)
    crash_store._transaction_ack_crash(connection, crash_id, ack_token)

    connection.channel.basic_ack.assert_called_once_with(delivery_tag=1)
def _fake_processed_crash(self):
    d = DotDict()
    # these keys survive redaction
    d.a = DotDict()
    d.a.b = DotDict()
    d.a.b.c = 11
    d.sensitive = DotDict()
    d.sensitive.x = 2
    d.not_url = 'not a url'
    return d
def test_add_classification_to_processed_crash(self):
    pc = DotDict()
    pc.classifications = DotDict()

    support_rule = SupportClassificationRule()
    support_rule._add_classification(pc, 'stupid', 'extra stuff')

    ok_('classifications' in pc)
    ok_('support' in pc.classifications)
    eq_('stupid', pc.classifications.support.classification)
    eq_('extra stuff', pc.classifications.support.classification_data)
    eq_('0.0', pc.classifications.support.classification_version)
def _setup_config(self):
    config = DotDict()
    config.transaction_executor_class = Mock()
    config.backoff_delays = (0, 0, 0)
    config.logger = Mock()
    config.rabbitmq_class = MagicMock()
    config.routing_key = 'socorro.normal'
    config.filter_on_legacy_processing = True
    config.redactor_class = Redactor
    config.forbidden_keys = Redactor.required_config.forbidden_keys.default
    config.throttle = 100
    return config
def test_predicate(self):
    rc = DotDict()
    rd = {}
    pc = DotDict()
    pc.classifications = DotDict()
    processor = None

    support_rule = SupportClassificationRule()
    ok_(support_rule.predicate(rc, rd, pc, processor))

    pc.classifications.support = DotDict()
    ok_(support_rule.predicate(rc, rd, pc, processor))
def test_no_destination(self):
    class FakeStorageSource(object):
        def __init__(self, config, quit_check_callback):
            self.store = DotDict({
                '1234': DotDict({
                    'ooid': '1234',
                    'Product': 'FireSquid',
                    'Version': '1.0'
                }),
                '1235': DotDict({
                    'ooid': '1235',
                    'Product': 'ThunderRat',
                    'Version': '1.0'
                }),
                '1236': DotDict({
                    'ooid': '1236',
                    'Product': 'Caminimal',
                    'Version': '1.0'
                }),
                '1237': DotDict({
                    'ooid': '1237',
                    'Product': 'Fennicky',
                    'Version': '1.0'
                }),
            })

        def get_raw_crash(self, ooid):
            return self.store[ooid]

        def get_raw_dumps(self, ooid):
            return {
                'upload_file_minidump': 'this is a fake dump',
                'flash1': 'broken flash dump'
            }

        def new_ooids(self):
            for k in self.store.keys():
                yield k

    logger = SilentFakeLogger()
    config = DotDict({
        'logger': logger,
        'number_of_threads': 2,
        'maximum_queue_size': 2,
        'number_of_submissions': 'all',
        'source': DotDict({'crashstorage_class': FakeStorageSource}),
        'destination': DotDict({'crashstorage_class': None}),
        'producer_consumer': DotDict({
            'producer_consumer_class': ThreadedTaskManager,
            'logger': logger,
            'number_of_threads': 1,
            'maximum_queue_size': 1
        })
    })

    fts_app = CrashMoverApp(config)
    assert_raises(TypeError, fts_app.main)
def test_blocking_start_with_quit_on_empty(self):
    config = DotDict()
    config.logger = self.logger
    config.idle_delay = 1
    config.quit_on_empty_queue = True

    tm = TaskManager(config, task_func=Mock())

    waiting_func = Mock()

    tm.blocking_start(waiting_func=waiting_func)

    eq_(tm.task_func.call_count, 10)
    eq_(waiting_func.call_count, 0)
def test_action_fail(self):
    jd = copy.deepcopy(cannonical_json_dump)
    pc = DotDict()
    pc.json_dump = jd

    fake_processor = create_basic_fake_processor()

    rc = DotDict()
    rd = {}
    rule = BitguardClassifier()
    action_result = rule.action(rc, rd, pc, fake_processor)

    ok_(not action_result)
    ok_('classifications' not in pc)
def setup_mocked_s3_storage(
    self,
    executor=TransactionExecutor,
    executor_for_gets=TransactionExecutor,
    keybuilder_class=KeyBuilderBase,
    storage_class='BotoS3CrashStorage',
    bucket_name='mozilla-support-reason',
    host='',
    port=0,
):
    config = DotDict({
        'source': {
            'dump_field': 'dump'
        },
        'transaction_executor_class': executor,
        'transaction_executor_class_for_get': executor_for_gets,
        'resource_class': S3ConnectionContext,
        'keybuilder_class': keybuilder_class,
        'backoff_delays': [0, 0, 0],
        'redactor_class': Redactor,
        'forbidden_keys': Redactor.required_config.forbidden_keys.default,
        'logger': mock.Mock(),
        'host': host,
        'port': port,
        'access_key': 'this is the access key',
        'secret_access_key': 'secrets',
        'temporary_file_system_storage_path': self.TEMPDIR,
        'dump_file_suffix': '.dump',
        'bucket_name': bucket_name,
        'prefix': 'dev',
        'calling_format': mock.Mock(),
        'json_object_hook': DotDict,
    })

    if isinstance(storage_class, basestring):
        if storage_class == 'BotoS3CrashStorage':
            config.bucket_name = 'crash_storage'
            s3 = BotoS3CrashStorage(config)
        elif storage_class == 'SupportReasonAPIStorage':
            s3 = SupportReasonAPIStorage(config)
    else:
        s3 = storage_class(config)

    s3_conn = s3.connection_source
    s3_conn._connect_to_endpoint = mock.Mock()
    s3_conn._mocked_connection = s3_conn._connect_to_endpoint.return_value
    s3_conn._calling_format.return_value = mock.Mock()
    s3_conn._CreateError = mock.Mock()
    s3_conn._open = mock.MagicMock()
    return s3
def test_constructor1(self):
    config = DotDict()
    config.logger = self.logger
    config.number_of_threads = 1
    config.maximum_queue_size = 1

    ttm = ThreadedTaskManager(config)
    try:
        ok_(ttm.config == config)
        ok_(ttm.logger == self.logger)
        ok_(ttm.task_func == default_task_func)
        ok_(ttm.quit == False)
    finally:
        # we got threads to join
        ttm._kill_worker_threads()
def test_save_raw_crash_no_legacy(self):
    config = self._setup_config()
    config.filter_on_legacy_processing = False
    crash_store = RabbitMQCrashStorage(config)

    # test for "legacy_processing" missing from crash
    crash_store.save_raw_crash(
        raw_crash=DotDict(),
        dumps=DotDict(),
        crash_id='crash_id'
    )
    crash_store.transaction.assert_called_with(
        crash_store._save_raw_crash_transaction,
        'crash_id'
    )
    config.logger.reset_mock()

    # test for normal save
    raw_crash = DotDict()
    raw_crash.legacy_processing = 0
    crash_store.save_raw_crash(
        raw_crash=raw_crash,
        dumps=DotDict(),
        crash_id='crash_id'
    )
    crash_store.transaction.assert_called_with(
        crash_store._save_raw_crash_transaction,
        'crash_id'
    )
    crash_store.transaction.reset_mock()

    # test for save without regard to "legacy_processing" value
    raw_crash = DotDict()
    raw_crash.legacy_processing = 5
    crash_store.save_raw_crash(
        raw_crash=raw_crash,
        dumps=DotDict(),
        crash_id='crash_id'
    )
    crash_store.transaction.assert_called_with(
        crash_store._save_raw_crash_transaction,
        'crash_id'
    )
def test_save_raw_crash_normal(self):
    config = self._setup_config()
    crash_store = RabbitMQCrashStorage(config)

    # test for "legacy_processing" missing from crash
    crash_store.save_raw_crash(
        raw_crash=DotDict(),
        dumps=DotDict(),
        crash_id='crash_id'
    )
    ok_(not crash_store.transaction.called)
    config.logger.reset_mock()

    # test for normal save
    raw_crash = DotDict()
    raw_crash.legacy_processing = 0
    crash_store.save_raw_crash(
        raw_crash=raw_crash,
        dumps=DotDict(),
        crash_id='crash_id'
    )
    crash_store.transaction.assert_called_with(
        crash_store._save_raw_crash_transaction,
        'crash_id'
    )
    crash_store.transaction.reset_mock()

    # test for save rejection because of "legacy_processing"
    raw_crash = DotDict()
    raw_crash.legacy_processing = 5
    crash_store.save_raw_crash(
        raw_crash=raw_crash,
        dumps=DotDict(),
        crash_id='crash_id'
    )
    ok_(not crash_store.transaction.called)
def test_statistics_all_missing_prefix(self):
    d = DotDict()
    d.statsd_host = 'localhost'
    d.statsd_port = 666
    d.prefix = None
    d.active_counters_list = ['x', 'y', 'z']

    with patch('socorrolib.lib.statistics.StatsClient') as StatsClientMocked:
        s = StatisticsForStatsd(d, 'processor')
        StatsClientMocked.assert_called_with('localhost', 666, 'processor')

        s.incr('x')
        StatsClientMocked.assert_has_calls(
            StatsClientMocked.mock_calls,
            [call.incr('processor.x')]
        )

        s.incr('y')
        StatsClientMocked.assert_has_calls(
            StatsClientMocked.mock_calls,
            [call.incr('processor.y')]
        )

        s.incr('z')
        StatsClientMocked.assert_has_calls(
            StatsClientMocked.mock_calls,
            [call.incr('processor.z')]
        )

        s.incr('w')
        StatsClientMocked.assert_has_calls(
            StatsClientMocked.mock_calls,
            [
                call.incr('processor.y'),
                call.incr('processor.x'),
                call.incr('processor.y')
            ]
        )

        s.incr(None)
        StatsClientMocked.assert_has_calls(
            StatsClientMocked.mock_calls,
            [
                call.incr('processor.y'),
                call.incr('processor.x'),
                call.incr('processor.y'),
                call.incr('processor.unknown')
            ]
        )
def test_bogus_source_iter_and_worker(self):
    class TestFTSAppClass(FetchTransformSaveApp):
        def __init__(self, config):
            super(TestFTSAppClass, self).__init__(config)
            self.the_list = []

        def _setup_source_and_destination(self):
            self.source = Mock()
            self.destination = Mock()

        def _create_iter(self):
            for x in xrange(5):
                yield ((x,), {})

        def transform(self, anItem):
            self.the_list.append(anItem)

    logger = SilentFakeLogger()
    config = DotDict({
        'logger': logger,
        'number_of_threads': 2,
        'maximum_queue_size': 2,
        'number_of_submissions': 'all',
        'source': DotDict({'crashstorage_class': None}),
        'destination': DotDict({'crashstorage_class': None}),
        'producer_consumer': DotDict({
            'producer_consumer_class': TaskManager,
            'logger': logger,
            'number_of_threads': 1,
            'maximum_queue_size': 1
        })
    })

    fts_app = TestFTSAppClass(config)
    fts_app.main()
    ok_(
        len(fts_app.the_list) == 5,
        'expected to do 5 inserts, '
        'but %d were done instead' % len(fts_app.the_list)
    )
    ok_(
        sorted(fts_app.the_list) == range(5),
        'expected %s, but got %s' % (range(5), sorted(fts_app.the_list))
    )
def _get_model(overrides=None):
    config_values = {
        'base_url': 'http://crashanalysis.com',
        'save_root': '',
        'save_download': False,
        'save_seconds': 1000,
    }
    if overrides:
        config_values.update(overrides)
    cls = correlations.CorrelationsSignatures
    config = DotDict()
    config.logger = mock.Mock()
    config.http = DotDict()
    config.http.correlations = DotDict(config_values)
    return cls(config=config)
def test_predicate(self):
    rc = DotDict()
    rd = {}
    pc = DotDict()
    pc.classifications = DotDict()
    processor = None

    skunk_rule = SkunkClassificationRule()
    ok_(skunk_rule.predicate(rc, rd, pc, processor))

    pc.classifications.skunk_works = DotDict()
    ok_(skunk_rule.predicate(rc, rd, pc, processor))

    pc.classifications.skunk_works.classification = 'stupid'
    ok_(not skunk_rule.predicate(rc, rd, pc, processor))
def test_everything_we_hoped_for(self):
    config = self.get_basic_config()

    raw_crash = copy.copy(canonical_standard_raw_crash)
    raw_dumps = {}
    processed_crash = DotDict()
    processed_crash.json_dump = copy.copy(cannonical_stackwalker_output)
    processor_meta = self.get_basic_processor_meta()

    rule = CrashingThreadRule(config)

    # the call to be tested
    rule.act(raw_crash, raw_dumps, processed_crash, processor_meta)

    eq_(processed_crash.crashedThread, 0)
def _get_model(self, overrides):
    new_temp_dir = tempfile.mkdtemp()
    self.temp_dirs.append(new_temp_dir)
    config_values = {
        'base_url': 'http://crashanalysis.com',
        'save_root': new_temp_dir,
        'save_download': True,
        'save_seconds': 1000,
    }
    config_values.update(overrides)
    cls = correlations.Correlations
    config = DotDict()
    config.logger = mock.Mock()
    config.http = DotDict()
    config.http.correlations = DotDict(config_values)
    return cls(config=config)
class FakeStorageSource(object):
    def __init__(self, config, quit_check_callback):
        self.store = DotDict({
            '1234': DotDict({
                'ooid': '1234',
                'Product': 'FireSquid',
                'Version': '1.0'
            }),
            '1235': DotDict({
                'ooid': '1235',
                'Product': 'ThunderRat',
                'Version': '1.0'
            }),
            '1236': DotDict({
                'ooid': '1236',
                'Product': 'Caminimal',
                'Version': '1.0'
            }),
            '1237': DotDict({
                'ooid': '1237',
                'Product': 'Fennicky',
                'Version': '1.0'
            }),
        })
        self.number_of_close_calls = 0

    def get_raw_crash(self, ooid):
        return self.store[ooid]

    def get_raw_dumps(self, ooid):
        return {
            'upload_file_minidump': 'this is a fake dump',
            'flash1': 'broken flash dump'
        }

    def new_crashes(self):
        for k in self.store.keys():
            yield k

    def close(self):
        self.number_of_close_calls += 1
def test_stuff_missing(self):
    config = self.get_basic_config()

    raw_crash = copy.copy(canonical_standard_raw_crash)
    del raw_crash.uuid
    expected_raw_crash = copy.copy(raw_crash)
    raw_dumps = {}
    processed_crash = DotDict()
    processor_meta = self.get_basic_processor_meta()

    rule = IdentifierRule(config)

    # the call to be tested
    result = rule.act(
        raw_crash,
        raw_dumps,
        processed_crash,
        processor_meta
    )
    eq_(result, (True, False))

    # raw crash should be unchanged
    eq_(raw_crash, expected_raw_crash)
class SubmitterFileSystemWalkerSource(CrashStorageBase):
    """This is a crashstorage derivative that can walk an arbitrary file
    system path looking for crashes.  The new_crashes generator yields
    pathnames rather than crash_ids - so it is not compatible with other
    instances of the CrashStorageSystem."""
    required_config = Namespace()
    required_config.add_option(
        'search_root',
        doc="a filesystem location to begin a search for raw crash/dump sets",
        short_form='s',
        default=None
    )
    required_config.add_option(
        'dump_suffix',
        doc="the standard file extension for dumps",
        default='.dump'
    )
    required_config.add_option(
        'dump_field',
        doc="the default name for the main dump",
        default='upload_file_minidump'
    )

    #--------------------------------------------------------------------------
    def __init__(self, config, quit_check_callback=None):
        if isinstance(quit_check_callback, basestring):
            # this class is being used as a 'new_crash_source' and the name
            # of the app has been passed - we can ignore it
            quit_check_callback = None
        super(SubmitterFileSystemWalkerSource, self).__init__(
            config,
            quit_check_callback
        )

    #--------------------------------------------------------------------------
    def get_raw_crash(self, (prefix, path_tuple)):
        """the default implementation of fetching a raw_crash

        parameters:
            path_tuple - a tuple of paths.  the first element is the
                         raw_crash pathname"""
        with open(path_tuple[0]) as raw_crash_fp:
            return DotDict(json.load(raw_crash_fp))
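# A minimal usage sketch (assumptions: config carries the options declared
# in required_config above, and new_crashes yields the (prefix, path_tuple)
# pairs that get_raw_crash expects - the search_root value is illustrative):
#
#     config.search_root = '/tmp/crashes'
#     source = SubmitterFileSystemWalkerSource(config)
#     for prefix, path_tuple in source.new_crashes():
#         raw_crash = source.get_raw_crash((prefix, path_tuple))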
class FakeStorageSource(object):
    def __init__(self, config, quit_check_callback):
        self.store = DotDict({
            '1234': DotDict({
                'ooid': '1234',
                'Product': 'FireSquid',
                'Version': '1.0'
            }),
            '1235': DotDict({
                'ooid': '1235',
                'Product': 'ThunderRat',
                'Version': '1.0'
            }),
            '1236': DotDict({
                'ooid': '1236',
                'Product': 'Caminimal',
                'Version': '1.0'
            }),
            '1237': DotDict({
                'ooid': '1237',
                'Product': 'Fennicky',
                'Version': '1.0'
            }),
        })

    def get_raw_crash(self, ooid):
        return self.store[ooid]

    def get_raw_dump(self, ooid):
        return 'this is a fake dump'

    def new_ooids(self):
        for k in self.store.keys():
            yield k
def test_action_case_3(self):
    """failure - no targets found in top 5 frames of stack"""
    pc = DotDict()
    f2jd = copy.deepcopy(cannonical_json_dump)
    pc.upload_file_minidump_flash2 = DotDict()
    pc.upload_file_minidump_flash2.json_dump = f2jd

    fake_processor = create_basic_fake_processor()

    rc = DotDict()
    rd = {}
    rule = Bug812318()
    action_result = rule.action(rc, rd, pc, fake_processor)

    ok_(not action_result)
    ok_('classifications' not in pc)
def test_action_failure(self):
    """success - target signature not found"""
    pc = DotDict()
    f2jd = copy.deepcopy(cannonical_json_dump)
    pc.upload_file_minidump_flash2 = DotDict()
    pc.upload_file_minidump_flash2.json_dump = f2jd
    pc.upload_file_minidump_flash2.signature = 'lars was here'

    fake_processor = create_basic_fake_processor()

    rc = DotDict()
    rd = {}
    rule = Bug811804()
    action_result = rule.action(rc, rd, pc, fake_processor)

    ok_(not action_result)
    ok_('classifications' not in pc)
def test_get_processed(self):
    faked_hb_row_object = DotDict()
    faked_hb_row_object.columns = DotDict()
    faked_hb_row_object.columns['processed_data:json'] = DotDict()
    faked_hb_row_object.columns['processed_data:json'].value = \
        self._fake_unredacted_processed_crash_as_string()

    processed_crash = DotDict()

    with self.storage.hbase() as conn:
        conn.client.getRowWithColumns.return_value = [faked_hb_row_object]

        processed_crash = self.storage.get_processed(
            "936ce666-ff3b-4c7a-9674-367fe2120408"
        )

    eq_(
        processed_crash,
        self._fake_redacted_processed_crash()
    )
def test_action_case_1(self):
    """success - target found in top 5 frames of stack"""
    pc = DotDict()
    f2jd = copy.deepcopy(cannonical_json_dump)
    pc.upload_file_minidump_flash2 = DotDict()
    pc.upload_file_minidump_flash2.json_dump = f2jd
    f2jd['crashing_thread']['frames'][2]['function'] = \
        'NtAlpcSendWaitReceivePort'

    fake_processor = create_basic_fake_processor()

    rc = DotDict()
    rd = {}
    rule = SendWaitReceivePort()
    action_result = rule.action(rc, rd, pc, fake_processor)

    ok_(action_result)
    ok_('classifications' in pc)
def test_stuff_missing(self):
    config = self.get_basic_config()

    raw_crash = copy.copy(canonical_standard_raw_crash)
    raw_dumps = {}
    processed_crash = DotDict()
    processed_crash.json_dump = {}
    processor_meta = self.get_basic_processor_meta()

    rule = CrashingThreadRule(config)

    # the call to be tested
    rule.act(raw_crash, raw_dumps, processed_crash, processor_meta)

    eq_(processed_crash.crashedThread, None)
    eq_(
        processor_meta.processor_notes,
        ['MDSW did not identify the crashing thread']
    )