def test_missing_crashes(self, mock_futures, boto_helper):
    """Verify it finds a missing crash."""
    boto_helper.get_or_create_bucket('crashstats')

    # Create a raw and processed crash
    crashid_1 = create_new_ooid()
    boto_helper.set_contents_from_string(
        bucket_name='crashstats',
        key='/v2/raw_crash/%s/%s/%s' % (crashid_1[0:3], TODAY, crashid_1),
        value='test'
    )
    boto_helper.set_contents_from_string(
        bucket_name='crashstats',
        key='/v1/processed_crash/%s' % crashid_1,
        value='test'
    )

    # Create a raw crash
    crashid_2 = create_new_ooid()
    boto_helper.set_contents_from_string(
        bucket_name='crashstats',
        key='/v2/raw_crash/%s/%s/%s' % (crashid_2[0:3], TODAY, crashid_2),
        value='test'
    )

    with self.get_app() as app:
        missing = app.find_missing(TODAY)

    assert missing == [crashid_2]

def test_iter(self, sqs_helper):
    standard_crash = create_new_ooid()
    sqs_helper.publish("standard", standard_crash)

    reprocessing_crash = create_new_ooid()
    sqs_helper.publish("reprocessing", reprocessing_crash)

    priority_crash = create_new_ooid()
    sqs_helper.publish("priority", priority_crash)

    crash_queue = SQSCrashQueue(get_sqs_config())
    new_crashes = list(crash_queue.new_crashes())

    # Assert the shape of items in new_crashes
    for item in new_crashes:
        assert isinstance(item, tuple)
        assert isinstance(item[0], tuple)  # *args
        assert isinstance(item[1], dict)  # **kwargs
        assert list(item[1].keys()) == ["finished_func"]

    # Assert new_crashes order is the correct order
    crash_ids = [item[0][0] for item in new_crashes]
    assert crash_ids == [priority_crash, standard_crash, reprocessing_crash]

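# The (args, kwargs) tuples that new_crashes() yields are meant to be splatted
# into whatever callable processes a crash. A minimal sketch of a consumer
# loop, assuming only the shape asserted above (process_crash is a
# hypothetical stand-in, not part of the queue API):
def sketch_drain(crash_queue, process_crash):
    for args, kwargs in crash_queue.new_crashes():
        finished = kwargs["finished_func"]
        process_crash(*args)  # args[0] is the crash id
        finished()  # ack only after the crash is handled
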
def testCreateNewOoid(self):
    ooid = oo.create_new_ooid()
    ndate = oo.dateFromOoid(ooid)
    ndepth = oo.depthFromOoid(ooid)
    assert self.nowstamp == ndate, 'Expect date of %s, got %s' % (self.nowstamp, ndate)
    assert oo.defaultDepth == ndepth, 'Expect default depth (%d) got %d' % (oo.defaultDepth, ndepth)

    ooid = oo.create_new_ooid(timestamp=self.xmas05)
    ndate = oo.dateFromOoid(ooid)
    ndepth = oo.depthFromOoid(ooid)
    assert self.xmas05 == ndate, 'Expect date of %s, got %s' % (self.xmas05, ndate)
    assert oo.defaultDepth == ndepth, 'Expect default depth (%d) got %d' % (oo.defaultDepth, ndepth)

    for d in range(1, 5):
        ooid0 = oo.create_new_ooid(depth=d)
        ooid1 = oo.create_new_ooid(timestamp=self.xmas05, depth=d)
        ndate0 = oo.dateFromOoid(ooid0)
        ndepth0 = oo.depthFromOoid(ooid0)
        ndate1 = oo.dateFromOoid(ooid1)
        ndepth1 = oo.depthFromOoid(ooid1)
        assert self.nowstamp == ndate0, 'Expect date of %s, got %s' % (self.nowstamp, ndate0)
        assert self.xmas05 == ndate1, 'Expect date of %s, got %s' % (self.xmas05, ndate1)
        assert ndepth0 == ndepth1, 'Expect depth0(%d) == depth1(%d)' % (ndepth0, ndepth1)
        assert d == ndepth0, 'Expect depth %d, got %d' % (d, ndepth0)

    assert oo.depthFromOoid(self.badooid0) is None
    assert oo.depthFromOoid(self.badooid1) is None

def testCreateNewOoid(self):
    new_ooid = ooid.create_new_ooid()
    ndate = ooid.dateFromOoid(new_ooid)
    ndepth = ooid.depthFromOoid(new_ooid)
    assert self.nowstamp == ndate
    assert ooid.defaultDepth == ndepth

    new_ooid = ooid.create_new_ooid(timestamp=self.xmas05)
    ndate = ooid.dateFromOoid(new_ooid)
    ndepth = ooid.depthFromOoid(new_ooid)
    assert self.xmas05 == ndate
    assert ooid.defaultDepth == ndepth

    for d in range(1, 5):
        ooid0 = ooid.create_new_ooid(depth=d)
        ooid1 = ooid.create_new_ooid(timestamp=self.xmas05, depth=d)
        ndate0 = ooid.dateFromOoid(ooid0)
        ndepth0 = ooid.depthFromOoid(ooid0)
        ndate1 = ooid.dateFromOoid(ooid1)
        ndepth1 = ooid.depthFromOoid(ooid1)
        assert self.nowstamp == ndate0
        assert self.xmas05 == ndate1
        assert ndepth0 == ndepth1
        assert d == ndepth0

    assert ooid.depthFromOoid(self.badooid0) is None
    assert ooid.depthFromOoid(self.badooid1) is None

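# Both testCreateNewOoid variants above pin down the same contract: a crash id
# is a UUID4 whose last seven characters are overwritten with a storage-depth
# digit followed by the submission date as YYMMDD. A minimal sketch of that
# encoding, modeled on socorro's ooid module (the function names and exact
# digit layout here are assumptions for illustration, not the library source):
import datetime
import uuid as uuidlib

DEFAULT_DEPTH = 2

def sketch_create_new_ooid(timestamp=None, depth=DEFAULT_DEPTH):
    timestamp = timestamp or datetime.datetime.utcnow()
    base = str(uuidlib.uuid4())
    # Keep the random prefix; splice in the depth digit + YYMMDD at the tail.
    return "%s%d%02d%02d%02d" % (
        base[:-7], depth, timestamp.year % 100, timestamp.month, timestamp.day
    )

def sketch_date_from_ooid(crash_id):
    # The last six characters decode back to the date; a malformed tail
    # (as with the badooid values above) yields None.
    try:
        return datetime.datetime(
            2000 + int(crash_id[-6:-4]), int(crash_id[-4:-2]), int(crash_id[-2:])
        )
    except ValueError:
        return None
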
def test_missing_crashes(self, boto_helper):
    """Verify it finds a missing crash."""
    boto_helper.get_or_create_bucket(BUCKET_NAME)

    # Create a raw and processed crash
    crashid_1 = create_new_ooid()
    boto_helper.set_contents_from_string(
        bucket_name=BUCKET_NAME,
        key="/v2/raw_crash/%s/%s/%s" % (crashid_1[0:3], TODAY, crashid_1),
        value="test",
    )
    boto_helper.set_contents_from_string(
        bucket_name=BUCKET_NAME,
        key="/v1/processed_crash/%s" % crashid_1,
        value="test",
    )

    # Create a raw crash
    crashid_2 = create_new_ooid()
    boto_helper.set_contents_from_string(
        bucket_name=BUCKET_NAME,
        key="/v2/raw_crash/%s/%s/%s" % (crashid_2[0:3], TODAY, crashid_2),
        value="test",
    )

    cmd = Command()
    missing = cmd.find_missing(num_workers=1, date=TODAY)
    assert missing == [crashid_2]

def test_no_missing_crashes(self, boto_helper, monkeypatch):
    """Verify raw crashes with processed crashes result in no missing crashes."""
    monkeypatch.setattr(Command, "get_entropy", get_small_entropy)
    bucket = settings.SOCORRO_CONFIG["resource"]["boto"]["bucket_name"]
    boto_helper.create_bucket(bucket)

    # Create a few raw and processed crashes
    crashids = [
        "000" + create_new_ooid()[3:],
        "000" + create_new_ooid()[3:],
        "000" + create_new_ooid()[3:],
    ]
    for crashid in crashids:
        boto_helper.upload_fileobj(
            bucket_name=BUCKET_NAME,
            key="v2/raw_crash/%s/%s/%s" % (crashid[0:3], TODAY, crashid),
            data=b"test",
        )
        boto_helper.upload_fileobj(
            bucket_name=BUCKET_NAME,
            key="v1/processed_crash/%s" % crashid,
            data=b"test",
        )

    cmd = Command()
    missing = cmd.find_missing(num_workers=1, date=TODAY)
    assert missing == []

def test_missing_crashes(self, boto_helper, monkeypatch):
    """Verify it finds a missing crash."""
    monkeypatch.setattr(Command, "get_entropy", get_small_entropy)
    bucket = settings.SOCORRO_CONFIG["resource"]["boto"]["bucket_name"]
    boto_helper.create_bucket(bucket)

    # Create a raw and processed crash
    crashid_1 = "000" + create_new_ooid()[3:]
    boto_helper.upload_fileobj(
        bucket_name=BUCKET_NAME,
        key="v2/raw_crash/%s/%s/%s" % (crashid_1[0:3], TODAY, crashid_1),
        data=b"test",
    )
    boto_helper.upload_fileobj(
        bucket_name=BUCKET_NAME,
        key="v1/processed_crash/%s" % crashid_1,
        data=b"test",
    )

    # Create a raw crash
    crashid_2 = "000" + create_new_ooid()[3:]
    boto_helper.upload_fileobj(
        bucket_name=BUCKET_NAME,
        key="v2/raw_crash/%s/%s/%s" % (crashid_2[0:3], TODAY, crashid_2),
        data=b"test",
    )

    cmd = Command()
    missing = cmd.find_missing(num_workers=1, date=TODAY)
    assert missing == [crashid_2]

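# The "000" + create_new_ooid()[3:] trick in the two tests above forces every
# crash id into a single prefix so the raw-crash scan only has to walk one
# "entropy" chunk. get_small_entropy is not shown in this section, so this is
# an assumption about its shape rather than the real helper:
def get_small_entropy(self):
    # Instead of generating all 4096 three-hex-digit prefixes, yield just
    # the one prefix the test data was pinned to.
    yield "000"
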
def test_no_missing_crashes(self, boto_helper):
    """Verify raw crashes with processed crashes result in no missing crashes."""
    boto_helper.get_or_create_bucket(BUCKET_NAME)

    # Create a couple raw and processed crashes
    crashids = [
        create_new_ooid(),
        create_new_ooid(),
        create_new_ooid(),
    ]
    for crashid in crashids:
        boto_helper.set_contents_from_string(
            bucket_name=BUCKET_NAME,
            key='/v2/raw_crash/%s/%s/%s' % (crashid[0:3], TODAY, crashid),
            value='test'
        )
        boto_helper.set_contents_from_string(
            bucket_name=BUCKET_NAME,
            key='/v1/processed_crash/%s' % crashid,
            value='test'
        )

    cmd = Command()
    missing = cmd.find_missing(num_workers=1, date=TODAY)
    assert missing == []

def test_iter(self):
    manager = get_config_manager()
    with manager.context() as config:
        pubsub_helper = PubSubHelper(config)

        with pubsub_helper as pubsub:
            standard_crash = create_new_ooid()
            pubsub.publish('standard', standard_crash)

            reprocessing_crash = create_new_ooid()
            pubsub.publish('reprocessing', reprocessing_crash)

            priority_crash = create_new_ooid()
            pubsub.publish('priority', priority_crash)

            crash_queue = PubSubCrashQueue(config)
            new_crashes = list(crash_queue.new_crashes())

            # Assert the shape of items in new_crashes
            for item in new_crashes:
                assert isinstance(item, tuple)
                assert isinstance(item[0], tuple)  # *args
                assert isinstance(item[1], dict)  # **kwargs
                assert list(item[1].keys()) == ['finished_func']

            # Assert new_crashes order is the correct order
            crash_ids = [item[0][0] for item in new_crashes]
            assert crash_ids == [priority_crash, standard_crash, reprocessing_crash]

def test_no_missing_crashes(self, mock_futures, boto_helper):
    """Verify raw crashes with processed crashes result in no missing crashes."""
    boto_helper.get_or_create_bucket('crashstats')

    # Create a couple raw and processed crashes
    crashids = [
        create_new_ooid(),
        create_new_ooid(),
        create_new_ooid(),
    ]
    for crashid in crashids:
        boto_helper.set_contents_from_string(
            bucket_name='crashstats',
            key='/v2/raw_crash/%s/%s/%s' % (crashid[0:3], TODAY, crashid),
            value='test'
        )
        boto_helper.set_contents_from_string(
            bucket_name='crashstats',
            key='/v1/processed_crash/%s' % crashid,
            value='test'
        )

    with self.get_app() as app:
        missing = app.find_missing(TODAY)

    assert missing == []

def test_handle_missing_some_missing(self, capsys, db):
    crash_ids = [create_new_ooid(), create_new_ooid()]
    crash_ids.sort()

    cmd = Command()
    cmd.handle_missing(TODAY, crash_ids)
    captured = capsys.readouterr()

    assert "Missing: %s" % crash_ids[0] in captured.out
    assert "Missing: %s" % crash_ids[1] in captured.out
    assert crash_ids == list(self.fetch_crashids())

def test_publish_many(self, sqs_helper, queue):
    crash_id_1 = create_new_ooid()
    crash_id_2 = create_new_ooid()
    crash_id_3 = create_new_ooid()

    crash_queue = SQSCrashQueue(get_sqs_config())
    crash_queue.publish(queue, [crash_id_1, crash_id_2])
    crash_queue.publish(queue, [crash_id_3])

    published_crash_ids = sqs_helper.get_published_crashids(queue)
    assert sorted(published_crash_ids) == sorted(
        [crash_id_1, crash_id_2, crash_id_3]
    )

def test_telemetry_has_crash(self, client):
    uuid = create_new_ooid()
    crash_data = {
        "platform": "Linux",
        "signature": "now_this_is_a_signature",
        "uuid": uuid,
    }

    boto_helper = BotoHelper()
    boto_helper.get_or_create_bucket("telemetry-test")
    boto_helper.set_contents_from_string(
        bucket_name="telemetry-test",
        key="/v1/crash_report/20%s/%s" % (uuid[-6:], uuid),
        value=json.dumps(crash_data),
    )

    with self.supersearch_returns_crashes([]):
        url = reverse("api:crash_verify")
        resp = client.get(url, {"crash_id": uuid})

    assert resp.status_code == 200
    data = json.loads(resp.content)
    assert data == {
        "uuid": uuid,
        "s3_telemetry_crash": True,
        "s3_raw_crash": False,
        "s3_processed_crash": False,
        "elasticsearch_crash": False,
    }

def test_processed_has_crash(self, client):
    uuid = create_new_ooid()
    crash_data = {
        'signature': '[@signature]',
        'uuid': uuid,
        'completeddatetime': '2018-03-14 10:56:50.902884',
    }

    boto_helper = BotoHelper()
    boto_helper.get_or_create_bucket('crashstats-test')
    boto_helper.set_contents_from_string(
        bucket_name='crashstats-test',
        key='/v1/processed_crash/%s' % uuid,
        value=json.dumps(crash_data)
    )

    with self.supersearch_returns_crashes([]):
        url = reverse('api:crash_verify')
        resp = client.get(url, {'crash_id': uuid})

    assert resp.status_code == 200
    data = json.loads(resp.content)
    assert data == {
        u'uuid': uuid,
        u's3_processed_crash': True,
        u's3_raw_crash': False,
        u'elasticsearch_crash': False,
        u's3_telemetry_crash': False,
    }

def test_telemetry_has_crash(self, client):
    uuid = create_new_ooid()
    crash_data = {
        'platform': 'Linux',
        'signature': 'now_this_is_a_signature',
        'uuid': uuid
    }

    boto_helper = BotoHelper()
    boto_helper.get_or_create_bucket('telemetry-test')
    boto_helper.set_contents_from_string(
        bucket_name='telemetry-test',
        key='/v1/crash_report/20%s/%s' % (uuid[-6:], uuid),
        value=json.dumps(crash_data)
    )

    with self.supersearch_returns_crashes([]):
        url = reverse('api:crash_verify')
        resp = client.get(url, {'crash_id': uuid})

    assert resp.status_code == 200
    data = json.loads(resp.content)
    assert data == {
        u'uuid': uuid,
        u's3_telemetry_crash': True,
        u's3_raw_crash': False,
        u's3_processed_crash': False,
        u'elasticsearch_crash': False,
    }

def test_publish(capsys):
    pika_path = 'socorro.scripts.add_crashid_to_queue.pika'
    with patch(pika_path) as mock_pika_module:
        conn = MagicMock()
        mock_pika_module.BlockingConnection.return_value = conn
        channel = MagicMock()
        conn.channel.return_value = channel

        crash_id = create_new_ooid()
        exit_code = main(['socorro.normal', crash_id])
        assert exit_code == 0

        # Assert the connection was created correctly
        assert mock_pika_module.ConnectionParameters.call_count == 1
        kwargs = mock_pika_module.ConnectionParameters.mock_calls[0][2]
        # FIXME(willkg): a better way might be to mock os.environ and then
        # provide values that we can assert with more confidence here
        assert kwargs['host'] == os.environ['resource.rabbitmq.host']
        assert kwargs['port'] == int(os.environ.get('resource.rabbitmq.port', '5672'))
        assert kwargs['virtual_host'] == os.environ['resource.rabbitmq.virtual_host']

        # Assert there was one call to basic_publish and check the important
        # arguments which are passed as kwargs
        assert channel.basic_publish.call_count == 1
        args, kwargs = channel.basic_publish.call_args
        assert kwargs['routing_key'] == 'socorro.normal'
        assert kwargs['body'] == crash_id

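# The mocks above mirror the real pika publish path. A minimal sketch of what
# a script like add_crashid_to_queue presumably does (the host and connection
# values are placeholders, not the script's actual configuration):
import pika

def sketch_publish(crash_id, routing_key="socorro.normal", host="localhost"):
    conn = pika.BlockingConnection(pika.ConnectionParameters(host=host))
    try:
        channel = conn.channel()
        # Publish to the default exchange, where the routing key doubles
        # as the queue name.
        channel.basic_publish(exchange="", routing_key=routing_key, body=crash_id)
    finally:
        conn.close()
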
def test_raw_crash_has_crash(self, client):
    uuid = create_new_ooid()
    crash_data = {
        'submitted_timestamp': '2018-03-14-09T22:21:18.646733+00:00'
    }

    boto_helper = BotoHelper()
    raw_crash_key = '/v2/raw_crash/%s/20%s/%s' % (uuid[0:3], uuid[-6:], uuid)
    boto_helper.get_or_create_bucket('crashstats-test')
    boto_helper.set_contents_from_string(
        bucket_name='crashstats-test',
        key=raw_crash_key,
        value=json.dumps(crash_data)
    )

    with self.supersearch_returns_crashes([]):
        url = reverse('api:crash_verify')
        resp = client.get(url, {'crash_id': uuid})

    assert resp.status_code == 200
    data = json.loads(resp.content)
    assert data == {
        u'uuid': uuid,
        u's3_raw_crash': True,
        u's3_processed_crash': False,
        u'elasticsearch_crash': False,
        u's3_telemetry_crash': False,
    }

def test_ack(self):
    original_crash_id = create_new_ooid()

    manager = get_config_manager()
    with manager.context() as config:
        pubsub_helper = PubSubHelper(config)

        with pubsub_helper as pubsub:
            # Publish crash id to the queue
            pubsub.publish('standard', original_crash_id)

            crash_queue = PubSubCrashQueue(config)
            new_crashes = list(crash_queue.new_crashes())

            # Assert original_crash_id is in new_crashes
            crash_ids = [item[0][0] for item in new_crashes]
            assert crash_ids == [original_crash_id]

            # Now call it again; note that we haven't acked the crash_ids
            # nor have the leases expired
            second_new_crashes = list(crash_queue.new_crashes())
            assert second_new_crashes == []

            # Now ack the crash_id and we don't get it again
            for args, kwargs in new_crashes:
                kwargs['finished_func']()

            # Wait beyond the ack deadline in the grossest way possible
            time.sleep(ACK_DEADLINE + 1)

            # Now call it again and make sure we get nothing back
            new_crashes = list(crash_queue.new_crashes())
            assert new_crashes == []

def test_raw_crash_has_crash(self, client):
    uuid = create_new_ooid()
    crash_data = {
        "submitted_timestamp": "2018-03-14-09T22:21:18.646733+00:00"
    }

    boto_helper = BotoHelper()
    raw_crash_key = "/v2/raw_crash/%s/20%s/%s" % (uuid[0:3], uuid[-6:], uuid)
    boto_helper.get_or_create_bucket("crashstats-test")
    boto_helper.set_contents_from_string(
        bucket_name="crashstats-test",
        key=raw_crash_key,
        value=json.dumps(crash_data),
    )

    with self.supersearch_returns_crashes([]):
        url = reverse("api:crash_verify")
        resp = client.get(url, {"crash_id": uuid})

    assert resp.status_code == 200
    data = json.loads(resp.content)
    assert data == {
        "uuid": uuid,
        "s3_raw_crash": True,
        "s3_processed_crash": False,
        "elasticsearch_crash": False,
        "s3_telemetry_crash": False,
    }

def test_processed_has_crash(self, client):
    uuid = create_new_ooid()
    crash_data = {
        "signature": "[@signature]",
        "uuid": uuid,
        "completeddatetime": "2018-03-14 10:56:50.902884",
    }

    boto_helper = BotoHelper()
    boto_helper.get_or_create_bucket("crashstats-test")
    boto_helper.set_contents_from_string(
        bucket_name="crashstats-test",
        key="/v1/processed_crash/%s" % uuid,
        value=json.dumps(crash_data),
    )

    with self.supersearch_returns_crashes([]):
        url = reverse("api:crash_verify")
        resp = client.get(url, {"crash_id": uuid})

    assert resp.status_code == 200
    data = json.loads(resp.content)
    assert data == {
        "uuid": uuid,
        "s3_processed_crash": True,
        "s3_raw_crash": False,
        "elasticsearch_crash": False,
        "s3_telemetry_crash": False,
    }

def test_ack(self, sqs_helper):
    original_crash_id = create_new_ooid()

    # Publish crash id to the queue
    sqs_helper.publish("standard", original_crash_id)

    crash_queue = SQSCrashQueue(get_sqs_config())
    new_crashes = list(crash_queue.new_crashes())

    # Assert original_crash_id is in new_crashes
    crash_ids = [item[0][0] for item in new_crashes]
    assert crash_ids == [original_crash_id]

    # Now call it again; note that we haven't acked the crash_ids
    # nor have the leases expired
    second_new_crashes = list(crash_queue.new_crashes())
    assert second_new_crashes == []

    # Now ack the crash_id and we don't get it again
    for args, kwargs in new_crashes:
        kwargs["finished_func"]()

    time.sleep(VISIBILITY_TIMEOUT + 1)

    # Now call it again and make sure we get nothing back
    new_crashes = list(crash_queue.new_crashes())
    assert new_crashes == []

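# Both ack tests above (the Pub/Sub one and the SQS one just shown) rely on
# the same broker behavior: a received-but-unacked message stays invisible
# only until its lease (ack deadline / visibility timeout) lapses, then
# reappears. A minimal boto3 sketch of that receive-then-ack loop; the queue
# URL is a placeholder and the timeout value is an assumption:
import boto3

def sketch_receive_and_ack(queue_url, visibility_timeout=2):
    client = boto3.client("sqs")
    resp = client.receive_message(
        QueueUrl=queue_url,
        MaxNumberOfMessages=1,
        VisibilityTimeout=visibility_timeout,  # lease length in seconds
    )
    for msg in resp.get("Messages", []):
        # Until delete_message is called the message is merely invisible;
        # skipping this "ack" puts it back on the queue after the timeout.
        client.delete_message(QueueUrl=queue_url, ReceiptHandle=msg["ReceiptHandle"])
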
def test_telemetry_has_crash(self, boto_helper, client):
    self.create_s3_buckets(boto_helper)

    uuid = create_new_ooid()
    crash_data = {
        "platform": "Linux",
        "signature": "now_this_is_a_signature",
        "uuid": uuid,
    }
    telemetry_bucket = settings.SOCORRO_CONFIG["telemetrydata"]["bucket_name"]
    boto_helper.upload_fileobj(
        bucket_name=telemetry_bucket,
        key="v1/crash_report/20%s/%s" % (uuid[-6:], uuid),
        data=json.dumps(crash_data).encode("utf-8"),
    )

    with self.supersearch_returns_crashes([]):
        url = reverse("api:crash_verify")
        resp = client.get(url, {"crash_id": uuid})

    assert resp.status_code == 200
    data = json.loads(resp.content)
    assert data == {
        "uuid": uuid,
        "s3_telemetry_crash": True,
        "s3_raw_crash": False,
        "s3_processed_crash": False,
        "elasticsearch_crash": False,
    }

def test_processed_has_crash(self, boto_helper, client):
    self.create_s3_buckets(boto_helper)

    uuid = create_new_ooid()
    crash_data = {
        "signature": "[@signature]",
        "uuid": uuid,
        "completeddatetime": "2018-03-14 10:56:50.902884",
    }
    bucket = settings.SOCORRO_CONFIG["resource"]["boto"]["bucket_name"]
    boto_helper.upload_fileobj(
        bucket_name=bucket,
        key="v1/processed_crash/%s" % uuid,
        data=json.dumps(crash_data).encode("utf-8"),
    )

    with self.supersearch_returns_crashes([]):
        url = reverse("api:crash_verify")
        resp = client.get(url, {"crash_id": uuid})

    assert resp.status_code == 200
    data = json.loads(resp.content)
    assert data == {
        "uuid": uuid,
        "s3_processed_crash": True,
        "s3_raw_crash": False,
        "elasticsearch_crash": False,
        "s3_telemetry_crash": False,
    }

def test_Reprocessing(self):
    crash_id = create_new_ooid()

    def mocked_publish(queue, crash_ids):
        assert queue == "reprocessing"
        assert crash_ids == [crash_id]
        return True

    Reprocessing.implementation().publish = mocked_publish

    url = reverse("api:model_wrapper", args=("Reprocessing",))
    response = self.client.get(url)
    assert response.status_code == 403

    params = {"crash_ids": crash_id}
    response = self.client.get(url, params, HTTP_AUTH_TOKEN="somecrap")
    assert response.status_code == 403

    user = User.objects.create(username="******")
    self._add_permission(user, "reprocess_crashes")
    perm = Permission.objects.get(codename="reprocess_crashes")

    # but make a token that only has the 'reprocess_crashes'
    # permission associated with it
    token = Token.objects.create(user=user, notes="Only reprocessing")
    token.permissions.add(perm)

    response = self.client.get(url, params, HTTP_AUTH_TOKEN=token.key)
    assert response.status_code == 405

    response = self.client.post(url, params, HTTP_AUTH_TOKEN=token.key)
    assert response.status_code == 200
    assert json.loads(response.content) is True

def test_raw_crash_has_crash(self, boto_helper, client):
    self.create_s3_buckets(boto_helper)

    uuid = create_new_ooid()
    crash_data = {
        "submitted_timestamp": "2018-03-14-09T22:21:18.646733+00:00"
    }
    bucket = settings.SOCORRO_CONFIG["resource"]["boto"]["bucket_name"]
    raw_crash_key = "v2/raw_crash/%s/20%s/%s" % (uuid[0:3], uuid[-6:], uuid)
    boto_helper.upload_fileobj(
        bucket_name=bucket,
        key=raw_crash_key,
        data=json.dumps(crash_data).encode("utf-8"),
    )

    with self.supersearch_returns_crashes([]):
        url = reverse("api:crash_verify")
        resp = client.get(url, {"crash_id": uuid})

    assert resp.status_code == 200
    data = json.loads(resp.content)
    assert data == {
        "uuid": uuid,
        "s3_raw_crash": True,
        "s3_processed_crash": False,
        "elasticsearch_crash": False,
        "s3_telemetry_crash": False,
    }

def test_publish_one(self, sqs_helper, queue):
    crash_id = create_new_ooid()

    crash_queue = SQSCrashQueue(get_sqs_config())
    crash_queue.publish(queue, [crash_id])

    published_crash_ids = sqs_helper.get_published_crashids(queue)
    assert published_crash_ids == [crash_id]

def test_handle_missing_some_missing(self, caplogpp, db_conn):
    caplogpp.set_level('DEBUG')

    crash_ids = [
        create_new_ooid(),
        create_new_ooid(),
    ]
    crash_ids.sort()

    with self.get_app() as app:
        app.handle_missing(TODAY, crash_ids)

    recs = [rec.message for rec in caplogpp.records]
    assert 'Missing: %s' % crash_ids[0] in recs
    assert 'Missing: %s' % crash_ids[1] in recs

    crash_ids_in_db = [
        item['crash_id'] for item in self.fetch_crashids(db_conn)
    ]
    crash_ids_in_db.sort()
    assert crash_ids == crash_ids_in_db

def test_missing_crashes(self, boto_helper, es_conn, monkeypatch):
    """Verify it finds a missing crash."""
    monkeypatch.setattr(Command, "get_entropy", get_small_entropy)
    bucket = settings.SOCORRO_CONFIG["resource"]["boto"]["bucket_name"]
    boto_helper.create_bucket(bucket)

    # Create a raw and processed crash
    crash_id_1 = "000" + create_new_ooid()[3:]
    self.create_raw_crash_in_s3(boto_helper, crash_id_1)
    self.create_processed_crash_in_s3(boto_helper, crash_id_1)
    self.create_processed_crash_in_es(es_conn, crash_id_1)

    # Create a raw crash
    crash_id_2 = "000" + create_new_ooid()[3:]
    self.create_raw_crash_in_s3(boto_helper, crash_id_2)

    cmd = Command()
    missing = cmd.find_missing(num_workers=1, date=TODAY)
    assert missing == [crash_id_2]

def test_past_missing_still_missing(self, capsys, db):
    # Create a MissingProcessedCrash row, but don't put the processed crash
    # in the bucket. After check_past_missing() runs, the
    # MissingProcessedCrash should still have is_processed=False.
    crash_id = create_new_ooid()
    mpe = MissingProcessedCrash(crash_id=crash_id, is_processed=False)
    mpe.save()

    cmd = Command()
    cmd.check_past_missing()

    mpe = MissingProcessedCrash.objects.get(crash_id=crash_id)
    assert mpe.is_processed is False

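# test_past_missing_still_missing only touches two fields of the
# MissingProcessedCrash model. A hedged sketch of that shape, inferred from
# usage (the field types and options are assumptions, not the actual schema;
# it would live in an installed app's models.py):
from django.db import models

class MissingProcessedCrashSketch(models.Model):
    crash_id = models.CharField(max_length=36, unique=True)
    is_processed = models.BooleanField(default=False)
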
def test_no_missing_crashes(self, boto_helper, es_conn, monkeypatch):
    """Verify raw crashes with processed crashes result in no missing crashes."""
    monkeypatch.setattr(Command, "get_entropy", get_small_entropy)
    bucket = settings.SOCORRO_CONFIG["resource"]["boto"]["bucket_name"]
    boto_helper.create_bucket(bucket)

    # Create a few raw and processed crashes
    crashids = [
        "000" + create_new_ooid()[3:],
        "000" + create_new_ooid()[3:],
        "000" + create_new_ooid()[3:],
    ]
    for crash_id in crashids:
        self.create_raw_crash_in_s3(boto_helper, crash_id)
        self.create_processed_crash_in_s3(boto_helper, crash_id)
        self.create_processed_crash_in_es(es_conn, crash_id)
    es_conn.refresh()

    cmd = Command()
    missing = cmd.find_missing(num_workers=1, date=TODAY)
    assert missing == []

def test_elasticsearch_has_crash(self, client):
    uuid = create_new_ooid()

    with self.supersearch_returns_crashes([uuid]):
        url = reverse('api:crash_verify')
        resp = client.get(url, {'crash_id': uuid})

    assert resp.status_code == 200
    data = json.loads(resp.content)
    assert data == {
        u'uuid': uuid,
        u'elasticsearch_crash': True,
        u's3_raw_crash': False,
        u's3_processed_crash': False,
        u's3_telemetry_crash': False,
    }

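# Taken together, the crash_verify tests pin down the endpoint's contract: the
# uuid plus one boolean per datastore. A sketch of that aggregation, with each
# lookup passed in as a callable since the real helpers aren't shown here:
def sketch_crash_verify(uuid, raw, processed, telemetry, es):
    # raw/processed/telemetry/es are hypothetical callables returning bool
    # for one datastore each.
    return {
        "uuid": uuid,
        "s3_raw_crash": raw(uuid),
        "s3_processed_crash": processed(uuid),
        "s3_telemetry_crash": telemetry(uuid),
        "elasticsearch_crash": es(uuid),
    }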