Example #1
    def test_monitor_measures(self):
        result = monitor_measures.delay().get()
        self.check_stats(gauge=[('table.cell_measure', 1),
                                ('table.wifi_measure', 1)], )
        self.assertEqual(result, {'cell_measure': -1, 'wifi_measure': -1})

        # add some observations
        CellObservationFactory.create_batch(3)
        WifiObservationFactory.create_batch(5)
        self.session.flush()

        result = monitor_measures.delay().get()
        self.check_stats(gauge=[('table.cell_measure', 2),
                                ('table.wifi_measure', 2)], )
        self.assertEqual(result, {'cell_measure': 3, 'wifi_measure': 5})
Example #2
    def test_created_from_blocklist_time(self):
        now = util.utcnow()
        last_week = now - TEMPORARY_BLOCKLIST_DURATION - timedelta(days=1)

        obs = CellObservationFactory.build()
        self.session.add(
            CellBlocklist(time=last_week, count=1,
                          radio=obs.radio, mcc=obs.mcc,
                          mnc=obs.mnc, lac=obs.lac, cid=obs.cid))
        self.session.flush()

        # add a new entry for the previously blocklisted cell
        self.data_queue.enqueue([obs])
        self.assertEqual(self.data_queue.size(), 1)
        update_cell.delay().get()

        # the cell was inserted again
        cells = self.session.query(Cell).all()
        self.assertEqual(len(cells), 1)

        # and the creation date was set to the date of the blocklist entry
        self.assertEqual(cells[0].created, last_week)

        self.check_statcounter(StatKey.cell, 1)
        self.check_statcounter(StatKey.unique_cell, 0)
Example #3
    def test_blocklist(self):
        now = util.utcnow()
        today = now.date()
        observations = CellObservationFactory.build_batch(3)
        obs = observations[0]
        CellShardFactory(
            radio=obs.radio, mcc=obs.mcc, mnc=obs.mnc,
            lac=obs.lac, cid=obs.cid,
            created=now,
            block_first=today - timedelta(days=10),
            block_last=today,
            block_count=1,
        )
        self.session.commit()
        self._queue_and_update_cell(observations)

        blocks = []
        for obs in observations:
            shard = CellShard.shard_model(obs.cellid)
            cell = (self.session.query(shard)
                                .filter(shard.cellid == obs.cellid)).one()
            if cell.blocked():
                blocks.append(cell)

        self.assertEqual(len(blocks), 1)
        self.check_statcounter(StatKey.cell, 2)
        self.check_statcounter(StatKey.unique_cell, 2)
Example #4
    def test_blocklist(self):
        now = util.utcnow()
        today = now.date()
        observations = CellObservationFactory.build_batch(3)
        obs = observations[0]
        CellShardFactory(
            radio=obs.radio,
            mcc=obs.mcc,
            mnc=obs.mnc,
            lac=obs.lac,
            cid=obs.cid,
            created=now,
            block_first=today - timedelta(days=10),
            block_last=today,
            block_count=1,
        )
        self.session.commit()
        self._queue_and_update(observations)

        blocks = []
        for obs in observations:
            shard = CellShard.shard_model(obs.cellid)
            cell = (self.session.query(shard).filter(
                shard.cellid == obs.cellid)).one()
            if cell.blocked():
                blocks.append(cell)

        self.assertEqual(len(blocks), 1)
        self.check_statcounter(StatKey.cell, 2)
        self.check_statcounter(StatKey.unique_cell, 2)
Example #5
    def test_monitor_measures(self):
        result = monitor_measures.delay().get()
        self.check_stats(
            gauge=[('table.cell_measure', 1), ('table.wifi_measure', 1)],
        )
        self.assertEqual(result, {'cell_measure': -1, 'wifi_measure': -1})

        # add some observations
        CellObservationFactory.create_batch(3)
        WifiObservationFactory.create_batch(5)
        self.session.flush()

        result = monitor_measures.delay().get()
        self.check_stats(
            gauge=[('table.cell_measure', 2), ('table.wifi_measure', 2)],
        )
        self.assertEqual(result, {'cell_measure': 3, 'wifi_measure': 5})
Example #6
    def test_lock_timeout(self, celery, redis, session, session2, metricsmock,
                          restore_db):
        obs = CellObservationFactory.build()
        cell = CellShardFactory.build(
            radio=obs.radio,
            mcc=obs.mcc,
            mnc=obs.mnc,
            lac=obs.lac,
            cid=obs.cid,
            samples=10,
        )
        session2.add(cell)
        session2.flush()

        orig_add_area = CellUpdater.add_area_update
        orig_wait = CellUpdater._retry_wait
        num = [0]

        def mock_area(self, updated_areas, key, num=num, session2=session2):
            orig_add_area(self, updated_areas, key)
            num[0] += 1
            if num[0] == 2:
                session2.rollback()

        try:
            CellUpdater._retry_wait = 0.0001
            session.execute("set session innodb_lock_wait_timeout = 1")
            with mock.patch.object(CellUpdater, "add_area_update", mock_area):
                self.queue_and_update(celery, [obs])

            # the inner task logic was called exactly twice
            assert num[0] == 2

            shard = CellShard.shard_model(obs.cellid)
            cells = session.query(shard).all()
            assert len(cells) == 1
            assert cells[0].samples == 1

            self.check_statcounter(redis, StatKey.cell, 1)
            self.check_statcounter(redis, StatKey.unique_cell, 1)

            # Assert generated metrics are correct
            assert (len(
                metricsmock.filter_records("incr",
                                           "data.observation.insert",
                                           value=1,
                                           tags=["type:cell"])) == 1)
            assert (len(
                metricsmock.filter_records("timing",
                                           "task",
                                           tags=["task:data.update_cell"
                                                 ])) == 1)
        finally:
            CellUpdater._retry_wait = orig_wait
            session.execute(text("drop table %s;" % cell.__tablename__))
Example #7
 def test_json(self):
     obs = CellObservationFactory.build(accuracy=None)
     result = CellObservation.from_json(simplejson.loads(
         simplejson.dumps(obs.to_json())))
     self.assertEqual(type(result), CellObservation)
     self.assertTrue(result.accuracy is None)
     self.assertEqual(type(result.radio), Radio)
     self.assertEqual(result.radio, obs.radio)
     self.assertEqual(result.mcc, obs.mcc)
     self.assertEqual(result.mnc, obs.mnc)
     self.assertEqual(result.lac, obs.lac)
     self.assertEqual(result.cid, obs.cid)
     self.assertEqual(result.lat, obs.lat)
     self.assertEqual(result.lon, obs.lon)
Example #8
    def test_delete_cell_observations(self):
        obs = CellObservationFactory.create_batch(50, created=self.old)
        self.session.flush()

        start_id = obs[0].id + 20
        block = ObservationBlockFactory(
            measure_type=ObservationType.cell,
            start_id=start_id, end_id=start_id + 20, archive_date=None)
        self.session.commit()

        delete_cellmeasure_records.delay(batch=3).get()

        self.assertEquals(self.session.query(CellObservation).count(), 30)
        self.assertTrue(block.archive_date is not None)
Example #9
    def test_shard_queues(self):  # BBB
        observations = CellObservationFactory.build_batch(3)
        data_queues = self.celery_app.data_queues
        single_queue = data_queues['update_cell']
        single_queue.enqueue(observations)
        update_cell.delay().get()

        self.assertEqual(single_queue.size(), 0)

        total = 0
        for shard_id in CellShard.shards().keys():
            total += data_queues['update_cell_' + shard_id].size()

        self.assertEqual(total, 3)
Example #10
    def test_lock_timeout(self):
        obs = CellObservationFactory.build()
        cell = CellShardFactory.build(
            radio=obs.radio,
            mcc=obs.mcc,
            mnc=obs.mnc,
            lac=obs.lac,
            cid=obs.cid,
            samples=10,
        )
        self.db_ro_session.add(cell)
        self.db_ro_session.flush()

        orig_add_area = CellUpdater.add_area_update
        orig_wait = CellUpdater._retry_wait
        num = [0]

        def mock_area(self,
                      updated_areas,
                      key,
                      num=num,
                      ro_session=self.db_ro_session):
            orig_add_area(self, updated_areas, key)
            num[0] += 1
            if num[0] == 2:
                ro_session.rollback()

        try:
            CellUpdater._retry_wait = 0.001
            self.session.execute('set session innodb_lock_wait_timeout = 1')
            with mock.patch.object(CellUpdater, 'add_area_update', mock_area):
                self._queue_and_update_cell([obs])
        finally:
            CellUpdater._retry_wait = orig_wait

        # the inner task logic was called exactly twice
        self.assertEqual(num[0], 2)

        shard = CellShard.shard_model(obs.cellid)
        cells = self.session.query(shard).all()
        self.assertEqual(len(cells), 1)
        self.assertEqual(cells[0].samples, 1)

        self.check_statcounter(StatKey.cell, 1)
        self.check_statcounter(StatKey.unique_cell, 1)
        self.check_stats(
            counter=[('data.observation.insert', 1, ['type:cell'])],
            timer=[('task', 1, ['task:data.update_cell'])],
        )
Example #11
    def test_json(self):
        obs = CellObservationFactory.build(accuracy=None, source="fixed")
        result = CellObservation.from_json(json.loads(json.dumps(obs.to_json())))

        assert type(result) is CellObservation
        assert result.accuracy is None
        assert type(result.radio) is Radio
        assert result.radio == obs.radio
        assert result.mcc == obs.mcc
        assert result.mnc == obs.mnc
        assert result.lac == obs.lac
        assert result.cid == obs.cid
        assert result.lat == obs.lat
        assert result.lon == obs.lon
        assert result.source is ReportSource.fixed
        assert type(result.source) is ReportSource
Example #12
    def test_delete_cell_observations(self):
        obs = CellObservationFactory.create_batch(50, created=self.old)
        self.session.flush()

        start_id = obs[0].id + 20
        block = ObservationBlockFactory(measure_type=ObservationType.cell,
                                        start_id=start_id,
                                        end_id=start_id + 20,
                                        archive_date=None)
        self.session.commit()

        with patch.object(S3Backend, 'check_archive', lambda x, y, z: True):
            delete_cellmeasure_records.delay(batch=3).get()

        self.assertEquals(self.session.query(CellObservation).count(), 30)
        self.assertTrue(block.archive_date is not None)
Example #13
    def test_lock_timeout(self, celery, db_rw_drop_table,
                          redis, ro_session, session, stats):
        obs = CellObservationFactory.build()
        cell = CellShardFactory.build(
            radio=obs.radio, mcc=obs.mcc, mnc=obs.mnc,
            lac=obs.lac, cid=obs.cid,
            samples=10,
        )
        ro_session.add(cell)
        ro_session.flush()

        orig_add_area = CellUpdater.add_area_update
        orig_wait = CellUpdater._retry_wait
        num = [0]

        def mock_area(self, updated_areas, key,
                      num=num, ro_session=ro_session):
            orig_add_area(self, updated_areas, key)
            num[0] += 1
            if num[0] == 2:
                ro_session.rollback()

        try:
            CellUpdater._retry_wait = 0.0001
            session.execute('set session innodb_lock_wait_timeout = 1')
            with mock.patch.object(CellUpdater, 'add_area_update', mock_area):
                self.queue_and_update(celery, [obs])

            # the inner task logic was called exactly twice
            assert num[0] == 2

            shard = CellShard.shard_model(obs.cellid)
            cells = session.query(shard).all()
            assert len(cells) == 1
            assert cells[0].samples == 1

            self.check_statcounter(redis, StatKey.cell, 1)
            self.check_statcounter(redis, StatKey.unique_cell, 1)
            stats.check(
                counter=[('data.observation.insert', 1, ['type:cell'])],
                timer=[('task', 1, ['task:data.update_cell'])],
            )
        finally:
            CellUpdater._retry_wait = orig_wait
            for model in CellShard.shards().values():
                session.execute(text(
                    'drop table %s;' % model.__tablename__))
Example #14
    def test_lock_timeout(self, celery, redis, session, session2, stats,
                          restore_db):
        obs = CellObservationFactory.build()
        cell = CellShardFactory.build(
            radio=obs.radio,
            mcc=obs.mcc,
            mnc=obs.mnc,
            lac=obs.lac,
            cid=obs.cid,
            samples=10,
        )
        session2.add(cell)
        session2.flush()

        orig_add_area = CellUpdater.add_area_update
        orig_wait = CellUpdater._retry_wait
        num = [0]

        def mock_area(self, updated_areas, key, num=num, session2=session2):
            orig_add_area(self, updated_areas, key)
            num[0] += 1
            if num[0] == 2:
                session2.rollback()

        try:
            CellUpdater._retry_wait = 0.0001
            session.execute('set session innodb_lock_wait_timeout = 1')
            with mock.patch.object(CellUpdater, 'add_area_update', mock_area):
                self.queue_and_update(celery, [obs])

            # the inner task logic was called exactly twice
            assert num[0] == 2

            shard = CellShard.shard_model(obs.cellid)
            cells = session.query(shard).all()
            assert len(cells) == 1
            assert cells[0].samples == 1

            self.check_statcounter(redis, StatKey.cell, 1)
            self.check_statcounter(redis, StatKey.unique_cell, 1)
            stats.check(
                counter=[('data.observation.insert', 1, ['type:cell'])],
                timer=[('task', 1, ['task:data.update_cell'])],
            )
        finally:
            CellUpdater._retry_wait = orig_wait
            session.execute(text('drop table %s;' % cell.__tablename__))
Example #15
    def test_json(self):
        obs = CellObservationFactory.build(
            accuracy=None, source='fixed')
        result = CellObservation.from_json(simplejson.loads(
            simplejson.dumps(obs.to_json())))

        assert type(result) is CellObservation
        assert result.accuracy is None
        assert type(result.radio) is Radio
        assert result.radio == obs.radio
        assert result.mcc == obs.mcc
        assert result.mnc == obs.mnc
        assert result.lac == obs.lac
        assert result.cid == obs.cid
        assert result.lat == obs.lat
        assert result.lon == obs.lon
        assert result.source is ReportSource.fixed
        assert type(result.source) is ReportSource
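The test_json examples above all exercise the same round-trip: an observation is serialized with to_json(), passed through a JSON dump and load, rebuilt with from_json(), and then compared field by field, with extra care for enum-typed values such as radio and source. A minimal, self-contained sketch of that pattern, using a hypothetical Observation dataclass and Radio enum rather than ichnaea's actual models, could look like this:

import json
from dataclasses import dataclass, asdict
from enum import Enum
from typing import Optional


class Radio(Enum):
    # hypothetical stand-in for ichnaea's Radio enum
    gsm = 0
    lte = 3


@dataclass
class Observation:
    radio: Radio
    mcc: int
    mnc: int
    accuracy: Optional[float] = None

    def to_json(self):
        # enum members are not JSON-serializable, so store the name
        data = asdict(self)
        data["radio"] = self.radio.name
        return data

    @classmethod
    def from_json(cls, data):
        # restore the enum member from its stored name
        data["radio"] = Radio[data["radio"]]
        return cls(**data)


obs = Observation(radio=Radio.lte, mcc=310, mnc=410, accuracy=None)
result = Observation.from_json(json.loads(json.dumps(obs.to_json())))
assert result == obs
assert type(result.radio) is Radio
assert result.accuracy is None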
Example #16
    def test_lock_timeout(self):
        obs = CellObservationFactory.build()
        cell = CellShardFactory.build(
            radio=obs.radio, mcc=obs.mcc, mnc=obs.mnc,
            lac=obs.lac, cid=obs.cid,
            samples=10,
        )
        self.db_ro_session.add(cell)
        self.db_ro_session.flush()

        orig_add_area = CellUpdater.add_area_update
        orig_wait = CellUpdater._retry_wait
        num = [0]

        def mock_area(self, updated_areas, key,
                      num=num, ro_session=self.db_ro_session):
            orig_add_area(self, updated_areas, key)
            num[0] += 1
            if num[0] == 2:
                ro_session.rollback()

        try:
            CellUpdater._retry_wait = 0.001
            self.session.execute('set session innodb_lock_wait_timeout = 1')
            with mock.patch.object(CellUpdater, 'add_area_update', mock_area):
                self._queue_and_update_cell([obs])
        finally:
            CellUpdater._retry_wait = orig_wait

        # the inner task logic was called exactly twice
        self.assertEqual(num[0], 2)

        shard = CellShard.shard_model(obs.cellid)
        cells = self.session.query(shard).all()
        self.assertEqual(len(cells), 1)
        self.assertEqual(cells[0].samples, 1)

        self.check_statcounter(StatKey.cell, 1)
        self.check_statcounter(StatKey.unique_cell, 1)
        self.check_stats(
            counter=[('data.observation.insert', 1, ['type:cell'])],
            timer=[('task', 1, ['task:data.update_cell'])],
        )
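The test_lock_timeout examples above share one structure: a class-level retry delay is shrunk, one method is wrapped via mock.patch.object so that it misbehaves on a later call, the task is run, and the original attribute is restored in a finally block so other tests are unaffected. Stripped of the ichnaea-specific pieces, and with a hypothetical Updater class standing in for CellUpdater, the pattern reduces to something like:

from unittest import mock


class Updater:
    # hypothetical stand-in for CellUpdater
    _retry_wait = 10.0  # production delay between retries, in seconds

    def add_area_update(self, updated_areas, key):
        updated_areas.add(key)


def run_with_flaky_add_area():
    orig_add_area = Updater.add_area_update
    orig_wait = Updater._retry_wait
    num = [0]

    def mock_area(self, updated_areas, key, num=num):
        # call the real implementation, then fail on the second invocation
        orig_add_area(self, updated_areas, key)
        num[0] += 1
        if num[0] == 2:
            raise RuntimeError("simulated lock timeout")

    try:
        Updater._retry_wait = 0.0001  # keep the test fast
        with mock.patch.object(Updater, "add_area_update", mock_area):
            areas = set()
            updater = Updater()
            for key in ("area1", "area2"):
                try:
                    updater.add_area_update(areas, key)
                except RuntimeError:
                    pass  # a real task would sleep _retry_wait and retry
        return num[0], areas
    finally:
        # restore the class attribute even if something above fails
        Updater._retry_wait = orig_wait


assert run_with_flaky_add_area() == (2, {"area1", "area2"})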
Example #17
    def test_backup_cell_to_s3(self):
        batch_size = 10
        obs = CellObservationFactory.create_batch(batch_size, created=self.old)
        self.session.flush()
        start_id = obs[0].id

        blocks = schedule_cellmeasure_archival.delay(batch=batch_size).get()
        self.assertEquals(len(blocks), 1)
        block = blocks[0]
        self.assertEquals(block, (start_id, start_id + batch_size))

        write_cellmeasure_s3_backups.delay(cleanup_zip=False).get()

        blocks = self.session.query(ObservationBlock).all()
        self.assertEquals(len(blocks), 1)
        block = blocks[0]

        self.assertEqual(block.archive_sha, '20bytes_mean_success')
        self.assertEqual(block.s3_key, 'skipped')
        self.assertTrue(block.archive_date is None)
Example #18
    def test_backup_cell_to_s3(self):
        batch_size = 10
        obs = CellObservationFactory.create_batch(batch_size, created=self.old)
        self.session.flush()
        start_id = obs[0].id

        blocks = schedule_cellmeasure_archival.delay(batch=batch_size).get()
        self.assertEquals(len(blocks), 1)
        block = blocks[0]
        self.assertEquals(block, (start_id, start_id + batch_size))

        with mock_s3():
            with patch.object(S3Backend, 'backup_archive',
                              lambda x, y, z: True):
                write_cellmeasure_s3_backups.delay(cleanup_zip=False).get()

                raven_msgs = self.raven_client.msgs
                fname = [
                    m['message'].split(':')[1] for m in raven_msgs
                    if m['message'].startswith('s3.backup:')
                ][0]
                myzip = ZipFile(fname)
                try:
                    contents = set(myzip.namelist())
                    expected_contents = set(
                        ['alembic_revision.txt', 'cell_measure.csv'])
                    self.assertEquals(expected_contents, contents)
                finally:
                    myzip.close()

        blocks = self.session.query(ObservationBlock).all()

        self.assertEquals(len(blocks), 1)
        block = blocks[0]

        actual_sha = hashlib.sha1()
        actual_sha.update(open(fname, 'rb').read())
        self.assertEquals(block.archive_sha, actual_sha.digest())
        self.assertTrue(block.s3_key is not None)
        self.assertTrue('/cell_' in block.s3_key)
        self.assertTrue(block.archive_date is None)
Example #19
    def test_blocklist(self):
        now = util.utcnow()
        observations = CellObservationFactory.build_batch(3)
        obs = observations[0]

        block = CellBlocklist(
            radio=obs.radio, mcc=obs.mcc, mnc=obs.mnc,
            lac=obs.lac, cid=obs.cid,
            time=now, count=1,
        )
        self.session.add(block)
        self.session.flush()

        self.data_queue.enqueue(observations)
        self.assertEqual(self.data_queue.size(), 3)
        update_cell.delay().get()

        cells = self.session.query(Cell).all()
        self.assertEqual(len(cells), 2)

        self.check_statcounter(StatKey.cell, 2)
        self.check_statcounter(StatKey.unique_cell, 2)
Example #20
    def test_backup_cell_to_s3(self):
        batch_size = 10
        obs = CellObservationFactory.create_batch(batch_size, created=self.old)
        self.session.flush()
        start_id = obs[0].id

        blocks = schedule_cellmeasure_archival.delay(batch=batch_size).get()
        self.assertEquals(len(blocks), 1)
        block = blocks[0]
        self.assertEquals(block, (start_id, start_id + batch_size))

        with mock_s3():
            with patch.object(S3Backend,
                              'backup_archive', lambda x, y, z: True):
                write_cellmeasure_s3_backups.delay(cleanup_zip=False).get()

                raven_msgs = self.raven_client.msgs
                fname = [m['message'].split(':')[1] for m in raven_msgs
                         if m['message'].startswith('s3.backup:')][0]
                myzip = ZipFile(fname)
                try:
                    contents = set(myzip.namelist())
                    expected_contents = set(['alembic_revision.txt',
                                             'cell_measure.csv'])
                    self.assertEquals(expected_contents, contents)
                finally:
                    myzip.close()

        blocks = self.session.query(ObservationBlock).all()

        self.assertEquals(len(blocks), 1)
        block = blocks[0]

        actual_sha = hashlib.sha1()
        actual_sha.update(open(fname, 'rb').read())
        self.assertEquals(block.archive_sha, actual_sha.digest())
        self.assertTrue(block.s3_key is not None)
        self.assertTrue('/cell_' in block.s3_key)
        self.assertTrue(block.archive_date is None)
Example #21
    def test_schedule_cell_observations(self):
        blocks = schedule_cellmeasure_archival.delay(batch=1).get()
        self.assertEquals(len(blocks), 0)

        obs = CellObservationFactory.create_batch(20, created=self.old)
        self.session.flush()
        start_id = obs[0].id

        blocks = schedule_cellmeasure_archival.delay(batch=15).get()
        self.assertEquals(len(blocks), 1)
        block = blocks[0]
        self.assertEquals(block, (start_id, start_id + 15))

        blocks = schedule_cellmeasure_archival.delay(batch=6).get()
        self.assertEquals(len(blocks), 0)

        blocks = schedule_cellmeasure_archival.delay(batch=5).get()
        self.assertEquals(len(blocks), 1)
        block = blocks[0]
        self.assertEquals(block, (start_id + 15, start_id + 20))

        blocks = schedule_cellmeasure_archival.delay(batch=1).get()
        self.assertEquals(len(blocks), 0)
Example #22
    def test_cell_histogram(self):
        session = self.session
        today = util.utcnow()
        yesterday = (today - timedelta(1))
        two_days = (today - timedelta(2))
        long_ago = (today - timedelta(3))

        CellObservationFactory.create_batch(2, created=today)
        CellObservationFactory(created=yesterday)
        CellObservationFactory.create_batch(3, created=two_days)
        CellObservationFactory(created=long_ago)
        session.flush()

        cell_histogram.delay(ago=3).get()

        stats = session.query(Stat).order_by(Stat.time).all()
        self.assertEqual(len(stats), 1)
        self.assertEqual(stats[0].key, StatKey.cell)
        self.assertEqual(stats[0].time, long_ago.date())
        self.assertEqual(stats[0].value, 1)

        # fill up newer dates
        cell_histogram.delay(ago=2).get()
        cell_histogram.delay(ago=1).get()
        cell_histogram.delay(ago=0).get()

        # test duplicate execution
        cell_histogram.delay(ago=1).get()

        stats = session.query(Stat.time, Stat.value).order_by(Stat.time).all()
        self.assertEqual(len(stats), 4)
        self.assertEqual(dict(stats), {
                         long_ago.date(): 1,
                         two_days.date(): 4,
                         yesterday.date(): 5,
                         today.date(): 7})
Example #23
    def test_retriable_exceptions(
        self,
        celery,
        redis,
        session,
        db_shared_session,
        metricsmock,
        errclass,
        errno,
        errmsg,
        backoff_sleep_mock,
    ):
        """Test database exceptions where the task should wait and try again."""

        obs = CellObservationFactory.build(radio=Radio.lte)
        shard = CellShard.shard_model(obs.cellid)
        cell = CellShardFactory.build(
            radio=obs.radio,
            mcc=obs.mcc,
            mnc=obs.mnc,
            lac=obs.lac,
            cid=obs.cid,
            samples=10,
            created=datetime(2019, 12, 5, tzinfo=UTC),
        )
        session.add(cell)
        session.commit()
        session.begin_nested()  # Protect test cell from task rollback

        error = errclass(errno, errmsg)
        wrapped = InterfaceError.instance(
            statement="SELECT COUNT(*) FROM cell_area",
            params={},
            orig=error,
            dbapi_base_err=MySQLError,
        )
        with mock.patch.object(CellUpdater,
                               "add_area_update",
                               side_effect=[wrapped, None]):
            self._queue_and_update(celery, [obs], update_cell)
            assert CellUpdater.add_area_update.call_count == 2
            backoff_sleep_mock.assert_called_once()

        cells = session.query(shard).all()
        assert len(cells) == 1
        self.check_statcounter(redis, StatKey.cell, 1)

        # The existing cell record was updated
        cell = cells[0]
        assert cell.samples == 11
        assert cell.created == datetime(2019, 12, 5, tzinfo=UTC)
        self.check_statcounter(redis, StatKey.unique_cell, 0)

        # Assert generated metrics are correct
        metricsmock.assert_incr_once("data.observation.insert",
                                     value=1,
                                     tags=["type:cell"])
        metricsmock.assert_incr_once("data.station.confirm",
                                     value=1,
                                     tags=["type:cell"])
        metricsmock.assert_timing_once("task", tags=["task:data.update_cell"])
        metricsmock.assert_incr_once("data.station.dberror",
                                     tags=["type:cell",
                                           "errno:%s" % errno])
Example #24
 def obs_factory(**kw):
     obs = CellObservationFactory.create(**kw)
     observations.append(obs)
Example #25
 def obs_factory(**kw):
     obs = CellObservationFactory.build(**kw)
     observations.append(obs)
Example #26
 def obs_factory(**kw):
     obs = CellObservationFactory.build(**kw)
     if obs is not None:
         observations.append(obs)
Example #27
    def test_retriable_exceptions(self, celery, redis, session, db,
                                  metricsmock, errclass, errno, errmsg):
        """Test database exceptions where the task should wait and try again."""

        obs = CellObservationFactory.build(radio=Radio.lte)
        shard = CellShard.shard_model(obs.cellid)
        cell = CellShardFactory.build(
            radio=obs.radio,
            mcc=obs.mcc,
            mnc=obs.mnc,
            lac=obs.lac,
            cid=obs.cid,
            samples=10,
            created=datetime(2019, 12, 5, tzinfo=UTC),
        )
        session.add(cell)
        session.commit()
        # TODO: Find a more elegant way to do this
        db.tests_task_use_savepoint = True

        error = errclass(errno, errmsg)
        wrapped = InterfaceError.instance(
            statement="SELECT COUNT(*) FROM cell_area",
            params={},
            orig=error,
            dbapi_base_err=MySQLError,
        )
        with mock.patch.object(
                CellUpdater, "add_area_update", side_effect=[
                    wrapped, None
                ]), mock.patch("ichnaea.data.station.time.sleep") as sleepy:
            self._queue_and_update(celery, [obs], update_cell)
            assert CellUpdater.add_area_update.call_count == 2
            sleepy.assert_called_once_with(1)

        del db.tests_task_use_savepoint

        cells = session.query(shard).all()
        assert len(cells) == 1
        self.check_statcounter(redis, StatKey.cell, 1)

        # The existing cell record was updated
        cell = cells[0]
        assert cell.samples == 11
        assert cell.created == datetime(2019, 12, 5, tzinfo=UTC)
        self.check_statcounter(redis, StatKey.unique_cell, 0)

        # Assert generated metrics are correct
        assert metricsmock.has_record("incr",
                                      "data.observation.insert",
                                      value=1,
                                      tags=["type:cell"])
        assert metricsmock.has_record("incr",
                                      "data.station.confirm",
                                      value=1,
                                      tags=["type:cell"])
        assert metricsmock.has_record("timing",
                                      "task",
                                      tags=["task:data.update_cell"])
        assert metricsmock.has_record(
            "incr",
            "data.station.dberror",
            value=1,
            tags=["type:cell", "errno:%s" % errno],
        )
Example #28
    def test_blocklist_temporary_and_permanent(self):
        # This test simulates a cell that moves once a month, for 2 years.
        # The first 2 * PERMANENT_BLOCKLIST_THRESHOLD (12) moves should be
        # temporary, forgotten after a week; after that it should be
        # permanently blocklisted.

        now = util.utcnow()
        # Station moves between these 4 points, all in the USA:
        points = [
            (40.0, -74.0),  # NYC
            (37.0, -122.0),  # SF
            (47.0, -122.0),  # Seattle
            (25.0, -80.0),  # Miami
        ]

        obs = CellObservationFactory(
            mcc=310, lat=points[0][0], lon=points[0][1])

        N = 4 * PERMANENT_BLOCKLIST_THRESHOLD
        for month in range(0, N):
            days_ago = (N - (month + 1)) * 30
            time = now - timedelta(days=days_ago)

            obs.lat = points[month % 4][0]
            obs.lon = points[month % 4][1]

            # Assuming PERMANENT_BLOCKLIST_THRESHOLD == 6:
            #
            # 0th insert will create the station
            # 1st insert will create first blocklist entry, delete station
            # 2nd insert will recreate the station at new position
            # 3rd insert will update blocklist, re-delete station
            # 4th insert will recreate the station at new position
            # 5th insert will update blocklist, re-delete station
            # 6th insert will recreate the station at new position
            # ...
            # 11th insert will make blocklisting permanent, re-delete station
            # 12th insert will not recreate station
            # 13th insert will not recreate station
            # ...
            # 23rd insert will not recreate station

            blocks = self.session.query(CellBlocklist).all()
            if month < 2:
                self.assertEqual(len(blocks), 0)
            else:
                self.assertEqual(len(blocks), 1)
                # force the blocklist back in time to whenever the
                # observation was supposedly inserted.
                block = blocks[0]
                block.time = time
                self.session.commit()

            if month < N / 2:
                # We still haven't exceeded the threshold, so the
                # observation was admitted.
                self.data_queue.enqueue([obs])
                if month % 2 == 0:
                    # The station was (re)created.
                    self.assertEqual(update_cell.delay().get(), (1, 0))
                    # Rescan lacs to update entries
                    self.assertEqual(scan_areas.delay().get(), 1)
                    # One cell + one cell-LAC record should exist.
                    self.assertEqual(self.session.query(Cell).count(), 1)
                    self.assertEqual(self.session.query(CellArea).count(), 1)
                else:
                    # The station existed and was seen moving,
                    # thereby activating the blocklist and deleting the cell.
                    self.assertEqual(update_cell.delay().get(), (1, 1))
                    # Rescan lacs to delete orphaned lac entry
                    self.assertEqual(scan_areas.delay().get(), 1)
                    if month > 1:
                        self.assertEqual(block.count, ((month + 1) / 2))
                    self.assertEqual(
                        self.session.query(CellBlocklist).count(), 1)
                    self.assertEqual(self.session.query(Cell).count(), 0)

                    # Try adding one more observation
                    # to be sure it is dropped by the now-active blocklist.
                    self.data_queue.enqueue([obs])
                    self.assertEqual(update_cell.delay().get(), (0, 0))
            else:
                # Blocklist has exceeded threshold, gone to permanent mode,
                # so no observation accepted, no stations seen.
                self.data_queue.enqueue([obs])
                self.assertEqual(update_cell.delay().get(), (0, 0))
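Every example on this page builds its test data through CellObservationFactory, whose definition is not shown here. As a rough, hedged sketch of what such a factory_boy factory can look like, with an illustrative Observation class standing in for ichnaea's real model:

import factory


class Observation:
    # illustrative stand-in for the real CellObservation model
    def __init__(self, radio, mcc, mnc, lac, cid, lat, lon, accuracy=None):
        self.radio = radio
        self.mcc = mcc
        self.mnc = mnc
        self.lac = lac
        self.cid = cid
        self.lat = lat
        self.lon = lon
        self.accuracy = accuracy


class ObservationFactory(factory.Factory):
    class Meta:
        model = Observation

    radio = "lte"
    mcc = 310
    mnc = 410
    lac = 1
    cid = factory.Sequence(lambda n: n + 1)  # unique cell id per object
    lat = 51.5
    lon = -0.1
    accuracy = 10.0


# build()/build_batch() return unsaved objects; keyword arguments
# override the declared defaults, as in the tests above
obs = ObservationFactory.build(accuracy=None)
batch = ObservationFactory.build_batch(3)
assert obs.accuracy is None
assert len(batch) == 3 and batch[0].cid != batch[1].cid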