Example #1
    def test_truncate_timeline_script(self):
        client = StrictRedis(db=9)

        timeline = 'timeline'

        # Preload some fake records (the contents don't matter.)
        records = list(itertools.islice(self.records, 10))
        for record in records:
            client.zadd(timeline, record.timestamp, record.key)
            client.set(make_record_key(timeline, record.key), 'data')

        with self.assertChanges(lambda: client.zcard(timeline),
                                before=10,
                                after=5):
            truncate_timeline((timeline, ), (5, ), client)

            # Ensure the early records don't exist.
            for record in records[:5]:
                assert not client.zscore(timeline, record.key)
                assert not client.exists(make_record_key(timeline, record.key))

            # Ensure the later records do exist.
            for record in records[-5:]:
                assert client.zscore(timeline,
                                     record.key) == float(record.timestamp)
                assert client.exists(make_record_key(timeline, record.key))
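A note on the client calls above: `client.zadd(timeline, record.timestamp, record.key)` is the positional score-then-member signature from redis-py releases before 3.0. In redis-py 3.0 and later, `zadd` takes a `{member: score}` mapping instead, so the preload step would look roughly like the sketch below (the record-key layout is a made-up stand-in for whatever `make_record_key` actually produces):

# Minimal sketch of the same preload step against redis-py >= 3.0.
# 'timeline:record:<key>' is a hypothetical layout, not the real
# make_record_key() format.
from redis import StrictRedis

client = StrictRedis(db=9)
client.zadd('timeline', {'record-key': 100.0})    # member -> score mapping
client.set('timeline:record:record-key', 'data')  # one value blob per record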
Example #2
    def test_ensure_timeline_scheduled_script(self):
        client = StrictRedis(db=9)

        timeline = 'timeline'
        timestamp = 100.0

        waiting_set_size = functools.partial(client.zcard, 'waiting')
        ready_set_size = functools.partial(client.zcard, 'ready')

        timeline_score_in_waiting_set = functools.partial(client.zscore, 'waiting', timeline)
        timeline_score_in_ready_set = functools.partial(client.zscore, 'ready', timeline)

        keys = ('waiting', 'ready', 'last-processed')

        # The first addition should cause the timeline to be added to the ready set.
        with self.assertChanges(ready_set_size, before=0, after=1), \
                self.assertChanges(timeline_score_in_ready_set, before=None, after=timestamp):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp, 1, 10), client) == 1

        # Adding it again with a timestamp in the future should not change the schedule time.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertDoesNotChange(ready_set_size), \
                self.assertDoesNotChange(timeline_score_in_ready_set):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp + 50, 1, 10), client) is None

        # Move the timeline from the ready set to the waiting set.
        client.zrem('ready', timeline)
        client.zadd('waiting', timestamp, timeline)
        client.set('last-processed', timestamp)

        increment = 1
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertChanges(timeline_score_in_waiting_set, before=timestamp, after=timestamp + increment):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp, increment, 10), client) is None

        # Make sure the schedule respects the maximum value.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertChanges(timeline_score_in_waiting_set, before=timestamp + 1, after=timestamp):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp, increment, 0), client) is None

        # Test to ensure a missing last processed timestamp can be handled
        # correctly (chooses minimum of schedule value and record timestamp.)
        client.zadd('waiting', timestamp, timeline)
        client.delete('last-processed')
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertDoesNotChange(timeline_score_in_waiting_set):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp + 100, increment, 10), client) is None

        with self.assertDoesNotChange(waiting_set_size), \
                self.assertChanges(timeline_score_in_waiting_set, before=timestamp, after=timestamp - 100):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp - 100, increment, 10), client) is None
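Read end to end, the assertions above pin down the observable contract of `ensure_timeline_scheduled`: a brand-new timeline goes straight into the ready set, a timeline already in the ready set is left alone, and a timeline in the waiting set is rescheduled relative to the last-processed timestamp (or, if that key is missing, never later than the incoming record). The helper below is only a plain-Python restatement of that contract for readability; the test name and the (keys, args, client) call pattern suggest the real implementation is an atomic server-side Lua script, and the function name and structure here are invented for illustration.

# Plain-Python approximation of the behavior asserted above; illustrative
# only, not the real (atomic, server-side) implementation.
def ensure_timeline_scheduled_sketch(client, keys, args):
    waiting, ready, last_processed = keys
    timeline, timestamp, increment, maximum = args

    # Already scheduled for digestion: leave everything alone.
    if client.zscore(ready, timeline) is not None:
        return None

    score = client.zscore(waiting, timeline)
    if score is None:
        # First sighting: make the timeline ready immediately.
        client.zadd(ready, timestamp, timeline)  # pre-3.0 zadd signature
        return 1

    last = client.get(last_processed)  # value is a numeric string (decode if needed)
    if last is not None:
        # Back off from the last processed time, clamped by the maximum delay.
        target = min(float(last) + increment, float(last) + maximum)
    else:
        # No record of the last run: never push the schedule later than the
        # incoming record's timestamp.
        target = min(score, timestamp)

    if target != score:
        client.zadd(waiting, target, timeline)
    return None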
Example #3
File: test_redis.py  Project: ixc/sentry
    def test_truncate_timeline_script(self):
        client = StrictRedis(db=9)

        timeline = 'timeline'

        # Preload some fake records (the contents don't matter.)
        records = list(itertools.islice(self.records, 10))
        for record in records:
            client.zadd(timeline, record.timestamp, record.key)
            client.set(make_record_key(timeline, record.key), 'data')

        with self.assertChanges(lambda: client.zcard(timeline), before=10, after=5):
            truncate_timeline((timeline,), (5,), client)

            # Ensure the early records don't exist.
            for record in records[:5]:
                assert not client.zscore(timeline, record.key)
                assert not client.exists(make_record_key(timeline, record.key))

            # Ensure the later records do exist.
            for record in records[-5:]:
                assert client.zscore(timeline, record.key) == float(record.timestamp)
                assert client.exists(make_record_key(timeline, record.key))
Example #4
    def test_ensure_timeline_scheduled_script(self):
        client = StrictRedis(db=9)

        timeline = 'timeline'
        timestamp = 100.0

        waiting_set_size = functools.partial(client.zcard, 'waiting')
        ready_set_size = functools.partial(client.zcard, 'ready')
        timeline_score_in_waiting_set = functools.partial(client.zscore, 'waiting', timeline)
        timeline_score_in_ready_set = functools.partial(client.zscore, 'ready', timeline)

        # The first addition should cause the timeline to be added to the waiting set.
        with self.assertChanges(waiting_set_size, before=0, after=1), \
                self.assertChanges(timeline_score_in_waiting_set, before=None, after=timestamp):
            ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp), client)

        # Adding it again with a timestamp in the future should not change the schedule time.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertDoesNotChange(timeline_score_in_waiting_set):
            ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp + 50), client)

        # If we see a record with a timestamp earlier than the schedule time,
        # we should change the schedule.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertChanges(timeline_score_in_waiting_set, before=timestamp, after=timestamp - 50):
            ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp - 50), client)

        # Move the timeline from the waiting set to the ready set.
        client.zrem('waiting', timeline)
        client.zadd('ready', timestamp, timeline)

        # Nothing should change.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertDoesNotChange(ready_set_size), \
                self.assertDoesNotChange(timeline_score_in_ready_set):
            ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp - 50), client)
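This is an earlier two-key variant of the same script: only the 'waiting' and 'ready' keys are passed, and the arguments are just the timeline and a timestamp. The sketch below merely restates what the assertions establish (a new timeline lands in the waiting set, the schedule only ever moves earlier, and a timeline already in the ready set is left untouched); the helper name is invented and the real logic, as above, is presumably a server-side Lua script.

# Plain-Python restatement of the older two-key contract exercised above;
# illustrative only, not the actual implementation.
def ensure_timeline_scheduled_v1_sketch(client, keys, args):
    waiting, ready = keys
    timeline, timestamp = args

    # Already awaiting digestion: nothing to reschedule.
    if client.zscore(ready, timeline) is not None:
        return

    score = client.zscore(waiting, timeline)
    if score is None or timestamp < score:
        # Schedule the timeline, or pull an existing schedule earlier;
        # never push it later.
        client.zadd(waiting, timestamp, timeline)  # pre-3.0 zadd signature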
Example #5
class DistanceCalculator(object):
    _geoip4 = None
    _geoip6 = None

    def __init__(self):
        # Load the GeoIP databases into class attributes since they each need 20+ MB in memory
        if not self.__class__._geoip4:
            self.__class__._geoip4 = GeoIP(Config.GEOIP_PATH_V4, MEMORY_CACHE)
        if not self.__class__._geoip6:
            self.__class__._geoip6 = GeoIP(Config.GEOIP_PATH_V6, MEMORY_CACHE)
        self.redis = StrictRedis(Config.REDIS['HOST'], Config.REDIS['PORT'],
                                 Config.REDIS['DB'])

    @staticmethod
    def _haversine(lon1, lat1, lon2, lat2):
        """
        Calculate the great circle distance between two points
        on the earth (specified in decimal degrees)
        """
        # convert decimal degrees to radians
        lon1, lat1, lon2, lat2 = map(lambda v: radians(float(v)),
                                     [lon1, lat1, lon2, lat2])
        # haversine formula
        dlon = lon2 - lon1
        dlat = lat2 - lat1
        a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
        c = 2 * asin(sqrt(a))
        km = 6367 * c  # convert to km
        return km

    def get_mirror_distances(self, address):
        last_update = self.redis.get(Config.KEY_LAST_UPDATE)
        key = Config.KEY_MIRROR.format(address, last_update)
        distances = OrderedDict(self.redis.zrange(key, 0, -1, withscores=True))
        if not distances:
            if address.startswith("::ffff:"):
                address = address.replace("::ffff:", "")
            try:
                if ":" in address:
                    record = self._geoip6.record_by_addr(address)
                else:
                    record = self._geoip4.record_by_addr(address)
            except socket.error:
                raise GeoIPLookupError()
            if not record:
                raise GeoIPLookupError()
            lat = record['latitude']
            lon = record['longitude']

            distances = OrderedDict(
                sorted(((mirror.name,
                         self._haversine(lon, lat, mirror.lon, mirror.lat))
                        for mirror in Mirror.objects.filter(age__lt=3601)),
                       key=itemgetter(1)))
            if distances:
                self.redis.zadd(key, **distances)
                self.redis.expire(key, 60 * 10)  # 10 min
        return distances

    def get_nearest_mirror(self, address):
        try:
            distances = self.get_mirror_distances(address)
            if distances:
                return next(distances.iteritems())[0]
            return Config.FALLBACK_MIRROR
        except GeoIPLookupError:
            return Config.FALLBACK_MIRROR
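Two details in this snippet are version-specific: `self.redis.zadd(key, **distances)` relies on the keyword-argument form that redis-py removed in 3.0, and `distances.iteritems()` only exists on Python 2 dicts. A rough equivalent of the cache-write and nearest-mirror lookup under Python 3 with redis-py >= 3.0 might look like the sketch below; the key and mirror names are placeholders, not values from the snippet.

# Hypothetical Python 3 / redis-py >= 3.0 version of the caching step above.
from collections import OrderedDict
from redis import StrictRedis

redis_client = StrictRedis()
key = "mirrors:203.0.113.7:1700000000"  # placeholder for Config.KEY_MIRROR.format(...)
distances = OrderedDict([("mirror-a", 12.3), ("mirror-b", 450.8)])

redis_client.zadd(key, mapping=dict(distances))  # member -> score mapping
redis_client.expire(key, 60 * 10)                # keep the 10 minute TTL

nearest = next(iter(distances))  # replaces next(distances.iteritems())[0]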