Example #1

from redis import StrictRedis


# Note: this class shadows the `redis` module name, so import StrictRedis first.
class redis(object):
    def __init__(self, host='127.0.0.1', port=6379):
        self.r = StrictRedis(host, port)

    def rec(self, k, v):
        self.r.set(k, v)

    def rpush(self, v):
        self.r.rpush('alerts', v)
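Examples #1 and #2 form a producer/consumer pair: the wrapper above pushes JSON alerts onto the 'alerts' list, and the Record class below pops and aggregates them. A minimal sketch of the producer side, assuming a local Redis server; the alert payload is illustrative but mirrors the fields Example #2 reads:

import json

producer = redis()  # the wrapper class defined above
alert = {"src_ip": "10.0.0.1", "src_port": 1234,
         "dest_ip": "10.0.0.2", "dest_port": 80,
         "alert": {"signature": "example signature"}}  # illustrative payload
producer.rpush(json.dumps(alert))  # queues it on the 'alerts' list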
Example #2
import json
from time import sleep

from redis import StrictRedis


class Record(object):
    def __init__(self, host='127.0.0.1', port=6379):
        self.r = StrictRedis(host, port)

    def run(self):
        while True:
            value = self.r.rpop('alerts')
            if value:
                obj = json.loads(value)
                keyredis = '{}_{}_{}_{}'.format(obj['src_ip'], obj['src_port'],
                                                obj['dest_ip'], obj['dest_port'])
                entry = self.r.get(keyredis)
                restruct = json.loads(entry) if entry else {}
                restruct.setdefault('http', [])
                restruct.setdefault('alerts', [])
                restruct.setdefault('files', [])
                if 'alert' in obj:
                    restruct['alerts'].append(obj['alert']['signature'])
                if 'fileinfo' in obj:
                    restruct['files'].append(obj['fileinfo'])
                if 'http' in obj:
                    restruct['http'].append(obj['http'])
                self.r.set(keyredis, json.dumps(restruct))
            else:
                sleep(1)
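Once the consumer has run, the aggregated flow record can be read back under the src_srcport_dst_dstport key it builds. A minimal sketch, reusing the illustrative alert pushed after Example #1:

import json
from redis import StrictRedis

r = StrictRedis()
flow = r.get('10.0.0.1_1234_10.0.0.2_80')
if flow:
    print(json.loads(flow)['alerts'])  # e.g. ['example signature']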
Example #3
    def test_truncate_timeline_script(self):
        client = StrictRedis(db=9)

        timeline = 'timeline'

        # Preload some fake records (the contents don't matter.)
        records = list(itertools.islice(self.records, 10))
        for record in records:
            client.zadd(timeline, record.timestamp, record.key)
            client.set(make_record_key(timeline, record.key), 'data')

        with self.assertChanges(lambda: client.zcard(timeline),
                                before=10,
                                after=5):
            truncate_timeline((timeline, ), (5, ), client)

            # Ensure the early records don't exist.
            for record in records[:5]:
                assert not client.zscore(timeline, record.key)
                assert not client.exists(make_record_key(timeline, record.key))

            # Ensure the later records do exist.
            for record in records[-5:]:
                assert client.zscore(timeline,
                                     record.key) == float(record.timestamp)
                assert client.exists(make_record_key(timeline, record.key))
Example #4
class RedisDataSource(AbstractDataSource):

    _r = None
    def __init__(self,config):
        if self._validateConfig(config):
            self._r = StrictRedis(host=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_HOST],
                                        port=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_PORT],
                                        db=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_DB])
            logger.debug("Obtained internal redis handler" + str(self._r))
        else:
            raise BaseException("Error validating config ")


    def update(self,item):
        self.store(item)

    def store(self,item):
        self._r.set(item.getHash(), item.getValue())

    def get(self,item):
        return self._r.get(item.getHash())

    def exists(self,item):
        return self.get(item) is not None

    def all(self):

        result = []
        # Obtain all keys
        keys = self._r.keys()

        # For each key, fetch its value
        for k in keys:
            value = self._r.get(k)
            result.append(BaseItem({"origin": "redis"}, value))
        return result

    def _validateConfig(self,config):

        validator = MultipleConfigValidator(
                        {VALIDATORS_LIST:[ContainsKeyConfigValidator({KEY_VALUE:REDIS_DATASOURCE_CONFIG})]})
        if not validator.validate(config):
            raise BaseException("Config validation error : does not contain " + REDIS_DATASOURCE_CONFIG)

        # Validate redis datasource config
        validator = MultipleConfigValidator(
                        {VALIDATORS_LIST:[ContainsKeysConfigValidator({KEYS_LIST:[REDIS_DATASOURCE_CONFIG_DB,
                                                                                  REDIS_DATASOURCE_CONFIG_HOST,
                                                                                  REDIS_DATASOURCE_CONFIG_PORT]})]})

        if not validator.validate(config[REDIS_DATASOURCE_CONFIG]):
            raise BaseException("Config validation error : config not complete ")

        return True


    def delete(self,item):
        self._r.delete(item.getHash())
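all() relies on the KEYS command, which blocks the server while it scans the entire keyspace. A hedged alternative using redis-py's scan_iter, which pages through keys incrementally with SCAN; the method name all_scan is mine, not the project's:

    def all_scan(self):
        result = []
        # SCAN-based cursor iteration; does not block Redis the way KEYS does
        for k in self._r.scan_iter():
            result.append(BaseItem({"origin": "redis"}, self._r.get(k)))
        return result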
Example #5
    def test_ensure_timeline_scheduled_script(self):
        client = StrictRedis(db=9)

        timeline = 'timeline'
        timestamp = 100.0

        waiting_set_size = functools.partial(client.zcard, 'waiting')
        ready_set_size = functools.partial(client.zcard, 'ready')

        timeline_score_in_waiting_set = functools.partial(client.zscore, 'waiting', timeline)
        timeline_score_in_ready_set = functools.partial(client.zscore, 'ready', timeline)

        keys = ('waiting', 'ready', 'last-processed')

        # The first addition should cause the timeline to be added to the ready set.
        with self.assertChanges(ready_set_size, before=0, after=1), \
                self.assertChanges(timeline_score_in_ready_set, before=None, after=timestamp):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp, 1, 10), client) == 1

        # Adding it again with a timestamp in the future should not change the schedule time.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertDoesNotChange(ready_set_size), \
                self.assertDoesNotChange(timeline_score_in_ready_set):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp + 50, 1, 10), client) is None

        # Move the timeline from the ready set to the waiting set.
        client.zrem('ready', timeline)
        client.zadd('waiting', timestamp, timeline)
        client.set('last-processed', timestamp)

        increment = 1
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertChanges(timeline_score_in_waiting_set, before=timestamp, after=timestamp + increment):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp, increment, 10), client) is None

        # Make sure the schedule respects the maximum value.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertChanges(timeline_score_in_waiting_set, before=timestamp + 1, after=timestamp):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp, increment, 0), client) is None

        # Test to ensure a missing last processed timestamp can be handled
        # correctly (chooses minimum of schedule value and record timestamp.)
        client.zadd('waiting', timestamp, timeline)
        client.delete('last-processed')
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertDoesNotChange(timeline_score_in_waiting_set):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp + 100, increment, 10), client) is None

        with self.assertDoesNotChange(waiting_set_size), \
                self.assertChanges(timeline_score_in_waiting_set, before=timestamp, after=timestamp - 100):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp - 100, increment, 10), client) is None
Example #7
class Command(object):
    def __init__(self):
        self.redis = StrictRedis(Config.REDIS['HOST'], Config.REDIS['PORT'],
                                 Config.REDIS['DB'])

    def run(self):
        log.debug("Updating mirror database")
        geoip = GeoIP(Config.GEOIP_PATH_V4)

        for status in mirror_statuses(
                unofficial_mirrors=Config.UNOFFICIAL_MIRRORS):
            name = status['mirror']
            if name == "a.pypi.python.org":
                # don't include 'a' in the list of mirrors - it's no mirror after all
                continue
            time_diff = status['time_diff']
            if not isinstance(time_diff, timedelta):
                continue

            log.debug("  Processing mirror '%s'", name)
            record = geoip.record_by_name(name)
            lat = record['latitude']
            lon = record['longitude']

            log.debug("    Age: %d, Lat: %0.5f, Lon: %0.5f",
                      time_diff.total_seconds(), lat, lon)

            try:
                mirror = Mirror.objects.get(name=name)
            except ObjectNotFound:
                mirror = Mirror(name=name)
            mirror.age = time_diff.total_seconds()
            mirror.lat = lat
            mirror.lon = lon

            mirror.save()

        self.redis.set(Config.KEY_LAST_UPDATE, time.time())
        log.debug("Finished updating mirror database")
Example #8
class RedisDataStorage(DataStorage):
    def __init__(self, host, port, level):
        DataStorage.__init__(self, level)
        self._storage = StrictRedis(host=host, port=port)

    def get_key(self, pid):
        return "{}_{}".format(self.storage_level(), pid)

    def get_data(self, pid, default=None):
        result = self._storage.get(name=self.get_key(pid))
        if result is None:
            result = default
        return result

    def store_data(self, pid, data):
        self._storage.set(name=self.get_key(pid), value=data)

    def delete_data(self, pid):
        self._storage.delete(self.get_key(pid))

    def exists(self, pid):
        return self._storage.exists(self.get_key(pid))
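A minimal usage sketch for RedisDataStorage. It assumes a reachable Redis server and a DataStorage base class whose storage_level() returns the level passed to __init__; the base class is not shown, so both are assumptions:

storage = RedisDataStorage('127.0.0.1', 6379, 'session')
storage.store_data(42, 'payload')        # stored under key 'session_42' (assumed)
assert storage.exists(42)
print(storage.get_data(42))              # b'payload' (redis-py returns bytes)
storage.delete_data(42)
print(storage.get_data(42, 'default'))   # falls back to the supplied default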
Example #9
    def test_truncate_timeline_script(self):
        client = StrictRedis(db=9)

        timeline = 'timeline'

        # Preload some fake records (the contents don't matter.)
        records = list(itertools.islice(self.records, 10))
        for record in records:
            client.zadd(timeline, record.timestamp, record.key)
            client.set(make_record_key(timeline, record.key), 'data')

        with self.assertChanges(lambda: client.zcard(timeline), before=10, after=5):
            truncate_timeline((timeline,), (5,), client)

            # Ensure the early records don't exist.
            for record in records[:5]:
                assert not client.zscore(timeline, record.key)
                assert not client.exists(make_record_key(timeline, record.key))

            # Ensure the later records do exist.
            for record in records[-5:]:
                assert client.zscore(timeline, record.key) == float(record.timestamp)
                assert client.exists(make_record_key(timeline, record.key))
Example #10

class RedisCache(CacheBase):
    def __init__(self, config, section):
        from redis.client import StrictRedis
        self.conn = StrictRedis(
            config.get(section, 'redis-server'),
            config.getint(section, 'redis-port'),
            config.getint(section, 'redis-db'),
            decode_responses=True
        )

    def check_password(self, user, password):
        """Check the given user and password.

        Returns None on cache miss, True if password matches, False if not.
        """
        cached = self.conn.get(self.prefix('%s-pass' % user))
        if cached is None:
            return cached
        else:
            return cached == self.hash(password, cached)

    def set_password(self, user, password):
        self.conn.set(self.prefix('%s-pass' % user), self.hash(password, None), ex=self.expire)

    def in_groups(self, user, groups):
        key = self.prefix('%s-groups' % user)
        if not self.conn.exists(key):
            return None

        return not self.conn.smembers(key).isdisjoint(groups)

    def set_groups(self, user, groups):
        key = self.prefix('%s-groups' % user)
        pipe = self.conn.pipeline()
        pipe.sadd(key, *groups).expire(key, self.expire)
        pipe.execute()
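check_password deliberately distinguishes a cache miss (None) from a wrong password (False), which supports a cache-aside flow like the sketch below. Here backend_check is a hypothetical stand-in for the real authentication backend, and config/section are whatever the surrounding application provides:

cache = RedisCache(config, 'auth')                # hypothetical config/section
result = cache.check_password('alice', 's3cret')
if result is None:                                # cache miss: ask the backend
    result = backend_check('alice', 's3cret')     # hypothetical backend call
    if result:
        cache.set_password('alice', 's3cret')     # populate for next time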
Example #11

class RedisCache(CacheBase):
    def __init__(self, config, section):
        from redis.client import StrictRedis
        self.conn = StrictRedis(config.get(section, 'redis-server'),
                                config.getint(section, 'redis-port'),
                                config.getint(section, 'redis-db'),
                                decode_responses=True)

    def check_password(self, user, password):
        """Check the given user and password.

        Returns None on cache miss, True if password matches, False if not.
        """
        cached = self.conn.get(self.prefix('%s-pass' % user))
        if cached is None:
            return cached
        else:
            return cached == self.hash(password, cached)

    def set_password(self, user, password):
        self.conn.set(self.prefix('%s-pass' % user),
                      self.hash(password, None),
                      ex=self.expire)

    def in_groups(self, user, groups):
        key = self.prefix('%s-groups' % user)
        if not self.conn.exists(key):
            return None

        return not self.conn.smembers(key).isdisjoint(groups)

    def set_groups(self, user, groups):
        key = self.prefix('%s-groups' % user)
        pipe = self.conn.pipeline()
        pipe.sadd(key, *groups).expire(key, self.expire)
        pipe.execute()
Example #12
class Command(object):
    def __init__(self):
        self.redis = StrictRedis(Config.REDIS['HOST'], Config.REDIS['PORT'], Config.REDIS['DB'])

    def run(self):
        log.debug("Updating mirror database")
        geoip = GeoIP(Config.GEOIP_PATH_V4)

        for status in mirror_statuses(unofficial_mirrors=Config.UNOFFICIAL_MIRRORS):
            name = status['mirror']
            if name == "a.pypi.python.org":
                # don't include 'a' in the list of mirrors - it's no mirror after all
                continue
            time_diff = status['time_diff']
            if not isinstance(time_diff, timedelta):
                continue

            log.debug("  Processing mirror '%s'", name)
            record = geoip.record_by_name(name)
            lat = record['latitude']
            lon = record['longitude']

            log.debug("    Age: %d, Lat: %0.5f, Lon: %0.5f", time_diff.total_seconds(), lat, lon)

            try:
                mirror = Mirror.objects.get(name=name)
            except ObjectNotFound:
                mirror = Mirror(name=name)
            mirror.age = time_diff.total_seconds()
            mirror.lat = lat
            mirror.lon = lon

            mirror.save()

        self.redis.set(Config.KEY_LAST_UPDATE, time.time())
        log.debug("Finished updating mirror database")
Example #13
class RedisStore:
    def __init__(self, db_host, db_port, db_num, db_pw):
        self.pool = ConnectionPool(max_connections=2,
                                   db=db_num,
                                   host=db_host,
                                   port=db_port,
                                   password=db_pw,
                                   decode_responses=True)
        self.redis = StrictRedis(connection_pool=self.pool)
        self.redis.ping()
        self._object_map = WeakValueDictionary()

    def create_object(self, dbo_class, dbo_dict, update_timestamp=True):
        dbo_class = get_dbo_class(getattr(dbo_class, 'dbo_key_type',
                                          dbo_class))
        if not dbo_class:
            return
        try:
            dbo_id = dbo_dict['dbo_id']
        except KeyError:
            dbo_id, dbo_dict = dbo_dict, {}
        if dbo_id is None or dbo_id == '':
            log.warn("create_object called with empty dbo_id")
            return
        dbo_id = str(dbo_id).lower()
        if self.object_exists(dbo_class.dbo_key_type, dbo_id):
            raise ObjectExistsError(dbo_id)
        dbo = dbo_class()
        dbo.dbo_id = dbo_id
        dbo.hydrate(dbo_dict)
        dbo.db_created()
        if dbo.dbo_set_key:
            self.redis.sadd(dbo.dbo_set_key, dbo.dbo_id)
        self.save_object(dbo, update_timestamp)
        return dbo

    def load_object(self, dbo_key, key_type=None, silent=False):
        if key_type:
            try:
                key_type = key_type.dbo_key_type
            except AttributeError:
                pass
            try:
                dbo_key, dbo_id = ':'.join((key_type, dbo_key)), dbo_key
            except TypeError:
                if not silent:
                    log.exception("Invalid dbo_key passed to load_object",
                                  stack_info=True)
                return
        else:
            key_type, _, dbo_id = dbo_key.partition(':')
        cached_dbo = self._object_map.get(dbo_key)
        if cached_dbo:
            return cached_dbo
        json_str = self.redis.get(dbo_key)
        if not json_str:
            if not silent:
                log.warn("Failed to find {} in database", dbo_key)
            return
        return self._json_to_obj(json_str, key_type, dbo_id)

    def save_object(self, dbo, update_timestamp=False, autosave=False):
        if update_timestamp:
            dbo.dbo_ts = int(time.time())
        if dbo.dbo_indexes:
            self._update_indexes(dbo)
        self._clear_old_refs(dbo)
        save_root, new_refs = dbo.to_db_value()
        self.redis.set(dbo.dbo_key, json_encode(save_root))
        if new_refs:
            self._set_new_refs(dbo, new_refs)
        log.debug("db object {} {}saved", dbo.dbo_key,
                  "auto" if autosave else "")
        self._object_map[dbo.dbo_key] = dbo
        return dbo

    def update_object(self, dbo, dbo_dict):
        dbo.hydrate(dbo_dict)
        return self.save_object(dbo, True)

    def delete_object(self, dbo):
        key = dbo.dbo_key
        dbo.db_deleted()
        self.delete_key(key)
        self._clear_old_refs(dbo)
        if dbo.dbo_set_key:
            self.redis.srem(dbo.dbo_set_key, dbo.dbo_id)
        for children_type in dbo.dbo_children_types:
            self.delete_object_set(
                get_dbo_class(children_type),
                "{}_{}s:{}".format(dbo.dbo_key_type, children_type,
                                   dbo.dbo_id))
        for ix_name in dbo.dbo_indexes:
            ix_value = getattr(dbo, ix_name, None)
            if ix_value is not None and ix_value != '':
                self.delete_index('ix:{}:{}'.format(dbo.dbo_key_type, ix_name),
                                  ix_value)
        log.debug("object deleted: {}", key)
        self.evict_object(dbo)

    def load_cached(self, dbo_key):
        return self._object_map.get(dbo_key)

    def object_exists(self, obj_type, obj_id):
        return self.redis.exists('{}:{}'.format(obj_type, obj_id))

    def load_object_set(self, dbo_class, set_key=None):
        dbo_class = get_dbo_class(getattr(dbo_class, 'dbo_key_type',
                                          dbo_class))
        key_type = dbo_class.dbo_key_type
        if not set_key:
            set_key = dbo_class.dbo_set_key
        results = set()
        keys = deque()
        pipeline = self.redis.pipeline()
        for key in self.fetch_set_keys(set_key):
            dbo_key = ':'.join([key_type, key])
            try:
                results.add(self._object_map[dbo_key])
            except KeyError:
                keys.append(key)
                pipeline.get(dbo_key)
        for dbo_id, json_str in zip(keys, pipeline.execute()):
            if json_str:
                obj = self._json_to_obj(json_str, key_type, dbo_id)
                if obj:
                    results.add(obj)
                continue
            log.warn("Removing missing object from set {}", set_key)
            self.delete_set_key(set_key, dbo_id)
        return results

    def delete_object_set(self, dbo_class, set_key=None):
        if not set_key:
            set_key = dbo_class.dbo_set_key
        for dbo in self.load_object_set(dbo_class, set_key):
            self.delete_object(dbo)
        self.delete_key(set_key)

    def reload_object(self, dbo_key):
        dbo = self._object_map.get(dbo_key)
        if dbo:
            json_str = self.redis.get(dbo_key)
            if not json_str:
                log.warn("Failed to find {} in database for reload", dbo_key)
                return None
            return self.update_object(dbo, json_decode(json_str))
        return self.load_object(dbo_key)

    def evict_object(self, dbo):
        self._object_map.pop(dbo.dbo_key, None)

    def load_value(self, key, default=None):
        json = self.redis.get(key)
        if json:
            return json_decode(json)
        return default

    def save_value(self, key, value):
        self.redis.set(key, json_encode(value))

    def fetch_set_keys(self, set_key):
        return self.redis.smembers(set_key)

    def add_set_key(self, set_key, *values):
        self.redis.sadd(set_key, *values)

    def delete_set_key(self, set_key, value):
        self.redis.srem(set_key, value)

    def set_key_exists(self, set_key, value):
        return self.redis.sismember(set_key, value)

    def db_counter(self, counter_id, inc=1):
        return self.redis.incr("counter:{}".format(counter_id), inc)

    def delete_key(self, key):
        self.redis.delete(key)

    def set_index(self, index_name, key, value):
        return self.redis.hset(index_name, key, value)

    def get_index(self, index_name, key):
        return self.redis.hget(index_name, key)

    def get_full_index(self, index_name):
        return self.redis.hgetall(index_name)

    def delete_index(self, index_name, key):
        return self.redis.hdel(index_name, key)

    def get_all_hash(self, index_name):
        return {
            key: json_decode(value)
            for key, value in self.redis.hgetall(index_name).items()
        }

    def get_hash_keys(self, hash_id):
        return self.redis.hkeys(hash_id)

    def set_db_hash(self, hash_id, hash_key, value):
        return self.redis.hset(hash_id, hash_key, json_encode(value))

    def get_db_hash(self, hash_id, hash_key):
        return json_decode(self.redis.hget(hash_id, hash_key))

    def remove_db_hash(self, hash_id, hash_key):
        self.redis.hdel(hash_id, hash_key)

    def get_all_db_hash(self, hash_id):
        return [
            json_decode(value)
            for value in self.redis.hgetall(hash_id).values()
        ]

    def get_db_list(self, list_id, start=0, end=-1):
        return [
            json_decode(value)
            for value in self.redis.lrange(list_id, start, end)
        ]

    def add_db_list(self, list_id, value):
        self.redis.lpush(list_id, json_encode(value))

    def trim_db_list(self, list_id, start, end):
        return self.redis.ltrim(list_id, start, end)

    def dbo_holders(self, dbo_key, degrees=0):
        all_keys = set()

        def find(find_key, degree):
            holder_keys = self.fetch_set_keys('{}:holders'.format(find_key))
            for new_key in holder_keys:
                if new_key != dbo_key and new_key not in all_keys:
                    all_keys.add(new_key)
                    if degree < degrees:
                        find(new_key, degree + 1)

        find(dbo_key, 0)
        return all_keys

    def _json_to_obj(self, json_str, key_type, dbo_id):
        dbo_dict = json_decode(json_str)
        dbo = get_mixed_type(key_type, dbo_dict.get('mixins'))()
        dbo.dbo_id = dbo_id
        dbo.hydrate(dbo_dict)
        self._object_map[dbo.dbo_key] = dbo
        return dbo

    def _update_indexes(self, dbo):
        try:
            old_dbo = json_decode(self.redis.get(dbo.dbo_key))
        except TypeError:
            old_dbo = None

        for ix_name in dbo.dbo_indexes:
            new_val = getattr(dbo, ix_name, None)
            old_val = old_dbo.get(ix_name) if old_dbo else None
            if old_val == new_val:
                continue
            ix_key = 'ix:{}:{}'.format(dbo.dbo_key_type, ix_name)
            if old_val is not None:
                self.delete_index(ix_key, old_val)
            if new_val is not None and new_val != '':
                if self.get_index(ix_key, new_val):
                    raise NonUniqueError(ix_key, new_val)
                self.set_index(ix_key, new_val, dbo.dbo_id)

    def _clear_old_refs(self, dbo):
        dbo_key = dbo.dbo_key
        ref_key = '{}:refs'.format(dbo_key)
        for ref_id in self.fetch_set_keys(ref_key):
            holder_key = '{}:holders'.format(ref_id)
            self.delete_set_key(holder_key, dbo_key)
        self.delete_key(ref_key)

    def _set_new_refs(self, dbo, new_refs):
        dbo_key = dbo.dbo_key
        self.add_set_key("{}:refs".format(dbo_key), *new_refs)
        for ref_id in new_refs:
            holder_key = '{}:holders'.format(ref_id)
            self.add_set_key(holder_key, dbo_key)
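_set_new_refs and _clear_old_refs maintain a bidirectional reference index in plain Redis sets, which is what dbo_holders walks. A sketch of the resulting key layout, using the key formats from the class above with illustrative ids:

# room:lobby:refs    -> {'item:sword', 'mobile:guard'}   # keys lobby references
# item:sword:holders -> {'room:lobby'}                   # keys that reference sword
#
# dbo_holders('item:sword', degrees=1) follows the ':holders' sets
# transitively, collecting every holder up to one hop away.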
Example #14

class RedisStore():
    def __init__(self, db_host, db_port, db_num, db_pw):
        self.pool = ConnectionPool(max_connections=2, db=db_num, host=db_host, port=db_port, password=db_pw,
                                   decode_responses=True)
        self.redis = StrictRedis(connection_pool=self.pool)
        self.redis.ping()
        self._object_map = WeakValueDictionary()

    def create_object(self, dbo_class, dbo_dict, update_timestamp=True):
        dbo_class = get_dbo_class(getattr(dbo_class, 'dbo_key_type', dbo_class))
        if not dbo_class:
            return
        try:
            dbo_id = dbo_dict['dbo_id']
        except KeyError:
            dbo_id, dbo_dict = dbo_dict, {}
        if dbo_id is None or dbo_id == '':
            warn("create_object called with empty dbo_id")
            return
        dbo_id = str(dbo_id).lower()
        if self.object_exists(dbo_class.dbo_key_type, dbo_id):
            raise ObjectExistsError(dbo_id)
        dbo = dbo_class()
        dbo.dbo_id = dbo_id
        dbo.hydrate(dbo_dict)
        dbo.db_created()
        if dbo.dbo_set_key:
            self.redis.sadd(dbo.dbo_set_key, dbo.dbo_id)
        self.save_object(dbo, update_timestamp)
        return dbo

    def save_object(self, dbo, update_timestamp=False, autosave=False):
        if update_timestamp:
            dbo.dbo_ts = int(time.time())
        if dbo.dbo_indexes:
            self._update_indexes(dbo)
        self._clear_old_refs(dbo)
        save_root, new_refs = dbo.to_db_value()
        self.redis.set(dbo.dbo_key, json_encode(save_root))
        if new_refs:
            self._set_new_refs(dbo, new_refs)
        debug("db object {} {}saved", dbo.dbo_key, "auto" if autosave else "")
        self._object_map[dbo.dbo_key] = dbo
        return dbo

    def save_raw(self, key, raw):
        self.redis.set(key, json_encode(raw))

    def load_raw(self, key, default=None):
        json = self.redis.get(key)
        if json:
            return json_decode(json)
        return default

    def load_cached(self, dbo_key):
        return self._object_map.get(dbo_key)

    def load_object(self, dbo_key, key_type=None, silent=False):
        if key_type:
            try:
                key_type = key_type.dbo_key_type
            except AttributeError:
                pass
            try:
                dbo_key, dbo_id = ':'.join([key_type, dbo_key]), dbo_key
            except TypeError:
                if not silent:
                    exception("Invalid dbo_key passed to load_object", stack_info=True)
                return
        else:
            key_type, _, dbo_id = dbo_key.partition(':')
        cached_dbo = self._object_map.get(dbo_key)
        if cached_dbo:
            return cached_dbo
        json_str = self.redis.get(dbo_key)
        if not json_str:
            if not silent:
                warn("Failed to find {} in database", dbo_key)
            return
        return self.load_from_json(json_str, key_type, dbo_id)

    def load_from_json(self, json_str, key_type, dbo_id):
        dbo_dict = json_decode(json_str)
        dbo = get_mixed_type(key_type, dbo_dict.get('mixins'))()
        dbo.dbo_id = dbo_id
        self._object_map[dbo.dbo_key] = dbo
        dbo.hydrate(dbo_dict)
        return dbo

    def object_exists(self, obj_type, obj_id):
        return self.redis.exists('{}:{}'.format(obj_type, obj_id))

    def load_object_set(self, dbo_class, set_key=None):
        key_type = dbo_class.dbo_key_type
        if not set_key:
            set_key = dbo_class.dbo_set_key
        results = set()
        keys = deque()
        pipeline = self.redis.pipeline()
        for key in self.fetch_set_keys(set_key):
            dbo_key = ':'.join([key_type, key])
            try:
                results.add(self._object_map[dbo_key])
            except KeyError:
                keys.append(key)
                pipeline.get(dbo_key)
        for dbo_id, json_str in zip(keys, pipeline.execute()):
            if json_str:
                obj = self.load_from_json(json_str, key_type, dbo_id)
                if obj:
                    results.add(obj)
                continue
            warn("Removing missing object from set {}", set_key)
            self.delete_set_key(set_key, dbo_id)
        return results

    def delete_object_set(self, dbo_class, set_key=None):
        if not set_key:
            set_key = dbo_class.dbo_set_key
        for dbo in self.load_object_set(dbo_class, set_key):
            self.delete_object(dbo)
        self.delete_key(set_key)

    def update_object(self, dbo, dbo_dict):
        dbo.hydrate(dbo_dict)
        return self.save_object(dbo, True)

    def delete_object(self, dbo):
        key = dbo.dbo_key
        dbo.db_deleted()
        self.delete_key(key)
        self._clear_old_refs(dbo)
        if dbo.dbo_set_key:
            self.redis.srem(dbo.dbo_set_key, dbo.dbo_id)
        for children_type in dbo.dbo_children_types:
            self.delete_object_set(get_dbo_class(children_type),
                                   "{}_{}s:{}".format(dbo.dbo_key_type, children_type, dbo.dbo_id))
        for ix_name in dbo.dbo_indexes:
            ix_value = getattr(dbo, ix_name, None)
            if ix_value is not None and ix_value != '':
                self.delete_index('ix:{}:{}'.format(dbo.dbo_key_type, ix_name), ix_value)
        debug("object deleted: {}", key)
        self.evict_object(dbo)

    def reload_object(self, dbo_key):
        dbo = self._object_map.get(dbo_key)
        if dbo:
            json_str = self.redis.get(dbo_key)
            if not json_str:
                warn("Failed to find {} in database for reload", dbo_key)
                return None
            return self.update_object(dbo, json_decode(json_str))
        return self.load_object(dbo_key)

    def evict_object(self, dbo):
        self._object_map.pop(dbo.dbo_key, None)

    def fetch_set_keys(self, set_key):
        return self.redis.smembers(set_key)

    def add_set_key(self, set_key, *values):
        self.redis.sadd(set_key, *values)

    def delete_set_key(self, set_key, value):
        self.redis.srem(set_key, value)

    def set_key_exists(self, set_key, value):
        return self.redis.sismember(set_key, value)

    def db_counter(self, counter_id, inc=1):
        return self.redis.incr("counter:{}".format(counter_id), inc)

    def delete_key(self, key):
        self.redis.delete(key)

    def set_index(self, index_name, key, value):
        return self.redis.hset(index_name, key, value)

    def get_index(self, index_name, key):
        return self.redis.hget(index_name, key)

    def get_full_index(self, index_name):
        return self.redis.hgetall(index_name)

    def delete_index(self, index_name, key):
        return self.redis.hdel(index_name, key)

    def get_all_hash(self, index_name):
        return {key: json_decode(value) for key, value in self.redis.hgetall(index_name).items()}

    def set_db_hash(self, hash_id, hash_key, value):
        return self.redis.hset(hash_id, hash_key, json_encode(value))

    def get_db_hash(self, hash_id, hash_key):
        return json_decode(self.redis.hget(hash_id, hash_key))

    def remove_db_hash(self, hash_id, hash_key):
        self.redis.hdel(hash_id, hash_key)

    def get_all_db_hash(self, hash_id):
        return [json_decode(value) for value in self.redis.hgetall(hash_id).values()]

    def get_db_list(self, list_id, start=0, end=-1):
        return [json_decode(value) for value in self.redis.lrange(list_id, start, end)]

    def add_db_list(self, list_id, value):
        self.redis.lpush(list_id, json_encode(value))

    def trim_db_list(self, list_id, start, end):
        return self.redis.ltrim(list_id, start, end)

    def _update_indexes(self, dbo):
        try:
            old_dbo = json_decode(self.redis.get(dbo.dbo_key))
        except TypeError:
            old_dbo = None

        for ix_name in dbo.dbo_indexes:
            new_val = getattr(dbo, ix_name, None)
            old_val = old_dbo.get(ix_name) if old_dbo else None
            if old_val == new_val:
                continue
            ix_key = 'ix:{}:{}'.format(dbo.dbo_key_type, ix_name)
            if old_val is not None:
                self.delete_index(ix_key, old_val)
            if new_val is not None and new_val != '':
                if self.get_index(ix_key, new_val):
                    raise NonUniqueError(ix_key, new_val)
                self.set_index(ix_key, new_val, dbo.dbo_id)

    def _clear_old_refs(self, dbo):
        dbo_key = dbo.dbo_key
        ref_key = '{}:refs'.format(dbo_key)
        for ref_id in self.fetch_set_keys(ref_key):
            holder_key = '{}:holders'.format(ref_id)
            self.delete_set_key(holder_key, dbo_key)
        self.delete_key(ref_key)

    def _set_new_refs(self, dbo, new_refs):
        dbo_key = dbo.dbo_key
        self.add_set_key("{}:refs".format(dbo_key), *new_refs)
        for ref_id in new_refs:
            holder_key = '{}:holders'.format(ref_id)
            self.add_set_key(holder_key, dbo_key)
Example #15

from redis.client import StrictRedis

if __name__ == '__main__':
    strict_redis = StrictRedis(host='127.0.0.1',
                               port=6379,
                               db=0,
                               decode_responses=True)

    strict_redis.set('aa', '111')
    strict_redis.set('bb', '222')
    strict_redis.set('cc', '333')

    aa = strict_redis.get('aa')
    bb = strict_redis.get('bb')
    cc = strict_redis.get('cc')
    dd = strict_redis.get('dd')  # 'dd' was never set, so this is None

    print(aa, bb, cc)

    keys = strict_redis.keys()
    print(keys, type(keys))
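The script gets str values back only because of decode_responses=True; without it, redis-py returns raw bytes. A small contrast, assuming the same local server:

raw = StrictRedis(host='127.0.0.1', port=6379, db=0)
print(raw.get('aa'))  # b'111' (bytes, since responses are not decoded)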
Example #16
class RedisStore():
    def __init__(self, db_host, db_port, db_num, db_pw):
        pool = ConnectionPool(max_connections=2, db=db_num, host=db_host, port=db_port, password=db_pw)
        self.redis = StrictRedis(connection_pool=pool)
        self.class_map = {}
        self.object_map = {}

    def create_object(self, dbo, update_rev=False):
        self.save_object(dbo, update_rev)
        dbo.on_loaded()

    def save_object(self, dbo, update_rev=False, autosave=False):
        if update_rev:
            rev = getattr(dbo, "dbo_rev", None)
            dbo.dbo_rev = 1 if not rev else rev + 1
        dbo.before_save()
        key = dbo.dbo_key
        self.redis.set(key, self.json_encode(dbo.save_json_obj))
        if dbo.dbo_set_key:
            self.redis.sadd(dbo.dbo_set_key, key)
        self.dispatch("db_log{0}".format("_auto" if autosave else ""), "object saved: " + key)
        self.object_map[dbo.dbo_key] = dbo

    def load_cached(self, key):
        return self.object_map.get(key)

    def evict_object(self, dbo):
        try:
            del self.object_map[dbo.dbo_key]
        except KeyError:
            debug("Failed to evict " + dbo.dbo_key + " from db cache", self)

    @logged
    def load_by_key(self, key_type, key, base_class=None):
        dbo_key = unicode('{0}:{1}'.format(key_type, key))
        cached_dbo = self.object_map.get(dbo_key)
        if cached_dbo:
            return cached_dbo
        json_str = self.redis.get(dbo_key)
        if not json_str:
            return None
        json_obj = self.json_decode(json_str)
        dbo = self._load_class(json_obj, base_class)(key)
        if dbo.dbo_key_type:
            self.object_map[dbo.dbo_key] = dbo
        self.load_json(dbo, json_obj)
        return dbo

    def object_exists(self, obj_type, obj_id):
        key = unicode('{0}:{1}'.format(obj_type, obj_id))
        # EXISTS is O(1) and avoids scanning the keyspace with KEYS
        return bool(self.redis.exists(key))

    def load_object(self, dbo_class, key):
        return self.load_by_key(dbo_class.dbo_key_type, key, dbo_class)

    def update_object(self, dbo, json_obj):
        self.load_json(dbo, json_obj)
        self.save_object(dbo, True)

    def delete_object(self, dbo):
        key = dbo.dbo_key
        self.redis.delete(key)
        if dbo.dbo_set_key:
            self.redis.srem(dbo.dbo_set_key, key)
        for dbo_col in dbo.dbo_collections:
            if dbo_col.key_type:
                coll = getattr(dbo, dbo_col.field_name, set())
                for child_dbo in coll:
                    self.delete_object(child_dbo)
        debug("object deleted: " + key, self)
        if self.object_map.get(dbo.dbo_key):
            del self.object_map[dbo.dbo_key]
        return True

    def fetch_set_keys(self, set_key):
        return self.redis.smembers(set_key)

    def set_index(self, index_name, key, value):
        return self.redis.hset(index_name, key, value)

    def get_index(self, index_name, key):
        return self.redis.hget(index_name, key)

    def delete_index(self, index_name, key):
        return self.redis.hdel(index_name, key)

    def _load_class(self, json_obj, base_class):
        class_path = json_obj.get("class_name")
        if not class_path:
            return self.cls_registry(base_class)
        clazz = self.class_map.get(class_path)
        if clazz:
            return clazz
        split_path = class_path.split(".")
        module_name = ".".join(split_path[:-1])
        class_name = split_path[-1]
        module = __import__(module_name, globals(), locals(), [class_name])
        clazz = getattr(module, class_name)
        clazz = self.cls_registry(clazz)
        self.class_map[class_path] = clazz
        return clazz

    def load_json(self, dbo, json_obj):
        for field_name in dbo.dbo_fields:
            try:
                setattr(dbo, field_name, json_obj[field_name])
            except KeyError:
                pass
        for dbo_col in dbo.dbo_collections:
            coll = getattr(dbo, dbo_col.field_name, [])
            try:
                for child_json in json_obj[dbo_col.field_name]:
                    try:
                        if dbo_col.key_type:
                            child_dbo = self.load_by_key(dbo_col.key_type, child_json, dbo_col.base_class)
                        else:
                            child_dbo = self._load_class(child_json, dbo_col.base_class)()
                            self.load_json(child_dbo, child_json)
                        coll.append(child_dbo)
                    except AttributeError:
                        warn("{0} json failed to load for coll {1} in {2}".format(child_json, dbo_col.field_name, unicode(dbo.dbo_id)), self)
            except KeyError:
                if dbo.dbo_key_type:
                    trace("db: Object " + unicode(dbo.dbo_debug_key) + " json missing collection " + dbo_col.field_name, self)

        for dbo_ref in dbo.dbo_refs:
            try:
                ref_key = json_obj[dbo_ref.field_name]
                ref_obj = self.load_by_key(dbo_ref.key_type, ref_key, dbo_ref.base_class)
                setattr(dbo, dbo_ref.field_name, ref_obj)
            except Exception:
                if dbo.dbo_key_type:
                    trace("db: Object " + unicode(dbo.dbo_debug_key) + " json missing ref " + dbo_ref.field_name, self)
        dbo.on_loaded()
        return True
Example #17
# -*- coding: gbk -*- 
'''
Created on 2012-5-26

@author: Sky
'''
from redis.client import StrictRedis
Sr = StrictRedis(host='localhost', port=6379, db=0)
Sr.set('foo', 'bar')
#print(str(Sr.get('foo'), encoding = "utf-8") == 'bar')
print(Sr.get('foo'))
Sr.hset("MyHash", "field1", "ль¤У")
print(Sr.hget("MyHash", "field11"))
Sr.rpush("list", "one")
Sr.rpush("list", "two")
print(Sr.llen("list"))
Sr.ltrim("list", 1, 0)  # start > end, so this empties the list
print(Sr.llen("list"))
Sr.hset("MyHash", "Key1", "Value1")
Sr.hset("MyHash", "Key2", "Value2")
for i in Sr.hkeys("MyHash"):
    print(i)
print(Sr.hlen("PlayerHash"))
print(Sr.get("XXX"))
print(type(Sr.smembers("EnemyTemplate:16:LOOT")))
for i in Sr.smembers("EnemyTemplate:16:LOOT"):
    print(i)

Example #18
class RedisDataSource(AbstractDataSource):

    _r = None

    def __init__(self, config):
        if self._validateConfig(config):
            self._r = StrictRedis(
                host=config[REDIS_DATASOURCE_CONFIG]
                [REDIS_DATASOURCE_CONFIG_HOST],
                port=config[REDIS_DATASOURCE_CONFIG]
                [REDIS_DATASOURCE_CONFIG_PORT],
                db=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_DB])
            logger.debug("Obtained internal redis handler: " + str(self._r))
        else:
            raise BaseException("Error validating config ")

    def update(self, item):
        self.store(item)

    def store(self, item):
        self._r.set(item.getHash(), item.getValue())

    def get(self, item):
        return self._r.get(item.getHash())

    def exists(self, item):
        return self.get(item) is not None

    def all(self):

        result = []
        # Obtain all keys
        keys = self._r.keys()

        # For each key, fetch its value
        for k in keys:
            value = self._r.get(k)
            result.append(BaseItem({"origin": "redis"}, value))
        return result

    def _validateConfig(self, config):

        validator = MultipleConfigValidator({
            VALIDATORS_LIST:
            [ContainsKeyConfigValidator({KEY_VALUE: REDIS_DATASOURCE_CONFIG})]
        })
        if not validator.validate(config):
            raise BaseException("Config validation error : does not contain " +
                                REDIS_DATASOURCE_CONFIG)

        # Validate redis datasource config
        validator = MultipleConfigValidator({
            VALIDATORS_LIST: [
                ContainsKeysConfigValidator({
                    KEYS_LIST: [
                        REDIS_DATASOURCE_CONFIG_DB,
                        REDIS_DATASOURCE_CONFIG_HOST,
                        REDIS_DATASOURCE_CONFIG_PORT
                    ]
                })
            ]
        })

        if not validator.validate(config[REDIS_DATASOURCE_CONFIG]):
            raise BaseException(
                "Config validation error : config not complete ")

        return True

    def delete(self, item):
        self._r.delete(item.getHash())
Example #19
class RedisStore():
    def __init__(self, dispatcher, db_host, db_port, db_num, db_pw):
        self.dispatcher = dispatcher
        pool = ConnectionPool(max_connections=2, db=db_num, host=db_host, port=db_port, password=db_pw)
        self.redis = StrictRedis(connection_pool=pool)
        self.encoder = JSONEncoder()
        self.decoder = JSONDecoder()
        self.class_map = {}
        self.object_map = {}
    
    def create_object(self, dbo, update_rev=False):
        self.save_object(dbo, update_rev)
        dbo.on_loaded()
            
    def save_object(self, dbo, update_rev=False, autosave=False):
        if update_rev:
            dbo.dbo_rev = getattr(dbo, "dbo_rev", 0) + 1
        json_obj = self.build_json(dbo)
        key = dbo.dbo_key
        self.redis.set(key, self.encoder.encode(json_obj))
        if dbo.dbo_set_key:
            self.redis.sadd(dbo.dbo_set_key, key)
        self.dispatcher.dispatch("db_log{0}".format("_auto" if autosave else ""), "object saved: " + key)
        self.object_map[dbo.dbo_key] = dbo
    
    def build_json(self, dbo):
        dbo.before_save()
        json_obj = {}
        if dbo.__class__ != dbo.dbo_base_class:
            json_obj["class_name"] = dbo.__module__ + "." + dbo.__class__.__name__
        for field_name in dbo.dbo_fields:
            json_obj[field_name] = getattr(dbo, field_name, None)
        for dbo_col in dbo.dbo_collections:
            coll_list = list()
            for child_dbo in getattr(dbo, dbo_col.field_name):
                if dbo_col.key_type:
                    coll_list.append(child_dbo.dbo_id)
                else:
                    coll_list.append(self.build_json(child_dbo))
            json_obj[dbo_col.field_name] = coll_list
        for dbo_ref in dbo.dbo_refs:
            ref = getattr(dbo, dbo_ref.field_name, None)
            if ref:
                json_obj[dbo_ref.field_name] = ref.dbo_id   
        return json_obj
    
    def cache_object(self, dbo):
        self.object_map[dbo.dbo_key] = dbo
    
    def load_cached(self, key):
        return self.object_map.get(key)
    
    def evict(self, dbo):
        try:
            del self.object_map[dbo.dbo_key]
        except KeyError:
            self.dispatcher.dispatch("db_log", "Failed to evict " + dbo.dbo_key + " from db cache")
                
    def load_by_key(self, key_type, key, base_class=None):
        dbo_key = key_type + ":" + key
        cached_dbo = self.object_map.get(dbo_key)
        if cached_dbo:
            return cached_dbo
        json_str = self.redis.get(dbo_key)
        if not json_str:
            return None
        json_obj = self.decoder.decode(json_str)
        dbo = self.load_class(json_obj, base_class)(key)
        if dbo.dbo_key_type:
            self.object_map[dbo.dbo_key] = dbo
        self.load_json(dbo, json_obj)
        return dbo
        
    def load_class(self, json_obj, base_class):
        class_path = json_obj.get("class_name")
        if not class_path:
            return base_class
        clazz = self.class_map.get(class_path)
        if clazz:
            return clazz
        split_path = class_path.split(".")
        module_name = ".".join(split_path[:-1])
        class_name = split_path[-1]
        module = __import__(module_name, globals(), locals(), [class_name])
        clazz = getattr(module, class_name)
        self.class_map[class_path] = clazz
        return clazz 
    
    def load_object(self, dbo_class, key):
        return self.load_by_key(dbo_class.dbo_key_type, key, dbo_class)
    
    def load_json(self, dbo, json_obj):
        
        for field_name in dbo.dbo_fields:
            try:
                setattr(dbo, field_name, json_obj[field_name])
            except KeyError:
                self.dispatcher.dispatch("db_log", "db: Object " + dbo.dbo_key + " json missing field " + field_name)
        for dbo_col in dbo.dbo_collections:
            coll = getattr(dbo, dbo_col.field_name, [])
            try:
                for child_json in json_obj[dbo_col.field_name]:
                    if dbo_col.key_type:
                        child_dbo = self.load_by_key(dbo_col.key_type, child_json, dbo_col.base_class)
                    else:
                        child_dbo = self.load_class(child_json, dbo_col.base_class)()
                        self.load_json(child_dbo, child_json)
                    coll.append(child_dbo)
            except AttributeError:
                self.dispatcher.dispatch("db_log", "{0} json failed to load for coll {1} in {2}".format(child_json, dbo_col.field_name, dbo.dbo_id))
            except KeyError:
                self.dispatcher.dispatch("db_log", "db: Object " + dbo.dbo_key + " json missing collection " + dbo_col.field_name)
        
        for dbo_ref in dbo.dbo_refs:
            try:
                ref_key = json_obj[dbo_ref.field_name]
                ref_obj = self.load_by_key(dbo_ref.key_type, ref_key, dbo_ref.base_class)
                setattr(dbo, dbo_ref.field_name, ref_obj)    
            except Exception:
                self.dispatcher.dispatch("db_log", "db: Object " + dbo.dbo_key + " json missing ref " + dbo_ref.field_name)
        dbo.on_loaded()    
        return True
                    
    def delete_object(self, dbo):
        key = dbo.dbo_key
        self.redis.delete(key)
        if dbo.dbo_set_key:
            self.redis.srem(dbo.dbo_set_key, key)
        for dbo_col in dbo.dbo_collections:
            if dbo_col.key_type:
                coll = getattr(dbo, dbo_col.field_name, set())
                for child_dbo in coll:
                    self.delete_object(child_dbo)
        self.dispatcher.dispatch("db_log", "object deleted: " + key)
        if self.object_map.get(dbo.dbo_key):
            del self.object_map[dbo.dbo_key]
        return True
        
    def fetch_set_keys(self, set_key):
        return self.redis.smembers(set_key)
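Both this store and the one in Example #16 persist a dotted class_name in the saved JSON and rebuild the class with __import__. A self-contained sketch of that round-trip; the stored path here is illustrative:

json_obj = {"class_name": "collections.OrderedDict"}
split_path = json_obj["class_name"].split(".")
module_name, class_name = ".".join(split_path[:-1]), split_path[-1]
module = __import__(module_name, globals(), locals(), [class_name])
clazz = getattr(module, class_name)
assert clazz.__name__ == "OrderedDict"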