Example #1
    def test_connection_exceptions(self, srs):

        # Default: losing connection raises connection error
        strm_raise_conn_loss = srs.streams(streams=streams, block=10)
        srs.xadd("S3_" + long_suffix, index=4000)
        _ = strm_raise_conn_loss.next()
        strm_raise_conn_loss.connection = StrictRedis(host=long_suffix)
        with pytest.raises(redis.exceptions.ConnectionError):
            _ = strm_raise_conn_loss.next()

        # Return the connection error instead of raising it
        strm_ret_conn_loss = srs.streams(streams=streams,
                                         block=10,
                                         raise_connection_exceptions=False)
        srs.xadd("S3_" + long_suffix, index=4000)
        _ = strm_ret_conn_loss.next()
        real_connection = strm_ret_conn_loss.connection

        # simulate lost connection
        strm_ret_conn_loss.connection = StrictRedis(host=long_suffix)
        msg = strm_ret_conn_loss.next()
        assert isinstance(msg, ConnectionError)

        # simulate restored connection
        strm_ret_conn_loss.connection = real_connection
        msg = strm_ret_conn_loss.next()
        assert msg is None
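For reference, the same two strategies with a plain redis-py client, as a minimal sketch (the host, stream name, and variable names here are illustrative, not from the test suite):

import redis
from redis import StrictRedis

client = StrictRedis(host='localhost', port=6379)
try:
    # A blocking stream read raises when the server is unreachable.
    client.xread({'mystream': '$'}, block=10)
except redis.exceptions.ConnectionError as exc:
    # Capturing the error instead of letting it propagate mirrors
    # raise_connection_exceptions=False above.
    result = exc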
Example #2
def rfunc(info):
    # Fill in default connection parameters.
    info.setdefault('host', '127.0.0.1')
    info.setdefault('port', 6379)

    # Build the client keyword arguments.
    conn_kwargs = dict(
        host=info['host'],
        port=int(info['port']),
    )

    # Show the connection parameters.
    print('# Connect')
    print(pformat(conn_kwargs, indent=4, width=1))

    # Connect and dump the server's INFO output.
    conn = None

    try:
        conn = StrictRedis(**conn_kwargs)

        res = '\n'.join('{:<30}: {}'.format(k, v) for k, v in conn.info().items())

        print('# Result')
        print(res)

    finally:
        # Drop the reference; redis-py manages the underlying connection pool.
        conn = None
Example #3
class IPPool(object):
    '''Scrape free proxy IPs from Zhima HTTP'''
    def __init__(self):
        self.url = 'http://wapi.http.linkudp.com/index/index/get_free_ip'
        self.redis_client = StrictRedis(host='127.0.0.1', port=6379)
        self.headers = {
            'User-Agent':
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
        }
        self.session = requests.Session()

    def run(self):
        for page in range(1, 485):
            data = {'page': page}
            response = self.session.post(self.url,
                                         data=data,
                                         headers=self.headers)
            print(response.status_code)
            html = json.loads(response.content.decode())['ret_data']['html']
            ip_list = re.findall(r'FREE</span>(.+?)</td>\s+<td>(\d+?)</td>',
                                 html)
            ip_list = ['http://' + i[0] + ':' + i[1] for i in ip_list]
            for ip in ip_list:
                print(ip)
                self.redis_client.sadd('ippool', ip)
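A hypothetical consumer for the 'ippool' set built above: pull a random proxy and use it for a request (a sketch; the target URL and timeout are placeholders):

import requests
from redis import StrictRedis

client = StrictRedis(host='127.0.0.1', port=6379)
proxy = client.srandmember('ippool')
if proxy:
    # Members were stored as full 'http://host:port' strings.
    requests.get('http://example.com', proxies={'http': proxy.decode()}, timeout=5)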
Example #4
class Record(object):
    def __init__(self, host='127.0.0.1', port=6379):
        # Pass the arguments through; the original ignored host and port.
        self.r = StrictRedis(host=host, port=port)

    def run(self):
        while True:
            value = self.r.rpop('alerts')
            if value:
                obj = json.loads(value)
                keyredis = obj['src_ip'] + '_' + str(obj['src_port']) + '_' + obj['dest_ip'] + '_' + str(obj['dest_port'])
                entry = self.r.get(keyredis)
                if entry:
                    restruct = json.loads(entry)
                else:
                    restruct = {}
                if 'http' not in restruct:
                    restruct['http'] = []
                if 'alerts' not in restruct:
                    restruct['alerts'] = []
                if 'files' not in restruct:
                    restruct['files'] = []
                if 'alert' in obj:
                    restruct['alerts'].append(obj['alert']['signature'])
                if 'fileinfo' in obj:
                    restruct['files'].append(obj['fileinfo'])
                if 'http' in obj:
                    restruct['http'].append(obj['http'])
                if len(restruct) > 0:
                    self.r.set(keyredis, json.dumps(restruct))
            else:
                sleep(1)
Example #5
 def __init__(self, size=5, test_url="http://www.baidu.com"):
     self.test_url = test_url
     self.size = size
     self.redis_db = StrictRedis()
     # Queue of proxies
     self.proxy = Queue()
     self.headers = {"User-Agent": UserAgent().chrome}
Example #6
class redis(object):
    def __init__(self, host='127.0.0.1', port=6379):
        self.r = StrictRedis(host, port)

    def rec(self, k, v):
        self.r.set(k, v)

    def rpush(self, v):
        self.r.rpush('alerts', v)
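This wrapper is the producer side of the consumer in Example #4, which pops from the same 'alerts' list. A minimal sketch of how the two fit together (the alert payload is illustrative):

q = redis()
q.rpush(json.dumps({'src_ip': '10.0.0.1', 'src_port': 1234,
                    'dest_ip': '10.0.0.2', 'dest_port': 80,
                    'alert': {'signature': 'test signature'}}))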
Example #7
 def __init__(self):
     self.url = 'http://wapi.http.linkudp.com/index/index/get_free_ip'
     self.redis_client = StrictRedis(host='127.0.0.1', port=6379)
     self.headers = {
         'User-Agent':
         'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
     }
     self.session = requests.Session()
Example #8
 def __init__(self):
     # Load the GeoIP databases into class attributes since they each need 20+ MB in memory
     if not self.__class__._geoip4:
         self.__class__._geoip4 = GeoIP(Config.GEOIP_PATH_V4, MEMORY_CACHE)
     if not self.__class__._geoip6:
         self.__class__._geoip6 = GeoIP(Config.GEOIP_PATH_V6, MEMORY_CACHE)
     self.redis = StrictRedis(Config.REDIS['HOST'], Config.REDIS['PORT'],
                              Config.REDIS['DB'])
Example #9
def main():
    r = StrictRedis(host='localhost', port=6379, db=0)

    ps = r.pubsub()

    ps.subscribe("logs")
    # listen() returns a generator; iterate it to receive messages.
    for message in ps.listen():
        print(message)
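The publishing side is a one-liner; any client can push messages to the "logs" channel that the subscriber above receives (a sketch):

from redis import StrictRedis

r = StrictRedis(host='localhost', port=6379, db=0)
r.publish("logs", "application started")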
Example #10
def rewrite_redis_aof_job():
    current_veil_env = get_current_veil_env()
    if not hasattr(current_veil_env.config, 'redis_servers'):
        return
    for host, port in current_veil_env.config.redis_servers:
        client = StrictRedis(host=host, port=port)
        if client.config_get('appendonly')['appendonly'] != 'yes':
            continue
        client.bgrewriteaof()
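Servers with appendonly off are skipped above. As a sketch, AOF can also be switched on at runtime before triggering the rewrite; note that config_set changes the live setting only and does not persist it to redis.conf:

client = StrictRedis(host='localhost', port=6379)
client.config_set('appendonly', 'yes')
client.bgrewriteaof()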
Example #11
 def __init__(self, db_host, db_port, db_num, db_pw):
     self.pool = ConnectionPool(max_connections=2,
                                db=db_num,
                                host=db_host,
                                port=db_port,
                                password=db_pw,
                                decode_responses=True)
     self.redis = StrictRedis(connection_pool=self.pool)
     self.redis.ping()
     self._object_map = WeakValueDictionary()
Example #12
 def __init__(self, config):
     if self._validateConfig(config):
         self._r = StrictRedis(
             host=config[REDIS_DATASOURCE_CONFIG]
             [REDIS_DATASOURCE_CONFIG_HOST],
             port=config[REDIS_DATASOURCE_CONFIG]
             [REDIS_DATASOURCE_CONFIG_PORT],
             db=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_DB])
         logger.debug("Obtained internal redis handler " + str(self._r))
     else:
         raise BaseException("Error validating config")
Example #13
 def persist(self, r: StrictRedis):
     print("Writing team to redis")
     r.sadd("quiz:teams", self.id)
     r.hset(
         "quiz:team", self.id,
         json.dumps({
             'id': self.id,
             'name': self.name,
             'key': self.key,
             'score': self.score
         }))
Example #14
 def __init__(self):
     """Initialize the object."""
     self.base_url = 'http://www.neihanpa.com/article'
     self.start_index = int(raw_input('Enter the start page: '))
     self.end_index = int(raw_input('Enter the end page: '))
     self.headers = HEADERS_USER
     # Create a queue to hold the pages
     self.queue = Queue(int(self.end_index - self.start_index))
     # XPath rule for extracting the article URLs
     self.xpath_urls = '//a[@class="title" and @title]/@href'
     # Create the Redis connection
     self.redis_cli = StrictRedis('127.0.0.1')
Example #15
 def __init_rd(self, master=False):
     if self.rd is None:
         if master:
             self.rd = StrictRedis(host=_redis_servers[0][0], port=_redis_servers[0][1], db=_redis_servers[0][2])
             self._master_rd = True
         else:
             server = random.choice(_redis_servers)
             self.rd = StrictRedis(host=server[0], port=server[1], db=server[2])
             self._master_rd = False
     elif master and not self._master_rd:
         self.rd = StrictRedis(host=_redis_servers[0][0], port=_redis_servers[0][1], db=_redis_servers[0][2])
         self._master_rd = True
Example #16
def get_cart_count(request, strict_redis: StrictRedis):
    """获取购物车商品总数量"""
    count_list = strict_redis.hvals('cart_%s' % request.user.id)
    total_count = 0
    for count in count_list:
        total_count += int(count)
    return total_count
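For reference, a sketch of the hash layout this view expects: one hash per user, mapping product ids to counts (the key and field names here are illustrative):

client = StrictRedis()
client.hset('cart_1', 'sku_1001', 2)
client.hset('cart_1', 'sku_1002', 3)
# get_cart_count would now report 5 items for the user with id 1.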
Example #17
def redis(app=None):
    app = app_or_default(app)

    if not hasattr(app, "redbeat_redis") or app.redbeat_redis is None:
        app.redbeat_redis = StrictRedis.from_url(app.conf.REDBEAT_REDIS_URL, decode_responses=True)

    return app.redbeat_redis
Example #18
 def setUp(self):
     self.testContext.startMock()
     self.redisClient = StrictRedis('127.0.0.1', 6379, 0)
     daobase.executeMixCmd = self.runRedisCmd
     daobase._executePayDataCmd = self.runRedisCmd
     daobase.executeUserCmd = self.runUserRedisCmd
     
     self.testContext.configure.setJson('game:9999:map.clientid', clientIdMap, 0)
     self.testContext.configure.setJson('game:9999:item', item_conf, 0)
     self.testContext.configure.setJson('game:9999:products', products_conf, 0)
     self.testContext.configure.setJson('game:9999:store', store_template_conf, 0)
     self.testContext.configure.setJson('game:9999:store', store_default_conf, clientIdMap[self.clientId])
     self.testContext.configure.setJson('game:9999:vip', vip_conf, 0)
     self.testContext.configure.setJson('game:9999:benefits', benefits_conf, 0)
     self.testContext.configure.setJson('game:9999:share', share_conf, 0)
     self.timestamp = pktimestamp.getCurrentTimestamp()
     self.pktimestampPatcher = patch('poker.util.timestamp.getCurrentTimestamp', self.getCurrentTimestamp)
     self.pktimestampPatcher.start()
     
     hallitem._initialize()
     hallvip._initialize()
     hallbenefits._initialize()
     hallshare._initialize()
     
     daobase.executeUserCmd(10001, 'del', 'share.status:9999:10001')
Example #19
def get_redis(app=None):
    app = app_or_default(app)
    conf = ensure_conf(app)
    if not hasattr(app, 'redbeat_redis') or app.redbeat_redis is None:
        redis_options = conf.app.conf.get(
            'REDBEAT_REDIS_OPTIONS',
            conf.app.conf.get('BROKER_TRANSPORT_OPTIONS', {}))
        retry_period = redis_options.get('retry_period')
        if conf.redis_url.startswith(
                'redis-sentinel') and 'sentinels' in redis_options:
            from redis.sentinel import Sentinel
            sentinel = Sentinel(
                redis_options['sentinels'],
                socket_timeout=redis_options.get('socket_timeout'),
                password=redis_options.get('password'),
                decode_responses=True)
            connection = sentinel.master_for(
                redis_options.get('service_name', 'master'))
        else:
            connection = StrictRedis.from_url(conf.redis_url,
                                              decode_responses=True)

        if retry_period is None:
            app.redbeat_redis = connection
        else:
            app.redbeat_redis = RetryingConnection(retry_period, connection)

    return app.redbeat_redis
Example #20
def server():
    redis_server = StrictRedis.from_url(REDIS_URL)
    keys = redis_server.keys(
        SCHEDULER_QUEUE_KEY % {'spider': ATestSpider.name} + '*')
    if keys:
        redis_server.delete(*keys)
    return redis_server
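KEYS scans the whole keyspace in a single blocking call, which is fine for a test fixture but risky on large databases; SCAN is the usual alternative. A sketch with the same effect:

pattern = SCHEDULER_QUEUE_KEY % {'spider': ATestSpider.name} + '*'
for key in redis_server.scan_iter(match=pattern):
    redis_server.delete(key)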
Example #21
 def __init__(self):
     # Load the GeoIP databases into class attributes since they each need 20+ MB in memory
     if not self.__class__._geoip4:
         self.__class__._geoip4 = GeoIP(Config.GEOIP_PATH_V4, MEMORY_CACHE)
     if not self.__class__._geoip6:
         self.__class__._geoip6 = GeoIP(Config.GEOIP_PATH_V6, MEMORY_CACHE)
     self.redis = StrictRedis(Config.REDIS['HOST'], Config.REDIS['PORT'], Config.REDIS['DB'])
Example #22
 def __init__(self):
     self._queue = Queue(
         connection=StrictRedis(host=REDIS_HOST, port=REDIS_PORT))
     self._rq_redis_storage = RQRedisDataStorage.instance()
     self._redis_subscriber = RedisSubscriber.instance()
     self._subscribed_callbacks = {}
     self._listen_for_results()
Example #23
def _initialize_redis_cluster() -> RedisClientType:
    if settings.USE_REDIS_CLUSTER:
        startup_nodes = settings.REDIS_CLUSTER_STARTUP_NODES
        if startup_nodes is None:
            startup_nodes = [{
                "host": settings.REDIS_HOST,
                "port": settings.REDIS_PORT
            }]
        startup_cluster_nodes = [
            ClusterNode(n["host"], n["port"]) for n in startup_nodes
        ]
        return RetryingStrictRedisCluster(
            startup_nodes=startup_cluster_nodes,
            socket_keepalive=True,
            password=settings.REDIS_PASSWORD,
            max_connections_per_node=True,
        )
    else:
        return StrictRedis(
            host=settings.REDIS_HOST,
            port=settings.REDIS_PORT,
            password=settings.REDIS_PASSWORD,
            db=settings.REDIS_DB,
            socket_keepalive=True,
        )
Example #24
def run_once():
    for port in REDIS_PORT.split(','):

        if ',' in REDIS_PORT:
            statsd_prefix = STATSD_PREFIX + '-{}'.format(port)
        else:
            statsd_prefix = STATSD_PREFIX

        redis = StrictRedis(REDIS_HOST, port)

        stats = redis.info()
        stats['keyspaces'] = {}

        for key in stats.keys():
            if key.startswith('db'):
                stats['keyspaces'][key] = stats[key]
                del stats[key]

        out_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        for g in GAUGES:
            if g in stats:
                send_metric(out_sock, '{}.{}'.format(statsd_prefix, g), 'g',
                            float(stats[g]))

        for c in COUNTERS:
            if c in stats:
                send_metric(out_sock, '{}.{}'.format(statsd_prefix, c), 'c',
                            float(stats[c]))

        for ks in stats['keyspaces']:
            for kc in KEYSPACE_COUNTERS:
                if kc in stats['keyspaces'][ks]:
                    send_metric(out_sock,
                                '{}.keyspace.{}'.format(statsd_prefix,
                                                        kc), 'c',
                                float(stats['keyspaces'][ks][kc]))

            for kg in KEYSPACE_GAUGES:
                if kg in stats['keyspaces'][ks]:
                    send_metric(out_sock,
                                '{}.keyspace.{}'.format(statsd_prefix,
                                                        kg), 'g',
                                float(stats['keyspaces'][ks][kg]))

        out_sock.close()
        time.sleep(PERIOD)
Example #25
 def __init__(self, config):
     if self._validateConfig(config):
         self._r = StrictRedis(host=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_HOST],
                                     port=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_PORT],
                                     db=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_DB])
         logger.debug("Obtained internal redis handler " + str(self._r))
     else:
         raise BaseException("Error validating config")
Example #26
 def flushdb(self):
     """Destroy every shard's db
     """
     for pool in self.pool_map.values():
         con = StrictRedis.from_url(pool.url, connection_pool=pool)
         self.log.debug("flushing shard member: %s", con)
         con.flushdb()
         del con
Example #27
def redis(app=None):
    app = app_or_default(app)
    conf = ensure_conf(app)
    if not hasattr(app, 'redbeat_redis') or app.redbeat_redis is None:
        app.redbeat_redis = StrictRedis.from_url(conf.redis_url,
                                                 decode_responses=True)

    return app.redbeat_redis
Example #28
 def __init__(self, dispatcher, db_host, db_port, db_num, db_pw):
     self.dispatcher = dispatcher
     pool = ConnectionPool(max_connections=2, db=db_num, host=db_host, port=db_port, password=db_pw)
     self.redis = StrictRedis(connection_pool=pool)
     self.encoder = JSONEncoder()
     self.decoder = JSONDecoder()
     self.class_map = {}
     self.object_map = {}
Example #29
def get_redis(app=None):
    app = app_or_default(app)
    conf = ensure_conf(app)
    if not hasattr(app, 'redbeat_redis') or app.redbeat_redis is None:
        redis_options = conf.app.conf.get(
            'REDBEAT_REDIS_OPTIONS',
            conf.app.conf.get('BROKER_TRANSPORT_OPTIONS', {}))
        retry_period = redis_options.get('retry_period')
        if conf.redis_url.startswith(
                'redis-sentinel') and 'sentinels' in redis_options:
            from redis.sentinel import Sentinel
            sentinel = Sentinel(
                redis_options['sentinels'],
                socket_timeout=redis_options.get('socket_timeout'),
                password=redis_options.get('password'),
                db=redis_options.get('db', 0),
                decode_responses=True)
            connection = sentinel.master_for(
                redis_options.get('service_name', 'master'))
        elif conf.redis_url.startswith('rediss'):
            ssl_options = {'ssl_cert_reqs': ssl.CERT_REQUIRED}
            if isinstance(conf.redis_use_ssl, dict):
                ssl_options.update(conf.redis_use_ssl)
            connection = StrictRedis.from_url(conf.redis_url,
                                              decode_responses=True,
                                              **ssl_options)
        elif conf.redis_url.startswith('redis-cluster'):
            from rediscluster import RedisCluster
            if not redis_options.get('startup_nodes'):
                redis_options = {
                    'startup_nodes': [{
                        "host": "localhost",
                        "port": "30001"
                    }]
                }
            connection = RedisCluster(decode_responses=True, **redis_options)
        else:
            connection = StrictRedis.from_url(conf.redis_url,
                                              decode_responses=True)

        if retry_period is None:
            app.redbeat_redis = connection
        else:
            app.redbeat_redis = RetryingConnection(retry_period, connection)

    return app.redbeat_redis
Example #30
def redis(app=None):
    app = app_or_default(app)

    if not hasattr(app, 'redbeat_redis') or app.redbeat_redis is None:
        app.redbeat_redis = StrictRedis.from_url(app.conf.REDBEAT_REDIS_URL,
                                                 decode_responses=True)

    return app.redbeat_redis
Example #31
 def __init__(self, config, section):
     from redis.client import StrictRedis
     self.conn = StrictRedis(
         config.get(section, 'redis-server'),
         config.getint(section, 'redis-port'),
         config.getint(section, 'redis-db'),
         decode_responses=True
     )
Example #32
def initDao():
    global redisClient
    redisClient = StrictRedis('127.0.0.1', 6379, 0)
    DaoRoomInfo._ftredis = MockRedis()
    DaoMatchStatus._ftredis = MockRedis()
    DaoMatchSigninRecord._ftredis = MockRedis()
    DaoUserSigninRecord._ftredis = MockRedis()
    DaoMatchPlayerInfo._ftredis = MockRedis()
    DaoUserMatchHistory._ftredis = MockRedis()
Example #33
def details(topic, pages):
    client = StrictRedis()
    with client.pipeline(transaction=False) as pipeline:
        pipeline.hgetall(topic)
        pipeline.zcard('{}/pages'.format(topic))
        pipeline.zrange('{}/pages'.format(topic), pages * -1, -1, withscores=True)
        results = pipeline.execute()

    def header(label):
        return '\n'.join(('-' * 80, label, '-' * 80))

    print header('CONFIGURATION')
    print tabulate.tabulate(results[0].items(), headers=('key', 'value'))

    print ''

    print header('PAGES ({} total)'.format(results[1]))
    print tabulate.tabulate(results[2], headers=('page', 'offset'))
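Note that transaction=False only batches the three commands into a single round trip; dropping the flag wraps the same calls in MULTI/EXEC so they execute atomically. A sketch:

with client.pipeline() as pipeline:  # transaction=True is the default
    pipeline.hgetall(topic)
    pipeline.zcard('{}/pages'.format(topic))
    results = pipeline.execute()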
Example #34
 def cluster_factory():
     if config.get("is_redis_cluster", False):
         return RetryingStrictRedisCluster(
             startup_nodes=hosts, decode_responses=True, skip_full_coverage_check=True
         )
     else:
         host = hosts[0].copy()
         host["decode_responses"] = True
         return StrictRedis(**host)
Example #35
    def load(self, r: StrictRedis):
        result = r.smembers("quiz:teams")
        for i in result:
            i = int(i)
            t = Team.load(r, i)
            self.teams[i] = t

        for b in self.boards.values():
            b.load(r)
Example #36
 def __init__(self, redis_connection=None, locker=None, *args, **kwargs):
     self.__redis_connection = redis_connection
     if self.__redis_connection is None:
         self.__redis_connection = StrictRedis.from_url(current_app.conf.CELERY_REDIS_SCHEDULER_URL)
     self._schedule = EntryProxy(self.__redis_connection)
     self._locker = locker
     if self._locker is None:
         self._locker = Redlock([current_app.conf.CELERY_REDIS_SCHEDULER_URL])
     super(ChardScheduler, self).__init__(*args, **kwargs)
Example #37
class RedisDataSource(AbstractDataSource):

    _r = None
    def __init__(self, config):
        if self._validateConfig(config):
            self._r = StrictRedis(host=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_HOST],
                                        port=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_PORT],
                                        db=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_DB])
            logger.debug("Obtained internal redis handler " + str(self._r))
        else:
            raise BaseException("Error validating config")


    def update(self, item):
        self.store(item)

    def store(self, item):
        self._r.set(item.getHash(), item.getValue())

    def get(self, item):
        return self._r.get(item.getHash())

    def exists(self, item):
        return self.get(item) is not None

    def all(self):

        result = []
        # Obtain all keys
        keys = self._r.keys()

        # For each key, get its value
        for k in keys:
            value = self._r.get(k)
            result.append(BaseItem({"origin": "redis"}, value))
        return result

    def _validateConfig(self, config):

        validator = MultipleConfigValidator(
                        {VALIDATORS_LIST:[ContainsKeyConfigValidator({KEY_VALUE:REDIS_DATASOURCE_CONFIG})]})
        if not validator.validate(config):
            raise BaseException("Config validation error : does not contain " + REDIS_DATASOURCE_CONFIG)

        # Validate redis datasource config
        validator = MultipleConfigValidator(
                        {VALIDATORS_LIST:[ContainsKeysConfigValidator({KEYS_LIST:[REDIS_DATASOURCE_CONFIG_DB,
                                                                                  REDIS_DATASOURCE_CONFIG_HOST,
                                                                                  REDIS_DATASOURCE_CONFIG_PORT]})]})

        if not validator.validate(config[REDIS_DATASOURCE_CONFIG]):
            raise BaseException("Config validation error : config not complete ")

        return True


    def delete(self, item):
        self._r.delete(item.getHash())
Example #38
def get_app(config=None):
    """
    App factory.

    :param dict config: configuration that can override config
        from `settings.py`
    :return: a new SuperdeskEve app instance
    """
    if config is None:
        config = {}

    config.setdefault('SOURCES', {})
    config['APP_ABSPATH'] = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))

    for key in dir(settings):
        if key.isupper():
            config.setdefault(key, getattr(settings, key))

    media_storage = SuperdeskGridFSMediaStorage
    if config.get('AMAZON_CONTAINER_NAME'):
        from superdesk.storage.amazon.amazon_media_storage import AmazonMediaStorage
        media_storage = AmazonMediaStorage

    app = Eve(
        auth=BearerAuth,
        settings=config,
        data=SuperdeskDataLayer,
        media=media_storage,
        json_encoder=MongoJSONEncoder,
        validator=SuperdeskValidator
    )

    superdesk.app = app
    _set_error_handlers(app)
    app.mail = Mail(app)
    if config.get('REDIS_URL'):
        app.redis = StrictRedis.from_url(config['REDIS_URL'], 0)

    for module_name in app.config['INSTALLED_APPS']:
        app_module = importlib.import_module(module_name)
        try:
            app_module.init_app(app)
        except AttributeError:
            pass

    for resource in config['DOMAIN']:
        app.register_resource(resource, config['DOMAIN'][resource])

    for blueprint in superdesk.BLUEPRINTS:
        prefix = app.api_prefix or None
        app.register_blueprint(blueprint, url_prefix=prefix)

    app.sentry = sentry
    sentry.init_app(app)

    return app
Example #39
def run_once():
    for port in REDIS_PORT.split(','):

        if ',' in REDIS_PORT:
            statsd_prefix = STATSD_PREFIX + '-{}'.format(port)
        else:
            statsd_prefix = STATSD_PREFIX

        redis = StrictRedis(REDIS_HOST, port)

        stats = redis.info()
        stats['keyspaces'] = {}

        for key in stats.keys():
            if key.startswith('db'):
                stats['keyspaces'][key] = stats[key]
                del stats[key]

        out_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        for g in GAUGES:
            if g in stats:
                send_metric(out_sock, '{}.{}'.format(statsd_prefix, g), 'g', float(stats[g]))

        for c in COUNTERS:
            if c in stats:
                send_metric(out_sock, '{}.{}'.format(statsd_prefix, c), 'c', float(stats[c]))

        for ks in stats['keyspaces']:
            for kc in KEYSPACE_COUNTERS:
                if kc in stats['keyspaces'][ks]:
                    send_metric(out_sock, '{}.keyspace.{}'.format(
                        statsd_prefix, kc), 'c',
                        float(stats['keyspaces'][ks][kc]))

            for kg in KEYSPACE_GAUGES:
                if kg in stats['keyspaces'][ks]:
                    send_metric(out_sock, '{}.keyspace.{}'.format(
                        statsd_prefix, kg), 'g',
                        float(stats['keyspaces'][ks][kg]))

        out_sock.close()
        time.sleep(PERIOD)
Example #40
def get_app(config=None):
    """
    App factory.

    :param dict config: configuration that can override config
        from `settings.py`
    :return: a new SuperdeskEve app instance
    """
    if config is None:
        config = {}

    config['APP_ABSPATH'] = os.path.dirname(
        os.path.abspath(os.path.dirname(__file__)))

    for key in dir(settings):
        if key.isupper():
            config.setdefault(key, getattr(settings, key))

    media_storage = SuperdeskGridFSMediaStorage
    if config.get('AMAZON_CONTAINER_NAME'):
        from superdesk.storage.amazon.amazon_media_storage import AmazonMediaStorage
        media_storage = AmazonMediaStorage

    app = Eve(auth=BearerAuth,
              settings=config,
              data=SuperdeskDataLayer,
              media=media_storage,
              json_encoder=MongoJSONEncoder,
              validator=SuperdeskValidator)

    superdesk.app = app
    _set_error_handlers(app)
    app.mail = Mail(app)
    if config.get('REDIS_URL'):
        app.redis = StrictRedis.from_url(config['REDIS_URL'], 0)

    for module_name in app.config['INSTALLED_APPS']:
        app_module = importlib.import_module(module_name)
        try:
            app_module.init_app(app)
        except AttributeError:
            pass

    for resource in config['DOMAIN']:
        app.register_resource(resource, config['DOMAIN'][resource])

    for blueprint in superdesk.BLUEPRINTS:
        prefix = app.api_prefix or None
        app.register_blueprint(blueprint, url_prefix=prefix)

    app.sentry = sentry
    sentry.init_app(app)

    return app
Example #41
    def __init__(self, redis_connection=None, locker=None, *args, **kwargs):
        self.__redis_connection = redis_connection
        if self.__redis_connection is None:
            self.__redis_connection = StrictRedis.from_url(
                current_app.conf.CELERY_REDIS_SCHEDULER_URL)

        self._schedule = EntryProxy(self.__redis_connection)
        self._locker = locker
        if self._locker is None:
            self._locker = Redlock(
                [current_app.conf.CELERY_REDIS_SCHEDULER_URL])
        super(ProbitScheduler, self).__init__(*args, **kwargs)
Example #42
def test_is_rate_limited_script():
    now = int(time.time())

    client = StrictRedis(db=9)

    # The item should not be rate limited by either key.
    assert map(
        bool,
        is_rate_limited(client, ('foo', 'bar'),
                        (1, now + 60, 2, now + 120))) == [False, False]

    # The item should be rate limited by the first key (1).
    assert map(
        bool,
        is_rate_limited(client, ('foo', 'bar'),
                        (1, now + 60, 2, now + 120))) == [True, False]

    # The item should still be rate limited by the first key (1), but *not*
    # rate limited by the second key (2) even though this is the third time
    # we've checked the quotas. This ensures items that are rejected by a lower
    # quota don't affect unrelated items that share a parent quota.
    assert map(
        bool,
        is_rate_limited(client, ('foo', 'bar'),
                        (1, now + 60, 2, now + 120))) == [True, False]

    assert client.get('foo') == '1'
    assert 59 <= client.ttl('foo') <= 60

    assert client.get('bar') == '1'
    assert 119 <= client.ttl('bar') <= 120
Example #43
class Command(object):
    def __init__(self):
        self.redis = StrictRedis(Config.REDIS['HOST'], Config.REDIS['PORT'],
                                 Config.REDIS['DB'])

    def run(self):
        log.debug("Updating mirror database")
        geoip = GeoIP(Config.GEOIP_PATH_V4)

        for status in mirror_statuses(
                unofficial_mirrors=Config.UNOFFICIAL_MIRRORS):
            name = status['mirror']
            if name == "a.pypi.python.org":
                # don't include 'a' in the list of mirrors - it's no mirror after all
                continue
            time_diff = status['time_diff']
            if not isinstance(time_diff, timedelta):
                continue

            log.debug("  Processing mirror '%s'", name)
            record = geoip.record_by_name(name)
            lat = record['latitude']
            lon = record['longitude']

            log.debug("    Age: %d, Lat: %0.5f, Lon: %0.5f",
                      time_diff.total_seconds(), lat, lon)

            try:
                mirror = Mirror.objects.get(name=name)
            except ObjectNotFound:
                mirror = Mirror(name=name)
            mirror.age = time_diff.total_seconds()
            mirror.lat = lat
            mirror.lon = lon

            mirror.save()

        self.redis.set(Config.KEY_LAST_UPDATE, time.time())
        log.debug("Finished updating mirror database")
Example #44
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Redis Explorer")
        self.resize(QtWidgets.QDesktopWidget().availableGeometry(self).size() * 0.5)
        self.tree = QtWidgets.QTreeWidget()
        self.label = QtWidgets.QTextEdit()
        font = self.label.font()
        font.setPointSize(12)
        self.label.setFont(font)
        self.tree.setColumnCount(2)
        self.tree.header().setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
        self.tree.setHeaderHidden(True)
        splitter = QtWidgets.QSplitter()
        splitter.addWidget(self.tree)
        scroll_area = QtWidgets.QScrollArea()
        scroll_area.setWidgetResizable(True)
        scroll_area.setWidget(self.label)
        splitter.addWidget(scroll_area)
        splitter.setSizes([1, 1])
        self.setCentralWidget(splitter)
        toolbar = self.addToolBar("")
        toolbar.setMovable(False)
        toolbar.setIconSize(QtCore.QSize(32, 32))
        toolbar.addAction(QtGui.QIcon("resources/list-add.png"), "").triggered.connect(lambda: self.plus_font(1))
        toolbar.addAction(QtGui.QIcon("resources/list-remove.png"), "").triggered.connect(lambda: self.plus_font(-1))
        toolbar.addAction(QtGui.QIcon("resources/view-refresh.png"), "").triggered.connect(lambda: self.refresh())
        self.redis = StrictRedis()

        def item_clicked(item: QtWidgets.QTreeWidgetItem):
            if item.parent() is None:
                return
            value = self.redis.get(item.text(0))
            value = value.decode()
            text = json.dumps(json.loads(value), ensure_ascii=False, indent=4)
            text = text.replace("  ", "&nbsp;").replace("\n", "<br/>").replace(":", "<font color=red>:</font>")
            text = re.sub(r'"(.*?)(?<!\\)"', r'<font color=green>"\g<1>"</font>', text)
            self.label.setHtml(text)

        self.tree.itemClicked.connect(item_clicked)
Example #45
 def __init__(self,
              sentinels,
              password=None,
              socket_timeout=None,
              min_other_sentinels=0):
     self.sentinels = [
         StrictRedis(hostname,
                     port,
                     password=password,
                     socket_timeout=socket_timeout)
         for hostname, port in sentinels
     ]
     self.min_other_sentinels = min_other_sentinels
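This constructor mirrors redis-py's own Sentinel class. Such a class is normally consumed through master_for/slave_for, which resolve the current master or a replica by service name; a sketch using redis-py's public API (host, port, and service name are placeholders):

from redis.sentinel import Sentinel

sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.5)
master = sentinel.master_for('mymaster', socket_timeout=0.5)
replica = sentinel.slave_for('mymaster', socket_timeout=0.5)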
Example #46
def test_is_rate_limited_script():
    now = int(time.time())

    client = StrictRedis(db=9)

    # The item should not be rate limited by either key.
    assert map(bool, is_rate_limited(client, ('foo', 'bar'), (1, now + 60, 2, now + 120))) == [False, False]

    # The item should be rate limited by the first key (1).
    assert map(bool, is_rate_limited(client, ('foo', 'bar'), (1, now + 60, 2, now + 120))) == [True, False]

    # The item should still be rate limited by the first key (1), but *not*
    # rate limited by the second key (2) even though this is the third time
    # we've checked the quotas. This ensures items that are rejected by a lower
    # quota don't affect unrelated items that share a parent quota.
    assert map(bool, is_rate_limited(client, ('foo', 'bar'), (1, now + 60, 2, now + 120))) == [True, False]

    assert client.get('foo') == '1'
    assert 59 <= client.ttl('foo') <= 60

    assert client.get('bar') == '1'
    assert 119 <= client.ttl('bar') <= 120
Example #47
 def connection(self, node_id):
     if not self.__ready:
         return
     shard = self.shard_node(node_id)
     try:
         pool = self.pool_map.get(self.shard_map.get(shard))
         con = StrictRedis.from_url(pool.url, connection_pool=pool)
         yield con
     except Exception:
         self.log.exception("Something blew up in the Redis context "
                            "manager")
         raise
     finally:
         del con
Example #48
    def load(self, r: StrictRedis):
        active = r.hget(
            "quiz:board:{}:questions".format(self.category.board.id),
            "{}-{}".format(self.category.id, self.id))
        if active is None:
            self.visible = True
        else:
            self.visible = active.decode('utf-8') == 'True'

        if not self.visible:
            socketio.emit('question.hide', {
                'category': self.category.id,
                'id': self.id
            })
Example #49
    def test_truncate_timeline_script(self):
        client = StrictRedis(db=9)

        timeline = 'timeline'

        # Preload some fake records (the contents don't matter.)
        records = list(itertools.islice(self.records, 10))
        for record in records:
            client.zadd(timeline, record.timestamp, record.key)
            client.set(make_record_key(timeline, record.key), 'data')

        with self.assertChanges(lambda: client.zcard(timeline), before=10, after=5):
            truncate_timeline((timeline,), (5,), client)

            # Ensure the early records don't exist.
            for record in records[:5]:
                assert not client.zscore(timeline, record.key)
                assert not client.exists(make_record_key(timeline, record.key))

            # Ensure the later records do exist.
            for record in records[-5:]:
                assert client.zscore(timeline, record.key) == float(record.timestamp)
                assert client.exists(make_record_key(timeline, record.key))
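The zadd calls above use the pre-3.0 redis-py signature (name, score, member). Since redis-py 3.0, members and scores are passed as a mapping, so the preload loop would read:

for record in records:
    client.zadd(timeline, {record.key: record.timestamp})
    client.set(make_record_key(timeline, record.key), 'data')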
Example #50
    def test_ensure_timeline_scheduled_script(self):
        client = StrictRedis(db=9)

        timeline = 'timeline'
        timestamp = 100.0

        waiting_set_size = functools.partial(client.zcard, 'waiting')
        ready_set_size = functools.partial(client.zcard, 'ready')
        timeline_score_in_waiting_set = functools.partial(client.zscore, 'waiting', timeline)
        timeline_score_in_ready_set = functools.partial(client.zscore, 'ready', timeline)

        # The first addition should cause the timeline to be added to the waiting set.
        with self.assertChanges(waiting_set_size, before=0, after=1), \
                self.assertChanges(timeline_score_in_waiting_set, before=None, after=timestamp):
            ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp), client)

        # Adding it again with a timestamp in the future should not change the schedule time.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertDoesNotChange(timeline_score_in_waiting_set):
            ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp + 50), client)

        # If we see a record with a timestamp earlier than the schedule time,
        # we should change the schedule.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertChanges(timeline_score_in_waiting_set, before=timestamp, after=timestamp - 50):
            ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp - 50), client)

        # Move the timeline from the waiting set to the ready set.
        client.zrem('waiting', timeline)
        client.zadd('ready', timestamp, timeline)

        # Nothing should change.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertDoesNotChange(ready_set_size), \
                self.assertDoesNotChange(timeline_score_in_ready_set):
            ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp - 50), client)
Example #51
    def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
                 **connection_kwargs):
        # if sentinel_kwargs isn't defined, use the socket_* options from
        # connection_kwargs
        if sentinel_kwargs is None:
            sentinel_kwargs = dict([(k, v)
                                    for k, v in iteritems(connection_kwargs)
                                    if k.startswith('socket_')
                                    ])
        self.sentinel_kwargs = sentinel_kwargs

        self.sentinels = [StrictRedis(hostname, port, **self.sentinel_kwargs)
                          for hostname, port in sentinels]
        self.min_other_sentinels = min_other_sentinels
        self.connection_kwargs = connection_kwargs
Example #52
class Command(object):
    def __init__(self):
        self.redis = StrictRedis(Config.REDIS['HOST'], Config.REDIS['PORT'], Config.REDIS['DB'])

    def run(self):
        log.debug("Updating mirror database")
        geoip = GeoIP(Config.GEOIP_PATH_V4)

        for status in mirror_statuses(unofficial_mirrors=Config.UNOFFICIAL_MIRRORS):
            name = status['mirror']
            if name == "a.pypi.python.org":
                # don't include 'a' in the list of mirrors - it's no mirror after all
                continue
            time_diff = status['time_diff']
            if not isinstance(time_diff, timedelta):
                continue

            log.debug("  Processing mirror '%s'", name)
            record = geoip.record_by_name(name)
            lat = record['latitude']
            lon = record['longitude']

            log.debug("    Age: %d, Lat: %0.5f, Lon: %0.5f", time_diff.total_seconds(), lat, lon)

            try:
                mirror = Mirror.objects.get(name=name)
            except ObjectNotFound:
                mirror = Mirror(name=name)
            mirror.age = time_diff.total_seconds()
            mirror.lat = lat
            mirror.lon = lon

            mirror.save()

        self.redis.set(Config.KEY_LAST_UPDATE, time.time())
        log.debug("Finished updating mirror database")
Example #53
 def pipeline(self, node_id):
     if not self.__ready:
         return
     shard = self.shard_node(node_id)
     try:
         pool = self.pool_map.get(self.shard_map.get(shard))
         con = StrictRedis.from_url(pool.url, connection_pool=pool)
         with con.pipeline() as pipe:
             try:
                 yield pipe
                 pipe.execute()
             except:
                 self.log.exception("Something blew up inside a pipeline "
                                    "context")
                 pipe.reset()
                 raise
     except Exception:
         self.log.exception("Something blew up in the Redis context "
                            "manager")
         raise
     finally:
         del con
Example #54
class RedisCache(CacheBase):
    def __init__(self, config, section):
        from redis.client import StrictRedis
        self.conn = StrictRedis(
            config.get(section, 'redis-server'),
            config.getint(section, 'redis-port'),
            config.getint(section, 'redis-db'),
            decode_responses=True
        )

    def check_password(self, user, password):
        """Check the given user and password.

        Returns None on cache miss, True if password matches, False if not.
        """
        cached = self.conn.get(self.prefix('%s-pass' % user))
        if cached is None:
            return cached
        else:
            return cached == self.hash(password, cached)

    def set_password(self, user, password):
        self.conn.set(self.prefix('%s-pass' % user), self.hash(password, None), ex=self.expire)

    def in_groups(self, user, groups):
        key = self.prefix('%s-groups' % user)
        if not self.conn.exists(key):
            return None

        return not self.conn.smembers(key).isdisjoint(groups)

    def set_groups(self, user, groups):
        key = self.prefix('%s-groups' % user)
        pipe = self.conn.pipeline()
        pipe.sadd(key, *groups).expire(key, self.expire)
        pipe.execute()
Example #55
    def test_ensure_timeline_scheduled_script(self):
        client = StrictRedis(db=9)

        timeline = 'timeline'
        timestamp = 100.0

        waiting_set_size = functools.partial(client.zcard, 'waiting')
        ready_set_size = functools.partial(client.zcard, 'ready')

        timeline_score_in_waiting_set = functools.partial(client.zscore, 'waiting', timeline)
        timeline_score_in_ready_set = functools.partial(client.zscore, 'ready', timeline)

        keys = ('waiting', 'ready', 'last-processed')

        # The first addition should cause the timeline to be added to the ready set.
        with self.assertChanges(ready_set_size, before=0, after=1), \
                self.assertChanges(timeline_score_in_ready_set, before=None, after=timestamp):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp, 1, 10), client) == 1

        # Adding it again with a timestamp in the future should not change the schedule time.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertDoesNotChange(ready_set_size), \
                self.assertDoesNotChange(timeline_score_in_ready_set):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp + 50, 1, 10), client) is None

        # Move the timeline from the ready set to the waiting set.
        client.zrem('ready', timeline)
        client.zadd('waiting', timestamp, timeline)
        client.set('last-processed', timestamp)

        increment = 1
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertChanges(timeline_score_in_waiting_set, before=timestamp, after=timestamp + increment):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp, increment, 10), client) is None

        # Make sure the schedule respects the maximum value.
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertChanges(timeline_score_in_waiting_set, before=timestamp + 1, after=timestamp):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp, increment, 0), client) is None

        # Test to ensure a missing last processed timestamp can be handled
        # correctly (chooses minimum of schedule value and record timestamp.)
        client.zadd('waiting', timestamp, timeline)
        client.delete('last-processed')
        with self.assertDoesNotChange(waiting_set_size), \
                self.assertDoesNotChange(timeline_score_in_waiting_set):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp + 100, increment, 10), client) is None

        with self.assertDoesNotChange(waiting_set_size), \
                self.assertChanges(timeline_score_in_waiting_set, before=timestamp, after=timestamp - 100):
            assert ensure_timeline_scheduled(keys, (timeline, timestamp - 100, increment, 10), client) is None
Example #56
 def __init__(self, host='127.0.0.1', port=6379):
     self.r = StrictRedis(host, port)
Example #57
class RedisStore():
    def __init__(self, dispatcher, db_host, db_port, db_num, db_pw):
        self.dispatcher = dispatcher
        pool = ConnectionPool(max_connections=2, db=db_num, host=db_host, port=db_port, password=db_pw)
        self.redis = StrictRedis(connection_pool=pool)
        self.encoder = JSONEncoder()
        self.decoder = JSONDecoder()
        self.class_map = {}
        self.object_map = {}
    
    def create_object(self, dbo, update_rev=False):
        self.save_object(dbo)
        dbo.on_loaded()
            
    def save_object(self, dbo, update_rev=False, autosave=False):
        if update_rev:
            dbo.dbo_rev = getattr(dbo, "dbo_rev", 0) + 1
        json_obj = self.build_json(dbo)
        key = dbo.dbo_key
        self.redis.set(key, self.encoder.encode(json_obj))
        if dbo.dbo_set_key:
            self.redis.sadd(dbo.dbo_set_key, key)
        self.dispatcher.dispatch("db_log{0}".format("_auto" if autosave else ""), "object saved: " + key)
        self.object_map[dbo.dbo_key] = dbo
    
    def build_json(self, dbo):
        dbo.before_save()
        json_obj = {}
        if dbo.__class__ != dbo.dbo_base_class:
            json_obj["class_name"] = dbo.__module__ + "." + dbo.__class__.__name__
        for field_name in dbo.dbo_fields:
            json_obj[field_name] = getattr(dbo, field_name, None)
        for dbo_col in dbo.dbo_collections:
            coll_list = list()
            for child_dbo in getattr(dbo, dbo_col.field_name):
                if dbo_col.key_type:
                    coll_list.append(child_dbo.dbo_id)
                else:
                    coll_list.append(self.build_json(child_dbo))
            json_obj[dbo_col.field_name] = coll_list
        for dbo_ref in dbo.dbo_refs:
            ref = getattr(dbo, dbo_ref.field_name, None)
            if ref:
                json_obj[dbo_ref.field_name] = ref.dbo_id   
        return json_obj
    
    def cache_object(self, dbo):
        # The original looked the key up without assigning; store the object.
        self.object_map[dbo.dbo_key] = dbo
    
    def load_cached(self, key):
        return self.object_map.get(key)
    
    def evict(self, dbo):
        try:
            del self.object_map[dbo.dbo_key]
        except:
            self.dispatcher.dispatch("db_log", "Failed to evict " + dbo.dbo_key + " from db cache")
                
    def load_by_key(self, key_type, key, base_class=None):
        dbo_key = key_type + ":" + key
        cached_dbo = self.object_map.get(dbo_key)
        if cached_dbo:
            return cached_dbo
        json_str = self.redis.get(dbo_key)
        if not json_str:
            return None
        json_obj = self.decoder.decode(json_str)
        dbo = self.load_class(json_obj, base_class)(key)
        if dbo.dbo_key_type:
            self.object_map[dbo.dbo_key] = dbo
        self.load_json(dbo, json_obj)
        return dbo
        
    def load_class(self, json_obj, base_class):
        class_path = json_obj.get("class_name")
        if not class_path:
            return base_class
        clazz = self.class_map.get(class_path)
        if clazz:
            return clazz
        split_path = class_path.split(".")
        module_name = ".".join(split_path[:-1])
        class_name = split_path[-1]
        module = __import__(module_name, globals(), locals(), [class_name])
        clazz = getattr(module, class_name)
        self.class_map[class_path] = clazz
        return clazz 
    
    def load_object(self, dbo_class, key):
        return self.load_by_key(dbo_class.dbo_key_type, key, dbo_class)
    
    def load_json(self, dbo, json_obj):
        
        for field_name in dbo.dbo_fields:
            try:
                setattr(dbo, field_name, json_obj[field_name])
            except KeyError:
                self.dispatcher.dispatch("db_log", "db: Object " + dbo.dbo_key + " json missing field " + field_name)
        for dbo_col in dbo.dbo_collections:
            coll = getattr(dbo, dbo_col.field_name, [])
            try:
                for child_json in json_obj[dbo_col.field_name]:
                    if dbo_col.key_type:
                        child_dbo = self.load_by_key(dbo_col.key_type, child_json, dbo_col.base_class)
                    else:
                        child_dbo = self.load_class(child_json, dbo_col.base_class)()
                        self.load_json(child_dbo, child_json)
                    coll.append(child_dbo)
            except AttributeError:
                self.dispatcher.dispatch("db_log", "{0} json failed to load for coll {1} in {2}".format(child_json, dbo_col.field_name, dbo.dbo_id))
            except KeyError:
                self.dispatcher.dispatch("db_log", "db: Object " + dbo.dbo_key + " json missing collection " + dbo_col.field_name)
        
        for dbo_ref in dbo.dbo_refs:
            try:
                ref_key = json_obj[dbo_ref.field_name]
                ref_obj = self.load_by_key(dbo_ref.key_type, ref_key, dbo_ref.base_class)
                setattr(dbo, dbo_ref.field_name, ref_obj)    
            except:
                self.dispatcher.dispatch("db_log", "db: Object " + dbo.dbo_key + " json missing ref " + dbo_ref.field_name)
        dbo.on_loaded()    
        return True
                    
    def delete_object(self, dbo):
        key = dbo.dbo_key
        self.redis.delete(key)
        if dbo.dbo_set_key:
            self.redis.srem(dbo.dbo_set_key, key)
        for dbo_col in dbo.dbo_collections:
            if dbo_col.key_type:
                coll = getattr(dbo, dbo_col.field_name, set())
                for child_dbo in coll:
                    self.delete_object(child_dbo)
        self.dispatcher.dispatch("db_log", "object deleted: " + key)
        if self.object_map.get(dbo.dbo_key):
            del self.object_map[dbo.dbo_key]
        return True
        
    def fetch_set_keys(self, set_key):
        return self.redis.smembers(set_key)
Example #58
class DistanceCalculator(object):
    _geoip4 = None
    _geoip6 = None

    def __init__(self):
        # Load the GeoIP databases into class attributes since they each need 20+ MB in memory
        if not self.__class__._geoip4:
            self.__class__._geoip4 = GeoIP(Config.GEOIP_PATH_V4, MEMORY_CACHE)
        if not self.__class__._geoip6:
            self.__class__._geoip6 = GeoIP(Config.GEOIP_PATH_V6, MEMORY_CACHE)
        self.redis = StrictRedis(Config.REDIS['HOST'], Config.REDIS['PORT'], Config.REDIS['DB'])

    @staticmethod
    def _haversine(lon1, lat1, lon2, lat2):
        """
        Calculate the great circle distance between two points
        on the earth (specified in decimal degrees)
        """
        # convert decimal degrees to radians
        lon1, lat1, lon2, lat2 = map(lambda v: radians(float(v)), [lon1, lat1, lon2, lat2])
        # haversine formula
        dlon = lon2 - lon1
        dlat = lat2 - lat1
        a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
        c = 2 * asin(sqrt(a))
        km = 6367 * c  # convert to km
        return km

    def get_mirror_distances(self, address):
        last_update = self.redis.get(Config.KEY_LAST_UPDATE)
        key = Config.KEY_MIRROR.format(address, last_update)
        distances = OrderedDict(self.redis.zrange(key, 0, -1, withscores=True))
        if not distances:
            if address.startswith("::ffff:"):
                address = address.replace("::ffff:", "")
            try:
                if ":" in address:
                    record = self._geoip6.record_by_addr(address)
                else:
                    record = self._geoip4.record_by_addr(address)
            except socket.error:
                raise GeoIPLookupError()
            if not record:
                raise GeoIPLookupError()
            lat = record['latitude']
            lon = record['longitude']

            distances = OrderedDict(
                sorted(
                    (
                        (mirror.name, self._haversine(lon, lat, mirror.lon, mirror.lat))
                        for mirror in Mirror.objects.filter(age__lt=3601)
                    ),
                    key=itemgetter(1)
                )
            )
            if distances:
                self.redis.zadd(key, **distances)
                self.redis.expire(key, 60 * 10)  # 10 min
        return distances

    def get_nearest_mirror(self, address):
        try:
            distances = self.get_mirror_distances(address)
            if distances:
                return next(distances.iteritems())[0]
            return Config.FALLBACK_MIRROR
        except GeoIPLookupError:
            return Config.FALLBACK_MIRROR
Example #59
    class RedisSessionObject():
        implements(ISession)

        def __init__(self, request):
            self._options = _options
            self.rd = None
            self._master_rd = False
            self.request = request
            self._data = None
            self.id = None
            self._new_session = True
            self._changed = False

            cookie = self.request.headers.get('Cookie')
            if cookie is None:
                self.__create_id()
            else:
                c = SimpleCookie()
                c.load(cookie)
                session_cookie = c.get(self._options['_cookie_name'])
                if session_cookie is None:
                    #new session!
                    self.__create_id()
                else:
                    self.id = session_cookie.value
                    self._new_session = False

            def session_callback(request, response):
                exception = getattr(request, 'exception', None)
                commit = self._changed
                increase_expire_mod = _options['_increase_expire_mod']
                if increase_expire_mod > 0:
                    rnd = round(random.random() * 1000000)
                    mod = rnd % increase_expire_mod
                    if not mod:
                        # print 'Saving due to increase_expire_mod'
                        commit = True

                if exception is None and commit:
                    self.__save()
                    cookie = SimpleCookie()
                    _cname = self._options['_cookie_name']
                    cookie[_cname] = self.id
                    domain = self._options.get('cookie_domain')
                    cookie[_cname]['path'] = _options['_path']
                    if domain is not None:
                        cookie[_cname]['domain'] = domain
                    if self._options['_secure']:
                        cookie[_cname]['secure'] = True
                    header = cookie[_cname].output(header='')
                    # print 'Writing cookie header:', header
                    response.headerlist.append(('Set-Cookie', header))

            request.add_response_callback(session_callback)

        # private methods
        def __init_rd(self, master=False):
            if self.rd is None:
                if master:
                    self.rd = StrictRedis(host=_redis_servers[0][0], port=_redis_servers[0][1], db=_redis_servers[0][2])
                    self._master_rd = True
                else:
                    server = random.choice(_redis_servers)
                    self.rd = StrictRedis(host=server[0], port=server[1], db=server[2])
                    self._master_rd = False
            elif master and not self._master_rd:
                self.rd = StrictRedis(host=_redis_servers[0][0], port=_redis_servers[0][1], db=_redis_servers[0][2])
                self._master_rd = True

        def __key(self):
            return 'rd:ses:%s' % self.id

        def __load(self):
            if self._data is None:
                self.__init_rd()
                data = self.rd.get(self.__key())
                if data is not None:
                    self._data = msgpack.unpackb(data, use_list=True, encoding='utf-8')
                else:
                    self._data = {}

        def __save(self):
            if self._data is not None and len(self._data):
                self.__init_rd(master=True)
                self.rd.setex(self.__key(), self._options['_expire'], msgpack.packb(self._data, encoding='utf-8'))

        def __create_id(self):
            self.id = hashlib.sha1(hashlib.sha1("%f%s%f%s" % (time.time(), id({}), random.random(), getpid())).hexdigest(), ).hexdigest()

        def init_with_id(self, session_id):
            """
            Init the session with custom id. the session data is no loaded immediately but loaded only when data is accessed
            :param session_id:
            :return: self
            """
            self.id = session_id
            self._data = None
            return self

        def set_expire(self, expire):
            self._options['_expire'] = expire

        # ISession API
        def save(self):
            self._changed = True

        def invalidate(self):
            self.__init_rd(master=True)
            self.rd.delete(self.__key())
            #todo: delete cookie

        def changed(self):
            self._changed = True

        def flash(self, msg, queue='', allow_duplicate=True):
            self.__load()
            key = '_flsh:%s_' % queue
            q = self.get(key, [])
            if not allow_duplicate:
                if msg not in q:
                    q.append(msg)
            else:
                q.append(msg)
            self[key] = q

        def pop_flash(self, queue=''):
            self.__load()
            key = '_flsh:%s_' % queue
            q = self.get(key, [])
            if len(q):
                e = q.pop()
                self[key] = q
                return e
            return None

        def peek_flash(self, queue=''):
            self.__load()
            key = '_flsh:%s_' % queue
            q = self.get(key, [])
            if len(q):
                e = q[0]
                return e
            return None

        def new_csrf_token(self):
            token = os.urandom(20).encode('hex')
            self['_csrft_'] = token
            return token

        def get_csrf_token(self):
            token = self.get('_csrft_', None)
            if token is None:
                token = self.new_csrf_token()
            return token

        # mapping methods
        def __getitem__(self, key):
            self.__load()
            return self._data[key]

        def get(self, key, default=None):
            self.__load()
            return self._data.get(key, default)

        def __delitem__(self, key):
            self.__load()
            del self._data[key]
            self._changed = True

        def __setitem__(self, key, value):
            self.__load()
            self._data[key] = value
            self._changed = True

        def keys(self):
            self.__load()
            return self._data.keys()

        def values(self):
            self.__load()
            return self._data.values()

        def items(self):
            self.__load()
            return self._data.items()

        def iterkeys(self):
            self.__load()
            return iter(self._data.keys())

        def itervalues(self):
            self.__load()
            return iter(self._data.values())

        def iteritems(self):
            self.__load()
            return iter(self._data.items())

        def clear(self):
            self.__load()
            self._data = {}
            self._changed = True

        def update(self, d):
            self.__load()
            for k in self._data.keys():
                d[k] = self._data[k]

        def multi_set(self, d):
            # print '[update]', self.id
            self.__load()
            for k in d.keys():
                self._data[k] = d[k]
            self._changed = True

        def setdefault(self, key, default=None):
            """D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D"""
            pass

        def pop(self, k, *args):
            """remove specified key and return the corresponding value
            ``*args`` may contain a single default value, or may not be supplied.
            If key is not found, default is returned if given, otherwise
            ``KeyError`` is raised"""
            pass

        def popitem(self):
            """remove and return some (key, value) pair as a
            2-tuple; but raise ``KeyError`` if mapping is empty"""
            pass

        def __len__(self):
            self.__load()
            return len(self._data)


        def __iter__(self):
            return self.iterkeys()

        def __contains__(self, key):
            self.__load()
            return key in self._data

        @property
        def new(self):
            return self._new_session
Example #60
try:
    import simplejson as json
except ImportError:
    import json

from celery.beat import Scheduler, ScheduleEntry
from celery.utils.log import get_logger
from celery import current_app
import celery.schedules

from redis.client import StrictRedis

from decoder import DateTimeDecoder, DateTimeEncoder

# share with result backend
rdb = StrictRedis.from_url(current_app.conf.CELERY_REDIS_SCHEDULER_URL)


class ValidationError(Exception):
    pass


class PeriodicTask(object):
    '''represents a periodic task
    '''
    name = None
    task = None

    type_ = None

    interval = None