class RedisQueue(object):
    def __init__(self, name, namespace='queue', **redis_kwargs):
        self.__db = Redis(**redis_kwargs)
        self.key = "%s :%s" % (namespace, name)

    def qsize(self):
        return self.__db.llen(self.key)

    def empty(self):
        return self.qsize() == 0

    def put(self, item):
        self.__db.rpush(self.key, item)

    def get(self, block=True, timeout=None):
        if block:
            item = self.__db.blpop(self.key, timeout)  # blocks while the list is empty
        else:
            item = self.__db.lpop(self.key)  # returns None if the list is empty

        if item:
            item = item[1]
        return item

    def get_nowait(self):
        return self.get(False)

    def clear(self):
        # Deleting the list key removes the whole queue in one call;
        # the original popped items and mistakenly used the payloads as key names.
        self.__db.delete(self.key)
Example #2
class HashRingTestCase(unittest.TestCase):
    def setUp(self):
        self.redis = Redis()
        self.redis.delete(TEST_KEY)

    def get_node(self, n_replicas, total_replicas):
        node = RingNode(self.redis, TEST_KEY, n_replicas=n_replicas)

        self.assertEqual(len(node.replicas), n_replicas)
        self.assertEqual(self.redis.zcard(TEST_KEY), total_replicas-n_replicas)

        node.heartbeat()

        self.assertEqual(self.redis.zcard(TEST_KEY), total_replicas)
        self.assertEqual(len(node.ranges), 0)

        return node

    def test_node(self):
        node1 = self.get_node(1, 1)
        node1.update()
        self.assertEqual(len(node1.ranges), 1)

        node2 = self.get_node(1, 2)
        node1.update()
        node2.update()
        self.assertEqual(len(node1.ranges) + len(node2.ranges), 3)

        node3 = self.get_node(2, 4)
        node1.update()
        node2.update()
        node3.update()
        self.assertEqual(len(node1.ranges) + len(node2.ranges) + len(node3.ranges), 5)
Example #3
def reset(tweet_filter):
    redis = Redis(
        host = args.redis_host,
        port = int(args.redis_port),
        db = 0
    )

    if (not tweet_filter):
        return json.dumps(
            {
                'response'  : 'error',
                'reason'    : 'No tweet filter',
            }
        )

    keys = redis.keys("%s:*" % tweet_filter)
    count = len(keys)

    if keys:  # redis-py's delete() raises when called with no keys
        redis.delete(*keys)

    return json.dumps(
        {
            'response'  : 'ok',
            'debug'     : 'Deleted %s keys' % count,
        }
    )
Example #4
class RedisSessionStore(_SessionStore):
    u"""Redis を使ったセッションストア

    Author: @soundkitchen Izukawa Takanobu

    :param
    """
    def __init__(self, host='localhost', port=6379, db=0, expire=0, session_class=None):
        from redis import Redis
        super(RedisSessionStore, self).__init__(session_class=session_class)
        self._conn = Redis(host=host, port=port, db=db)
        self._expire = int(expire)

    def save(self, session):
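        # ``msg`` is presumably the msgpack module, imported elsewhere in the
        # original file; dict(session) makes the session serializable.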
        packed = msg.dumps(dict(session))
        self._conn.set(session.sid, packed)

    def delete(self, session):
        self._conn.delete(session.sid)

    def get(self, sid):
        if not self.is_valid_key(sid):
            return self.new()

        packed = self._conn.get(sid)
        try:
            data = msg.loads(packed, encoding='utf-8')
            if self._expire:
                self._conn.expire(sid, self._expire)
        except TypeError, e:
            data = {}
        return self.session_class(data, sid, False)
Example #5
class RedisManager(NoSqlManager):
    def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
        self.connection_pool = params.pop('connection_pool', None)
        self.db = params.pop('db', None)
        NoSqlManager.__init__(self, namespace, url=url, data_dir=data_dir, lock_dir=lock_dir, **params)

    def open_connection(self, host, port, **params):
        self.db_conn = Redis(host=host, port=int(port), connection_pool=self.connection_pool, db=self.db, **params)

    def __contains__(self, key):
        log.debug('%s contained in redis cache (as %s) : %s'%(key, self._format_key(key), self.db_conn.exists(self._format_key(key))))
        return self.db_conn.exists(self._format_key(key))

    def set_value(self, key, value):
        key = self._format_key(key)
        self.db_conn.set(key, pickle.dumps(value))

    def __delitem__(self, key):
        key = self._format_key(key)
        self.db_conn.delete(key)

    def _format_key(self, key):
        return 'beaker:%s:%s' % (self.namespace, key.replace(' ', '\302\267'))

    def do_remove(self):
        self.db_conn.flushdb()  # redis-py exposes flushdb(); there is no flush()

    def keys(self):
        return self.db_conn.keys('beaker:%s:*' % self.namespace)
Example #6
File: lock.py  Project: qiaohui/pygaga
@contextmanager  # yield-based guard; the decorator (and its contextlib import) presumably appear in the original lock.py
def check_redis_lock(key, value, logger):
    r = None
    try:
        r = Redis(host = FLAGS.redis_lockserver)

        if FLAGS.unlock:
            r.delete(key)
            logger.info('%s has been deleted' % key)
            sys.exit(0)

        if r.exists(key):
            logger.warning('%s is running' % key)
            sys.exit(1)
    except:
        logger.error("Redis failed %s, %s", key, traceback.format_exc())
        sys.exit(0)

    try:
        try:
            r.set(key, value)
        except:
            logger.error("Redis lock failed %s, %s", key, traceback.format_exc())
            sys.exit(0)
        yield
    finally:
        if FLAGS.autounlock:
            r.delete(key)
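
# Usage sketch (assuming the context-manager decoration above; the key and
# logger names are illustrative):
#
#     with check_redis_lock('mycrawler:lock', str(os.getpid()), logger):
#         do_work()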
Example #7
class BaseQueue(object):
    def __init__(self, key, db, host, port=6379):
        """Redis task queue for spider

        :type key: object
        :param server: redis server
        :param spider: spider instance
        :param key: key of redis queue
        """
        # TODO encode the url
        self.server = Redis(host, port, db)
        self.key = key

    def __len__(self):
        """Return the length of the queue"""
        raise NotImplementedError

    def push(self, url):
        """Push an url"""
        raise NotImplementedError

    def pop(self):
        """Pop an url"""
        raise NotImplementedError

    def clear(self):
        """Clear queue"""
        self.server.delete(self.key)
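
# A minimal concrete subclass sketch (an assumption for illustration, not
# part of the original project): a FIFO queue over a Redis list.
class FifoQueue(BaseQueue):
    def __len__(self):
        return self.server.llen(self.key)

    def push(self, url):
        self.server.lpush(self.key, url)

    def pop(self):
        return self.server.rpop(self.key)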
Example #8
class RedisManager(NoSqlManager):
    def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
        NoSqlManager.__init__(self, namespace, url=url, data_dir=data_dir, lock_dir=lock_dir, **params)

    def open_connection(self, host, port, **params):
        self.db_conn = Redis(host=host, port=int(port), **params)

    def __contains__(self, key):
        return self.db_conn.exists(self._format_key(key))

    def set_value(self, key, value, expiretime=None):
        key = self._format_key(key)
        if expiretime:
            self.db_conn.setex(key, expiretime, pickle.dumps(value))
        else:
            self.db_conn.set(key, pickle.dumps(value))

    def __delitem__(self, key):
        key = self._format_key(key)
        self.db_conn.delete(key)  # key is already formatted; don't format twice

    def _format_key(self, key):
        return 'beaker:%s:%s' % (self.namespace, key.replace(' ', '\302\267'))

    def do_remove(self):
        self.db_conn.flushdb()  # redis-py exposes flushdb(); there is no flush()

    def keys(self):
        return self.db_conn.keys('beaker:%s:*' % self.namespace)
Example #9
def clear():
    prefix = settings.CONST_REDIS_KEY_PREFIX
    redis = Redis()
    keys = redis.keys("{0}*".format(prefix))
    for key in keys:
        key = key.decode("utf8")
        redis.delete(key)
Example #10
class TestPyrqClient(unittest.TestCase):
    def setUp(self):
        self._pyrq_client = client.PyRqClient(QUEUE_NAME)
        self._pyrq_client.container = container = ConfigContainer(client.CONFIG_KEY)
        self._pyrq_client._setup()

        configuration = container.config[client.CONFIG_KEY]
        self._redis_client = Redis(host=configuration['host'], port=configuration['port'], db=configuration['db'],
                                   password=configuration['password'], decode_responses=True)
        self._queue = Queue(QUEUE_NAME, self._redis_client)

    def tearDown(self):
        self._redis_client.delete(QUEUE_NAME)

    def test_dispatch(self):
        self._pyrq_client.dispatch('test_method', arg1='aaa', arg2=11)
        expected = {
            'method': 'test_method',
            'params': {
                'arg1': 'aaa',
                'arg2': 11
            }
        }
        actual = self._queue.get_items(1)[0]
        self.assertEquals(expected, json.loads(actual))
        self._queue.ack_item(actual)

    def test_is_empty(self):
        self.assertTrue(self._pyrq_client.is_empty())
        self._pyrq_client.dispatch('whatever')
        self.assertFalse(self._pyrq_client.is_empty())
Example #11
def filter(genes, full_query_key, query_key, query_pending_key, uuid):
    """Filter for a given set of gene names"""
    from webdorina import RESULT_TTL, SESSION_TTL
    redis_store = Redis()

    full_results = redis_store.lrange(full_query_key, 0, -1)
    results = []
    for res_string in full_results:
        if res_string == '':
            continue
        cols = res_string.split('\t')
        annotations = cols[8]
        for field in annotations.split(';'):
            key, val = field.split('=')
            if key == 'ID' and val in genes:
                results.append(res_string)

    num_results = len(results)
    if num_results == 0:
        results.append('\t\t\t\t\t\t\t\tNo results found')
        num_results += 1

    for i in xrange(0, num_results, 1000):
        res = results[i:i+1000]
        redis_store.rpush(query_key, *res)

    redis_store.expire(query_key, RESULT_TTL)
    redis_store.delete(query_pending_key)

    redis_store.setex('sessions:{0}'.format(uuid), json.dumps(dict(state='done', uuid=uuid)), SESSION_TTL)
    redis_store.setex('results:sessions:{0}'.format(uuid), json.dumps(dict(redirect=query_key)), SESSION_TTL)
Example #12
class RedisStore(object):
    """Caching implementation that uses Redis as data storage.
    """
    def __init__(self, host='localhost', port=6379, db=0):
        try:
            from redis import Redis
        except ImportError:
            raise RuntimeError('Redis support is not available')
        self.redis = Redis(host=host, port=port, db=db)

    def __getitem__(self, key):
        entry = self.redis.get(key)
        if entry is None:
            return None

        entry = json.loads(entry.decode('utf-8'))
        return CacheEntry.parse(entry)

    def __setitem__(self, key, entry):
        data = json.dumps(entry.to_dict()).encode('utf-8')
        self.redis.set(key, data)

    def __delitem__(self, key):
        self.redis.delete(key)

    def clear(self):
        self.redis.flushdb()

    def __len__(self):
        return self.redis.dbsize()
Example #13
def filter_genes(genes, full_query_key, query_key, query_pending_key, uuid,
                 session_ttl=None, result_ttl=None):
    """Filter for a given set of gene names"""
    redis_store = Redis(charset="utf-8", decode_responses=True)

    full_results = redis_store.lrange(full_query_key, 0, -1)
    results = []
    for res_string in full_results:
        if res_string == '':
            continue
        cols = res_string.split('\t')
        annotations = cols[8]
        for field in annotations.split(';'):
            key, val = field.split('=')
            if key == 'ID' and val in genes:
                results.append(res_string)

    num_results = len(results)
    if num_results:
        for i in range(0, num_results, 1000):
            res = results[i:i + 1000]
            redis_store.rpush(query_key, *res)
    else:
        # rpush() cannot take a bare list; push an explicit "no results" row,
        # mirroring the sentinel used by the older filter() above.
        redis_store.rpush(query_key, '\t\t\t\t\t\t\t\tNo results found')

    redis_store.expire(query_key, result_ttl)
    redis_store.delete(query_pending_key)

    redis_store.setex('sessions:{0}'.format(uuid), json.dumps(dict(
        state='done', uuid=uuid)), session_ttl)
    redis_store.setex('results:sessions:{0}'.format(uuid), json.dumps(dict(
        redirect=query_key)), session_ttl)
Example #14
File: core.py  Project: neilmock/understudy
class Result(object):
    def __init__(self, uuid, redis):
        self.uuid = uuid
        self.log = ""
        self.redis = Redis(host=redis.host,
                           port=redis.port,
                           db=redis.db,
                           password=redis.connection.password)

    def check_log(self):
        log = self.redis.lpop("understudy:log:%s" % self.uuid)
        while log:
            self.log += log

            log = self.redis.lpop("understudy:log:%s" % self.uuid)

        return self.log

    def check(self):
        self.check_log()

        result = self.redis.get("understudy:result:%s" % self.uuid)
        if result:
            self.redis.delete("understudy:result:%s" % self.uuid)

            return result

        return None
Example #15
File: work.py  Project: Answeror/aip
def group_app_task_out(lock, name, appops, timeout):
    from redis import Redis
    redis = Redis()
    try:
        group_app_task(redis, lock, name, appops, timeout)
    finally:
        redis.delete(lock)
Example #16
def _test_thinset(conn):
    conn = Redis(db=1)
    conn.delete('test')
    s = ThinSet('test', 10000, connection=conn)
    l = range(10) 
    s.add(*l)
    assert s.contains(*range(0, 20, 2)) == [True, True, True, True, True, False, False, False, False, False]
Example #17
class RefsContainerImplementation(RefsContainer):
    def __init__(self, container):
        self.db = Redis()

        self.container = container
        
    def _calc_ref_path(self, ref):
        return '%s::%s' % (self.container, ref)

    def allkeys(self):
        return self.db.keys()
    
    def read_loose_ref(self, name):
        k = self._calc_ref_path(name)
        d = self.db.get(k)
        if d:
            return d
        else:
            return False
    
    def get_packed_refs(self):
        return {}
    
    def set_symbolic_ref(self, name, other):
        k = self._calc_ref_path(name)
        sref = SYMREF + other
        log.debug('setting symbolic ref %s to %r' % (name, sref))
        k = self.db.set(k, sref)
    
    def set_if_equals(self, name, old_ref, new_ref):
        if old_ref is not None and self.read_loose_ref(name) != old_ref:
            return False

        realname, _ = self._follow(name)

        # set ref (set_if_equals is actually the low-level setting function)
        k = self.db.set(self._calc_ref_path(name), new_ref)
        return True
    
    def add_if_new(self, name, ref):
        if self.read_loose_ref(name):  # read_loose_ref returns False, not None, when the ref is missing
            return False
            return False

        self.set_if_equals(name, None, ref)
        return True
    
    def remove_if_equals(self, name, old_ref):
        k = self.db.get(self._calc_ref_path(name))
        if None == k: 
            return True

        if old_ref is not None and k != old_ref:
            return False

        self.db.delete(self._calc_ref_path(name))
        return True
    
    def __getitem__(self, name):
        # Completion sketch (the snippet is cut off here): return the stored
        # ref, raising KeyError when it is absent.
        value = self.read_loose_ref(name)
        if not value:
            raise KeyError(name)
        return value
Example #18
File: tests.py  Project: samuelg/entertain
    def setUp(self):
        r = Redis(db=9)
        systems = ('xbox', 'wii', 'ds', 'ps3', 'pc')

        r.delete(MUSIC_KEY)
        for system in systems:
            r.delete('%s%s'%(GAMES_KEY, system))
        r.save()
Example #19
class TestImportSet(unittest.TestCase):

    def setUp(self):
        self.redis_client = Redis()
        self.redis_import = RedisImport()
        self.key = 'test_set'
        #make sure key doesn't exist already
        self.redis_client.delete(self.key)

    def tearDown(self):
        self.redis_client.delete(self.key)

    def test_load_set(self):
        """
        Test basic set import
        """
        somestuff = ['1', '2', '3', 'a', 'b', 'c']
        self.redis_import.load_set(self.key, somestuff)

        self.assertEquals(len(somestuff),
                self.redis_client.scard(self.key))

        self.assertEquals(self.redis_client.smembers(self.key),
                set(somestuff))

    def test_set_elements_unique(self):
        """
        Test that duplicate elements are removed in sets
        """

        #verify that repeated elements are removed
        repeats = ['x'] * 5

        self.redis_import.load_set(self.key, repeats)
        self.assertEquals(1,
                self.redis_client.scard(self.key))

    def test_non_validated(self):
        """
        Test that input with non-string types fails when validation is off
        """
        non_strings = [1, 2, 3]
        self.assertRaises(
                TypeError,
                self.redis_import.load_set,
                self.key,
                non_strings)

    def test_validated(self):
        """
        Test that validation handles mixed types
        """
        non_strings = [1, 2, 3]
        self.redis_import.load_set(self.key, non_strings, validate_input=True)
        self.assertEquals(3,
                self.redis_client.scard(self.key))
        self.assertEquals(set([str(x) for x in non_strings]),
                self.redis_client.smembers(self.key))
Example #20
class RedisSessionInterface(SessionInterface):
    serializer = pickle
    session_class = RedisSession

    def __init__(self, app, prefix='session:'):
        self.app = app
        self.config = app.config

        params = dict(
            host=self.config.get('CACHE_REDIS_HOST', 'localhost'),
            port=self.config.get('CACHE_REDIS_PORT', 6379),
            password=self.config.get('CACHE_REDIS_PASSWORD', None),
            db=self.config.get('CACHE_REDIS_DB', 0),
        )

        self.redis = Redis(**params)  # actually use the configured parameters
        self.prefix = prefix

    def generate_sid(self):
        return str(uuid4())

    def get_redis_expiration_time(self, app, session):
        if session.permanent:
            return app.permanent_session_lifetime
        return timedelta(days=1)

    def open_session(self, app, request):
        sid = request.cookies.get(app.session_cookie_name)
        if not sid:
            sid = self.generate_sid()
            return self.session_class(sid=sid)
        val = self.redis.get(self.prefix + sid)
        if val is not None:
            data = self.serializer.loads(val)
            return self.session_class(data, sid=sid)
        return self.session_class(sid=sid, new=True)

    def save_session(self, app, session, response):
        domain = self.get_cookie_domain(app)
        if not session:
            self.redis.delete(self.prefix + session.sid)
            if session.modified:
                response.delete_cookie(app.session_cookie_name,
                                       domain=domain)
            return
        redis_exp = self.get_redis_expiration_time(app, session)
        cookie_exp = self.get_expiration_time(app, session)
        val = self.serializer.dumps(dict(session))
        self.redis.setex(
            self.prefix + session.sid,
            val,
            int(redis_exp.total_seconds())
        )
        response.set_cookie(app.session_cookie_name, session.sid,
                            expires=cookie_exp, httponly=True,
                            domain=domain)
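
# Wiring sketch: Flask looks up its session machinery on the
# ``app.session_interface`` attribute, so installing this store is a single
# assignment (``app`` being a hypothetical Flask application):
#
#     app.session_interface = RedisSessionInterface(app)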
Example #21
def get_stock_num():
    Stock_queue = Queue.Queue()

    server = Redis(host='192.168.1.108')
    server.delete('stocks')
    posts_account = 0
    for stock_num in open('stocknums.txt', 'r'):
        stocknum = stock_num.strip()
        server.sadd('stocks', stocknum)
    print 'count:', server.scard('stocks')
Example #22
def main():
    arguments_parser = ArgumentParser(description="Create admin user.")
    arguments_parser.add_argument("--db", default=0, type=int, help="database number")
    arguments = arguments_parser.parse_args()
    database_number = vars(arguments)["db"]
    print("Enter your password to create an admin user.")
    database = Redis(db=database_number)
    database.set("user:admin:password", md5(getpass()).hexdigest())
    database.delete("user:admin:profiles")
    database.rpush("user:admin:profiles", "admin")
Example #23
File: core.py  Project: neilmock/understudy
class Lead(object):
    "Redis publisher."
    def __init__(self, channel, queue=False,
                 host='localhost', port=6379, db=0, password=None):
        self.channel = channel
        self.queue = queue
        self.redis = Redis(host=host, port=port, db=db, password=password)

    def _block(self, uuid):
        self.redis.subscribe(uuid)

        for message in self.redis.listen():
            if message['type'] == 'message':
                action = message['data']

                if action == "COMPLETE":
                    self.redis.unsubscribe(uuid)
                else:
                    print action

        retval = self.redis.get("understudy:result:%s" % uuid)
        self.redis.delete("understudy:result:%s" % uuid)
        self.redis.delete("understudy:log:%s" % uuid)

        return retval

    def _handle(self, directive, block):
        serialized = simplejson.dumps(directive)

        if self.queue:
            self.redis.rpush(self.channel, serialized)
            self.redis.publish(self.channel, "GO!")
        else:
            understudies = self.redis.publish(self.channel, serialized)

            if not understudies:
                raise NoUnderstudiesError

        if block:
            return self._block(directive['uuid'])
        else:
            return Result(directive['uuid'], self.redis)

    def shell(self, command, block=False):
        uuid = str(uuid4())
        directive = {'uuid':uuid, 'handler':'shell', 'action':command}

        return self._handle(directive, block)

    def perform(self, action, block=False):
        uuid = str(uuid4())

        directive = {'uuid':uuid, 'handler':'function', 'action':action}

        return self._handle(directive, block)
Example #24
class Session(SessionInterface):
    serializer = pickle
    session_class = RedisSession

    def __init__(
            self,
            host="localhost",
            port=6379,
            db=0,
            uri=None,
            prefix='session:'):
        if uri:
            self.redis = Redis.from_url(uri)
        else:
            port = int(port)
            db = int(db)
            self.redis = Redis(host, port, db)
        self.prefix = prefix

    def generate_sid(self):
        return str(uuid4())

    def get_redis_expiration_time(self, app, session):
        if session.permanent:
            return app.permanent_session_lifetime
        return timedelta(days=1)

    def open_session(self, app, request):
        sid = request.cookies.get(app.session_cookie_name)
        if not sid:
            sid = self.generate_sid()
            return self.session_class(sid=sid)
        val = self.redis.get(self.prefix + sid)
        if val is not None:
            data = self.serializer.loads(val)
            return self.session_class(data, sid=sid)
        return self.session_class(sid=sid, new=True)

    def save_session(self, app, session, response):
        domain = self.get_cookie_domain(app)
        if not session:
            self.redis.delete(self.prefix + session.sid)
            if session.modified:
                response.delete_cookie(app.session_cookie_name,
                                       domain=domain)
            return
        redis_exp = self.get_redis_expiration_time(app, session)
        cookie_exp = self.get_expiration_time(app, session)
        val = self.serializer.dumps(dict(session))
        self.redis.setex(self.prefix + session.sid, val,
                         int(redis_exp.total_seconds()))
        response.set_cookie(app.session_cookie_name, session.sid,
                            expires=cookie_exp, httponly=True,
                            domain=domain)
Example #25
	def with_redis_initialization(test_class):
				
		# print 'Cleaning Redis for environment %s'%flapp.config['REDIS_PREFIX']
		
		rcon = Redis()
		keys = rcon.keys(flapp.config['REDIS_PREFIX'] + '*')
		for key in keys:
			print 'Deleting key %s'%key
			rcon.delete(key)

		setup_func(test_class)
		return setup_func
Example #26
class RedisStorage(Storage):
    _redis_connection = None

    def __init__(self, host='localhost', port=6379,
                 db=0, password=None, socket_timeout=None,
                 connection_pool=None, charset='utf-8',
                 errors='strict', decode_responses=False,
                 unix_socket_path=None):
        """

        @param host:
        @type host: str
        @param port:
        @type port: int
        @param db:
        @type db:int
        @param password:
        @type password: str
        @param socket_timeout:
        @param connection_pool:
        @param charset:
        @param errors:
        @param decode_responses:
        @param unix_socket_path:
        """
        self._redis_connection = Redis(host, port, db, password, socket_timeout, connection_pool, charset, errors,
                                       decode_responses, unix_socket_path)

    def _open(self, name, *args, **kwargs):
        return RedisFile(name, redis_connection=self._redis_connection)

    def _save(self, name, content):
        redis_file = RedisFile(name, redis_connection=self._redis_connection)
        redis_file.writelines(content)
        return name

    def get_valid_name(self, name):
        return name

    def get_available_name(self, name):
        # Append an increasing numeric suffix until the name is free; the
        # original formatted the itertools.count object itself into the name.
        base = name
        count = itertools.count(1)
        while self.exists(name):
            name = u'{0}_{1}'.format(base, next(count))
        return name

    def url(self, name):
        return u''

    def exists(self, name):
        return self._redis_connection.exists(name)

    def delete(self, name):
        self._redis_connection.delete(name)
Example #27
def post_mapping():
	try:
		mapping=json.loads(request.POST['mapping'])
	except:
		return "NO DICE"
	
	r=Redis()
	r.delete(KEY_MAPPING)
	for k,v in mapping.items():
		r.hset(KEY_MAPPING,k,v)
	
	LOG.info(mapping)	
	return 'OK'
Example #28
class RedisAPI():

    def __init__(self):
        self.rdb = Redis()

    def set(self, session_id, session_value):
        self.rdb.set(session_id, session_value)

    def get(self, session_id):
        return self.rdb.get(session_id)

    def delete(self, session_id):
        self.rdb.delete(session_id)
Example #29
class Master(object):

    def __init__(self):
        self.redisCon = Redis(host=conf.REDIS_HOST,
                              port=conf.REDIS_PORT,
                              password=conf.REDIS_PASSWD)
        self.jobQueue = Queue(connection=self.redisCon)
        map(lambda key: self.redisCon.delete(key), [key for key in self.redisCon.keys() if re.search('visit|rq:', key, re.I)])
        hashData = hashUrl(conf.CRAWL_SITE)
        self.redisCon.lpush('visit', conf.CRAWL_SITE)
        self.redisCon.sadd('visitSet', hashData)


    def start(self):

        initDB()

        countDepth = 0
        countUrls = 0

        while countDepth <= int(conf.CRAWL_DEPTH):

            while True:
                # wait for 10 minutes
                # print 'len visite:', self.redisCon.llen('visit')
                # print 'len visited:', self.redisCon.scard('visited')
                url = self.redisCon.lpop('visit')
                if url:
                    countUrls += 1
                    print 'countDepth:', countDepth, 'countUrls:', countUrls
                    self.jobQueue.enqueue_call(crawl, args=(url, countDepth, countUrls))
                else:
                    self.redisCon.delete('visitSet')
                    break

            while True:
                # wait 30 seconds; on timeout the job queue is empty (except for failed jobs)
                keyUrl = self.redisCon.blpop('tmpVisit', timeout=30)
                if keyUrl:
                    url = keyUrl[1]
                    hashData = hashUrl(url)
                    if not self.redisCon.sismember('visited', hashData) and \
                            not self.redisCon.sismember('visitSet', hashData):
                        self.redisCon.lpush('visit', url)
                        self.redisCon.sadd('visitSet', hashData)
                else:
                    break

            countDepth += 1
Example #30
def run_analyse(datadir, query_key, query_pending_key, query, uuid,
                SESSION_STORE=None, RESULT_TTL=None, SESSION_TTL=None,
                tissue=None):
    logger.info('Running analysis for {}'.format(query_key))
    if tissue:
        dorina = run.Dorina(datadir, ext=tissue)
    else:
        dorina = run.Dorina(datadir)

    redis_store = Redis(charset="utf-8", decode_responses=True)

    session_store = SESSION_STORE.format(unique_id=uuid)
    custom_regulator_file = '{session_store}/{uuid}.bed'.format(
        session_store=session_store, uuid=uuid)
    set_a = []
    for regulator in query['set_a']:
        if regulator == uuid:
            set_a.append(custom_regulator_file)
        else:
            set_a.append(regulator)
    query['set_a'] = set_a  # assign once, after the loop, as the set_b branch does

    if query['set_b'] is not None:
        set_b = []
        for regulator in query['set_b']:
            if regulator == uuid:
                set_b.append(custom_regulator_file)
            else:
                set_b.append(regulator)
        query['set_b'] = set_b
    try:
        logger.debug('Storing analysis result for {}'.format(query_key))
        result = str(dorina.analyse(**query))
        lines = result.splitlines()
        logger.debug("returning {} rows".format(len(lines)))
        redis_store.rpush(query_key, *lines)
        redis_store.setex('results:sessions:{0}'.format(uuid), json.dumps(dict(
            redirect=query_key)), SESSION_TTL)
    except Exception as e:
        result = 'Job failed: %s' % str(e)
        redis_store.setex('sessions:{0}'.format(uuid), json.dumps(dict(
            state='error', uuid=uuid)), SESSION_TTL)
        redis_store.rpush(query_key, result)

    redis_store.expire(query_key, RESULT_TTL)
    redis_store.setex('sessions:{0}'.format(uuid), json.dumps(dict(
        state='done', uuid=uuid)), SESSION_TTL)
    redis_store.delete(query_pending_key)
Example #31
def redis_backend(redis_client: Redis):
    set_name = "test-page-counts"
    redis_client.delete(set_name)

    return RedisBackend(redis_client=redis_client, set_name=set_name)
Example #32
class RedisBayes(object):
    def __init__(self,
                 redis=None,
                 prefix='bayes:',
                 correction=0.1,
                 tokenizer=None):
        self.redis = redis
        self.prefix = prefix
        self.correction = correction
        self.tokenizer = tokenizer or english_tokenizer
        if not self.redis:
            from redis import Redis
            self.redis = Redis()

    def flush(self):
        for cat in self.redis.smembers(self.prefix + 'categories'):
            self.redis.delete(self.prefix + cat)
        self.redis.delete(self.prefix + 'categories')

    def train(self, category, text):
        self.redis.sadd(self.prefix + 'categories', category)
        for word, count in occurances(self.tokenizer(text)).iteritems():
            self.redis.hincrby(self.prefix + category, word, count)

    def untrain(self, category, text):
        for word, count in occurances(self.tokenizer(text)).iteritems():
            cur = self.redis.hget(self.prefix + category, word)
            if cur:
                new = int(cur) - count
                if new > 0:
                    self.redis.hset(self.prefix + category, word, new)
                else:
                    self.redis.hdel(self.prefix + category, word)
        if self.tally(category) == 0:
            self.redis.delete(self.prefix + category)
            self.redis.srem(self.prefix + 'categories', category)

    def classify(self, text):
        score = self.score(text)
        if not score:
            return None
        return sorted(score.iteritems(), key=lambda v: v[1])[-1][0]

    def score(self, text):
        occurs = occurances(self.tokenizer(text))
        scores = {}
        for category in self.redis.smembers(self.prefix + 'categories'):
            tally = self.tally(category)
            if tally == 0:
                continue
            scores[category] = 0.0
            for word, count in occurs.iteritems():
                score = self.redis.hget(self.prefix + category, word)
                assert not score or score > 0, "corrupt bayesian database"
                score = score or self.correction
                scores[category] += math.log(float(score) / tally)
        return scores

    def tally(self, category):
        tally = sum(int(x) for x in self.redis.hvals(self.prefix + category))
        assert tally >= 0, "corrupt bayesian database"
        return tally
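
# A minimal usage sketch (assumes a local Redis server plus the
# ``english_tokenizer``/``occurances`` helpers this class relies on):
rb = RedisBayes()
rb.flush()
rb.train('good', 'sunshine drugs love sex lobster sloth')
rb.train('bad', 'fear death horror government zombie')
print(rb.classify('sloths are so cute i love them'))  # -> 'good'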
Example #33
class Redis(object):
    def __init__(self, db):
        self.conf = ConfigCenter.RedisConfig()

        host = self.conf['host']
        port = self.conf['port']
        passwd = self.conf['passwd']

        self.db = db
        self.__pool = ConnectionPool(host=host,
                                     port=port,
                                     db=self.db,
                                     password=passwd)
        self.__conn = RawRedis(connection_pool=self.__pool)

        print("*** Connect to redis-server succeed.")

    def rset(self, k, v):
        rval = self.rget(k)
        if rval is not None:
            try:
                rval = json.loads(rval)
            except Exception:
                pass

        if equal(v, rval):
            print("db{}:set【{} => <=】".format(self.db, k))
            return

        elif rval is None:
            print("db{}:set【{} () => {}】".format(self.db, k, v))
        else:
            print("db{}:set【{} {} => {}】".format(self.db, k, rval, v))

        if isinstance(v, (dict, list)):
            v = json.dumps(v)

        self.__conn.set(k, v)
        return

    def rget(self, k):
        try:
            res = self.__conn.get(k)
            return None if res is None else res.decode('utf-8')

        except Exception as e:
            print("*** Get value from redis failed.", k, e)
            return None

    def delete(self, k):
        self.__conn.delete(k)
        print("*** db%s:删除【%s】的缓存" % (self.db, k))

    @property
    def dbsize(self):
        return self.__conn.dbsize()

    def pipeset(self, lists):

        pipe = self.__conn.pipeline(transaction=True)

        for list_detail in lists:
            k = list(list_detail.keys())[0]
            v = list_detail[k]  # was ``list_detail[key]``: ``key`` was undefined
            self.rset(k, v)

        pipe.execute()

    @property
    def scan_iter(self):
        '''Iterate over the whole Redis db as (key, value) pairs.
        '''
        for k in self.__conn.keys():
            yield k.decode('utf-8'), self.rget(k)

    def __update_dict_to_redis__(self, k, v):
        ''' __update_dict_to_redis__
        Merge dict rather than replace it.
        '''
        if self.rget(k) is not None:
            bf_val = self.rget(k)
            try:
                bf_val = json.loads(bf_val)
                bf_val = dict(bf_val, **v)
                self.rset(k, bf_val)
            except Exception as e:
                print("__update_dict_to_redis__ failed.", e)
                pass
        else:
            self.rset(k, v)
Example #34
class RedisMatching:
    def __init__(self, config):
        self.db = Redis(host=config['host'],
                        port=config['port'],
                        db=config['db'],
                        decode_responses=True)
        self.msg_client = Client(config['account_sid'],
                                 config['auth_token']).messages
        self.msg_sender = config['sender']

    def set_userdata(self, user_data):
        user_data['timestamp'] = int(time())
        return self.db.hmset(user_data['phone_numb'], user_data)

    def get_userdata(self, user_name):
        return self.db.hgetall(user_name)

    def get_all_user(self):
        return self.db.keys()

    def remove_userdata(self, user_name):
        return self.db.delete(user_name)

    def send_message(self, phone_numb, message):
        phone_numb = '+82' + phone_numb[1:]
        result = self.msg_client.create(to=phone_numb,
                                        from_=self.msg_sender,
                                        body=message)
        return result

    def initialize(self):
        for key in self.get_all_user():
            self.db.delete(key)

    def match_user(self):
        def make_food_vector(user_data):
            food_vector = [
                int(user_data['cutlet']),
                int(user_data['hamburger']),
                int(user_data['noodle']),
                int(user_data['korean_food'])
            ]
            return food_vector

        def make_message_text(target_name, user_data, similarity=0):
            # Builds the Korean match-notification SMS:
            # "<name>, solo-diner match complete! / partner / contact /
            #  gender / similarity".
            if user_data['gender'] == 'male': user_data['gender'] = '남'    # '남' = "male"
            elif user_data['gender'] == 'female': user_data['gender'] = '여'  # '여' = "female"
            text = '%s님, 혼밥러 매칭완료!\n\n상대 : %s님\n연락처 : %s\n성별 : %s\n유사도 : %0.2f' \
                    % (target_name, user_data['user_name']
                        , user_data['phone_numb'], user_data['gender']
                        , similarity*100)
            return text

        while (len(self.get_all_user()) >= 2):
            user_list = self.get_all_user()
            user_data_list = list(map(self.get_userdata, user_list))
            user_data_list = [[user_data['timestamp'], user_data]
                              for user_data in user_data_list]
            user_data_list = [
                user_data[1] for user_data in sorted(user_data_list)
            ]
            print(user_data_list)

            target_user = user_data_list[0]
            target_vec = make_food_vector(target_user)

            similarity_list = []
            for user_data in user_data_list[1:]:
                user_vec = make_food_vector(user_data)
                similarity = RedisMatching.cosine_similarity(
                    target_vec, user_vec)
                similarity_list.append((similarity, user_data))

            max_val = max(similarity_list)
            print(max_val)
            text = make_message_text(target_user['user_name'], max_val[1],
                                     max_val[0])
            self.send_message(target_user['phone_numb'], text)
            print(text)
            text = make_message_text(max_val[1]['user_name'], target_user,
                                     max_val[0])
            self.send_message(max_val[1]['phone_numb'], text)
            print(text)

            # self.remove_userdata(target_user['phone_numb'])
            # self.remove_userdata(max_val[1]['phone_numb'])

            return True

    @staticmethod
    def cvt_unit_vec(vector):
        scalar = math.sqrt(sum([i * i for i in vector]))
        return [idx / scalar for idx in vector]

    @staticmethod
    def cosine_similarity(a, b):
        a = RedisMatching.cvt_unit_vec(a)
        b = RedisMatching.cvt_unit_vec(b)
        return sum([i * j for i, j in zip(a, b)])

    @staticmethod
    def custom_similarity(a, b):
        # a = RedisMatching.cvt_unit_vec(a)
        # b = RedisMatching.cvt_unit_vec(b)
        c = [i - j for i, j in zip(a, b)]
        c = [i * i for i in c]
        return 300 - math.sqrt(sum(c))
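
# Worked example: identical food-preference vectors score 1.0 and orthogonal
# ones score 0.0:
#
#     RedisMatching.cosine_similarity([5, 1, 1, 1], [5, 1, 1, 1])  # -> 1.0
#     RedisMatching.cosine_similarity([5, 0, 0, 0], [0, 5, 0, 0])  # -> 0.0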
Example #35
def delete_all_indexed_offers(client: Redis) -> None:
    try:
        client.delete(RedisBucket.REDIS_HASHMAP_INDEXED_OFFERS_NAME.value)
    except redis.exceptions.RedisError as error:
        logger.exception("[REDIS] %s", error)
Example #36
    def test_it_removes(self):
        r = Redis(host="baz-redis")
        self.assertTrue(r.delete("test"))
Example #37
File: app.py  Project: savkov/planchet
def _remove_token(job_name: str, ledger: Redis):
    ledger.delete(f'TOKEN:{job_name}')
Example #38
class SessionStore(SessionBase):
    """
    Implements database session store.
    """
    def __init__(self, session_key=None):
        super(SessionStore, self).__init__(session_key)
        self.db = Redis(
            settings.RSESSION.get('HOST', 'localhost'),
            settings.RSESSION.get('PORT', 6379),
            settings.RSESSION.get('DB', 0),
            settings.RSESSION.get('PASSWORD', ''),
        )

    def load(self):
        session_data = self.db.get(KEY_PREFIX % self.session_key)
        if session_data is None:
            self.create()
            return {}
        return self.decode(force_unicode(session_data))

    def exists(self, session_key):
        if self.db.exists(KEY_PREFIX % session_key):
            return True
        return False

    def create(self):
        while True:
            self._session_key = self._get_new_session_key()
            try:
                # Save immediately to ensure we have a unique entry in the
                # database.
                self.save(must_create=True)
            except CreateError:
                # Key wasn't unique. Try again.
                continue
            self.modified = True
            self._session_cache = {}
            return

    def save(self, must_create=False):
        """
        Saves the current session data to the database. If 'must_create' is
        True, a database error will be raised if the saving operation doesn't
        create a *new* entry (as opposed to possibly updating an existing
        entry).
        """
        if must_create and self.exists(self.session_key):
            raise CreateError

        setter_fn = self.db.setnx if must_create else self.db.set

        key = KEY_PREFIX % self.session_key
        result = setter_fn(key,
                           self.encode(self._get_session(no_load=must_create)))

        if must_create and result is False:
            raise CreateError

        self.db.expire(key, self.get_expiry_age())

    def delete(self, session_key=None):
        if session_key is None:
            if self._session_key is None:
                return
            session_key = self._session_key

        self.db.delete(KEY_PREFIX % session_key)
Example #39
class HotQueue(object):
    """Simple FIFO message queue stored in a Redis list. Example:

    >>> from hotqueue import HotQueue
    >>> queue = HotQueue("myqueue", host="localhost", port=6379, db=0)

    :param name: name of the queue
    :param serializer: the class or module to serialize msgs with, must have
        methods or functions named ``dumps`` and ``loads``,
        `pickle <http://docs.python.org/library/pickle.html>`_ is the default,
        use ``None`` to store messages in plain text (suitable for strings,
        integers, etc)
    :param kwargs: additional kwargs to pass to :class:`Redis`, most commonly
        :attr:`host`, :attr:`port`, :attr:`db`
    """
    def __init__(self, name, serializer=pickle, redis_instance=None, **kwargs):
        self.name = name
        self.serializer = serializer
        if not redis_instance:
            self.__redis = Redis(**kwargs)
        else:
            self.__redis = redis_instance

    def __len__(self):
        return self.__redis.llen(self.key)

    @property
    def redis_instance(self):
        return self.__redis

    @property
    def key(self):
        """Return the key name used to store this queue in Redis."""
        return key_for_name(self.name)

    def clear(self):
        """Clear the queue of all messages, deleting the Redis key."""
        self.__redis.delete(self.key)

    def consume(self, **kwargs):
        """Return a generator that yields whenever a message is waiting in the
        queue. Will block otherwise. Example:

        >>> for msg in queue.consume(timeout=1):
        ...     print msg
        my message
        another message

        :param kwargs: any arguments that :meth:`~hotqueue.HotQueue.get` can
            accept (:attr:`block` will default to ``True`` if not given)
        """
        kwargs.setdefault('block', True)
        try:
            while True:
                msg = self.get(**kwargs)
                if msg is None:
                    break
                yield msg
        except KeyboardInterrupt:
            print
            return

    def get(self, block=False, timeout=None):
        """Return a message from the queue. Example:

        >>> queue.get()
        'my message'
        >>> queue.get()
        'another message'

        :param block: whether or not to wait until a msg is available in
            the queue before returning; ``False`` by default
        :param timeout: when using :attr:`block`, if no msg is available
            for :attr:`timeout` in seconds, give up and return ``None``
        """
        if block:
            if timeout is None:
                timeout = 0
            msg = self.__redis.blpop(self.key, timeout=timeout)
            if msg is not None:
                msg = msg[1]
        else:
            msg = self.__redis.lpop(self.key)
        if msg is not None and self.serializer is not None:
            msg = self.serializer.loads(msg)
        return msg

    def put(self, *msgs):
        """Put one or more messages onto the queue. Example:

        >>> queue.put("my message")
        >>> queue.put("another message")

        To put messages onto the queue in bulk, which can be significantly
        faster if you have a large number of messages:

        >>> queue.put("my message", "another message", "third message")
        """
        if self.serializer is not None:
            msgs = map(self.serializer.dumps, msgs)
        self.__redis.rpush(self.key, *msgs)

    def worker(self, *args, **kwargs):
        """Decorator for using a function as a queue worker. Example:

        >>> @queue.worker(timeout=1)
        ... def printer(msg):
        ...     print msg
        >>> printer()
        my message
        another message

        You can also use it without passing any keyword arguments:

        >>> @queue.worker
        ... def printer(msg):
        ...     print msg
        >>> printer()
        my message
        another message

        :param kwargs: any arguments that :meth:`~hotqueue.HotQueue.get` can
            accept (:attr:`block` will default to ``True`` if not given)
        """
        def decorator(worker):
            @wraps(worker)
            def wrapper(*args):
                for msg in self.consume(**kwargs):
                    worker(*args + (msg, ))

            return wrapper

        if args:
            return decorator(*args)
        return decorator
Example #40
def main():
    logging.getLogger().setLevel(logging.INFO)
    logging.info("Starting time is %s .", datetime.now())

    cmdLineParser = ArgumentParser()
    cmdLineParser.add_argument("-ih",
                               "--ibroker_host",
                               action="store",
                               type=str,
                               dest="ibroker_host",
                               default="127.0.0.1",
                               help="The host of IB app to use")
    cmdLineParser.add_argument("-ip",
                               "--ibroker_port",
                               action="store",
                               type=int,
                               dest="ibroker_port",
                               default=7497,
                               help="The TCP port for IB to use")
    cmdLineParser.add_argument("-rh",
                               "--redis_host",
                               action="store",
                               type=str,
                               dest="redis_host",
                               default="127.0.0.1",
                               help="The host of Redis app to use")
    cmdLineParser.add_argument("-rp",
                               "--redis_port",
                               action="store",
                               type=int,
                               dest="redis_port",
                               default=6379,
                               help="The TCP port for redis to use")
    args = cmdLineParser.parse_args()

    # create a redis connection and queue
    redis_connection = Redis(host=args.redis_host, port=args.redis_port)
    redis_queue = Queue(connection=redis_connection)

    app = IBApp(args.ibroker_host,
                args.ibroker_port,
                client_id=1,
                redis_queue=redis_queue)

    # Sleep to allow time for the connection to the server
    time.sleep(CONNECT_SERVER_SLEEP_TIME)

    try:
        while True:
            try:

                # get a new tasks for tick price
                new_tasks_tick_price = redis_connection.lrange(
                    'new_tasks_tick_price', 0, -1)
                redis_connection.delete('new_tasks_tick_price')
                if new_tasks_tick_price:
                    for task in new_tasks_tick_price:
                        task = json.loads(task)
                        app.register_tick_price_alert(task)

                # get a new tasks for history data
                new_tasks_history_data = redis_connection.lrange(
                    'new_tasks_historical_data', 0, -1)
                redis_connection.delete('new_tasks_historical_data')
                if new_tasks_history_data:
                    for task in new_tasks_history_data:
                        task = json.loads(task)
                        app.register_historical_data_alert(task)

                # sleep for new task
                time.sleep(REDIS_GET_TASKS_DELAY)

            except KeyboardInterrupt:
                logging.warning('Closing application')
                break
            except TypeError:
                logging.error('Wrong data from redis')
                continue
    finally:
        redis_connection.close()
        app.disconnect()
Example #41
from redis import Redis

redis_conn = Redis()

SHEEP_SET_KEY = "sheep_seen"
SHEEP_HLL_KEY = "sheep_seen_hll"

redis_conn.delete(SHEEP_SET_KEY)
redis_conn.delete(SHEEP_HLL_KEY)

for m in range(0, 100000):
    sheep_id = str(m)
    pipeline = redis_conn.pipeline(transaction=False)
    pipeline.sadd(SHEEP_SET_KEY, sheep_id)
    pipeline.pfadd(SHEEP_HLL_KEY, sheep_id)
    pipeline.execute()

print(
    f"There are {redis_conn.scard(SHEEP_SET_KEY)} sheep (set: {redis_conn.memory_usage(SHEEP_SET_KEY)})."
)
print(
    f"There are {redis_conn.pfcount(SHEEP_HLL_KEY)} sheep (hyperloglog: {redis_conn.memory_usage(SHEEP_HLL_KEY)})."
)
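
# Note: PFCOUNT is an estimate -- a Redis HyperLogLog has a standard error of
# about 0.81% and is capped at 12 KB of memory -- so the second count may
# differ slightly from 100000 while costing far less than the exact set.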
Example #42
from redis import Redis

redis_connection = Redis(decode_responses=True)

key = "first_name"
value = "Maksymilian"

redis_connection.set(key, value)
print(redis_connection.get(key))
redis_connection.delete(key)
Example #43
import time
from redis import Redis
"""
Read the BlockedIPs file instead of using TZU real data. Speed is manageable and data is replicatable.
Make a Redis hash with key ip and value is timestamp
save HGETALL of hash to get a dict of the keys and values Evens would be keys (IP) Odd values (times)
iterate over odds in list and if the time is a minute older than the recent attack_time HDEL the list item before it
"""

r = Redis()

with open('blockedIPs.txt', 'r') as f:
    r.delete('iphash')
    for line in f:
        attack_date, attack_time, attack_ip, attack_protocol = line.split()
        r.hset('iphash', attack_ip, attack_time)
        ip_dict = r.hgetall('iphash')
        for ip in ip_dict:
            iterated_time = int(ip_dict[ip].decode()[6:])
            time_to_add = int(attack_time[6:])
            print(iterated_time, time_to_add)
            if iterated_time - time_to_add < 0:
                r.hdel('iphash', ip)
                print('deleted', ip)

        time.sleep(0.5)
Example #44
class ETLRedis:
    def __init__(self):
        cnf = ETLSettings()
        self.prefix = cnf.redis_prefix + ':'
        self.queuename = self.prefix + 'filmids'
        self.workqueuename = self.queuename + ':work'

        self.redis = Redis(
            host=cnf.redis_host,
            port=cnf.redis_port,
            password=cnf.redis_password,
            decode_responses=True,
        )

    @backoff(start_sleep_time=0.001, jitter=False)
    def set_status(self, service: str, status: str) -> str:
        key = self.prefix + 'status:' + service
        self.redis.set(key, status)
        return self.redis.get(key)

    @backoff(start_sleep_time=0.001, jitter=False)
    def get_status(self, service: str) -> str:
        key = self.prefix + 'status:' + service
        return self.redis.get(key)

    @backoff(start_sleep_time=0.001, jitter=False)
    def set_lasttime(self, table: str, lasttime: datetime) -> datetime:
        key = self.prefix + table + ':lasttime'
        self.redis.set(key, lasttime.isoformat())
        time = self.redis.get(key)
        return datetime.fromisoformat(time)

    @backoff(start_sleep_time=0.001, jitter=False)
    def get_lasttime(self, table: str) -> datetime:
        key = self.prefix + table + ':lasttime'
        time = self.redis.get(key)
        return datetime.fromisoformat(time) if time else None

    @backoff(start_sleep_time=0.001, jitter=False)
    def push_filmid(self, id: str) -> None:
        """
        Atomically push a unique film id onto the Redis queue.
        """
        script = 'redis.call("LREM",KEYS[1], "0", ARGV[1]);'
        script += 'return redis.call("LPUSH", KEYS[1], ARGV[1])'
        self.redis.eval(script, 1, self.queuename, id)

    @backoff(start_sleep_time=0.001, jitter=False)
    def push_tableid(self, id: str, table: str) -> None:
        """
        Attomic push unique id from table to Redis queue
        """
        queuename = self.prefix + table + ':ids'
        pipe = self.redis.pipeline(transaction=True)
        pipe.lrem(queuename, 0, id)
        pipe.lpush(queuename, id)
        pipe.execute()

    @backoff(start_sleep_time=0.001, jitter=False)
    def get_filmid_for_work(self, size) -> list:
        """
        Move film id from queue to workqueue to load or update it in elastic
        """
        size -= self.redis.llen(self.workqueuename)
        while size > 0:
            self.redis.rpoplpush(self.queuename, self.workqueuename)
            size -= 1
        length = self.redis.llen(self.workqueuename)
        workid = self.redis.lrange(self.workqueuename, 0, length)
        return workid

    @backoff(start_sleep_time=0.001, jitter=False)
    def get_tableid_for_work(self, size, table) -> list:
        """
        Move table id from queue to workqueue to load or update it in elastic
        """
        queuename = self.prefix + table + ':ids'
        workqueuename = queuename + ':work'
        size -= self.redis.llen(workqueuename)
        while size > 0:
            self.redis.rpoplpush(queuename, workqueuename)
            size -= 1
        length = self.redis.llen(workqueuename)
        workid = self.redis.lrange(workqueuename, 0, length)
        return workid

    @backoff(start_sleep_time=0.001, jitter=False)
    def del_work_queuename(self, table=''):
        # Kept until the move to the universal push/get tableid functions is complete
        workqueuename = self.workqueuename if table == '' else self.prefix + table + ':ids:work'
        self.redis.delete(workqueuename)
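
# The ``@backoff`` decorator used above comes from elsewhere in this project;
# a plausible retry-with-exponential-backoff sketch (hypothetical, with the
# parameter names mirrored from the calls above):
import random
import time
from functools import wraps

def backoff(start_sleep_time=0.1, factor=2, border_sleep_time=10, jitter=True):
    def decorator(func):
        @wraps(func)
        def inner(*args, **kwargs):
            sleep_time = start_sleep_time
            while True:
                try:
                    return func(*args, **kwargs)
                except Exception:
                    # sleep, then grow the delay up to border_sleep_time
                    time.sleep(sleep_time + (random.random() if jitter else 0))
                    sleep_time = min(sleep_time * factor, border_sleep_time)
        return inner
    return decorator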
Example #45
tmp_rd.set('allow_site', ','.join(allow_site))
tmp_rd.set('ARIA2_HOST', ARIA2_HOST)
tmp_rd.set('ARIA2_PORT', ARIA2_PORT)
tmp_rd.set('ARIA2_SECRET', ARIA2_SECRET)
tmp_rd.set('ARIA2_SCHEME', ARIA2_SCHEME)
tmp_rd.set('password', password)
tmp_rd.set('verify_url', verify_url)
tmp_rd.set('balance', balance)
tmp_rd.set('admin_prefix', admin_prefix)
tmp_rd.set('thread_num', thread_num)
config_path = os.path.join(config_dir, 'self_config.py')
with open(config_path, 'r') as f:
    text = f.read()
tmp_rd.set('users', re.findall('od_users=([\w\W]*})', text)[0])
key = 'themelist'
tmp_rd.delete(key)
###################### functions
app.jinja_env.globals['version'] = config.version
app.jinja_env.globals['FetchData'] = FetchData
app.jinja_env.globals['path_list'] = path_list
app.jinja_env.globals['CanEdit'] = CanEdit
app.jinja_env.globals['quote'] = urllib.quote
app.jinja_env.globals['len'] = len
app.jinja_env.globals['enumerate'] = enumerate
app.jinja_env.globals['breadCrumb'] = breadCrumb
app.jinja_env.globals['list'] = list
app.jinja_env.globals['os'] = os
app.jinja_env.globals['re'] = re
app.jinja_env.globals['file_ico'] = file_ico
app.jinja_env.globals['CutText'] = CutText
app.jinja_env.globals['GetConfig'] = GetConfig
Example #46
class PoolWalletManager(object):
    _key = "pool_wallet:address:{}"

    def __init__(self):
        super().__init__()

        self.database = load_plugin("chain.plugins.database")

        self.redis = Redis(
            host=os.environ.get("REDIS_HOST", "localhost"),
            port=os.environ.get("REDIS_PORT", 6379),
            db=os.environ.get("REDIS_DB", 0),
        )

    def key_for_address(self, address):
        return self._key.format(address)

    def clear_wallets(self):
        """Clear all pool wallets from redis
        """
        keys = self.redis.keys(self.key_for_address("*"))
        if keys:
            self.redis.delete(*keys)

    def save_wallet(self, wallet):
        self.redis.set(self.key_for_address(wallet.address), wallet.to_json())

    def find_by_address(self, address):
        """Finds a wallet by a given address. If wallet is not found, it is copied from
        blockchain wallet manager.

        :param string address: wallet address
        :returns Wallet: wallet object
        """
        key = self.key_for_address(address)
        if not self.redis.exists(key):
            self.save_wallet(self.database.wallets.find_by_address(address))
        wallet = json.loads(self.redis.get(key))
        return Wallet(wallet)

    def find_by_public_key(self, public_key):
        """Finds a wallet by public key.

        It calculates the address from the public key and uses the `find_by_address`
        function, which copies the wallet from the blockchain wallet manager if no
        wallet is found for this address.

        :param string public_key: wallet's public key
        :returns Wallet: wallet object
        """
        address = address_from_public_key(public_key)
        return self.find_by_address(address)

    def exists_by_address(self, address):
        key = self.key_for_address(address)
        return self.redis.exists(key)

    def exists_by_public_key(self, public_key):
        address = address_from_public_key(public_key)
        return self.exists_by_address(address)

    def delete_by_public_key(self, public_key):
        """Deletes a wallet by public key

        :param string public_key: wallet's public key
        """
        address = address_from_public_key(public_key)
        key = self.key_for_address(address)
        self.redis.delete(key)

    def can_apply_to_sender(self, transaction, block_height):
        """Checks if a transaction can be applied to the sender's wallet

        :param Transaction transaction: Crypto transaction object
        :param block_height: height of the block being processed
        :returns bool: True if it can be applied, False otherwise
        """

        # If the sender is known but has no balance, they can't send.
        # The check is performed against the database wallet manager, not the
        # pool wallet manager.
        if self.database.wallets.exists(transaction.sender_public_key):
            db_wallet = self.database.wallets.find_by_public_key(
                transaction.sender_public_key)
            if db_wallet.balance == 0:
                logger.warning(
                    "Wallet is not allowed to send as it doesn't have any funds"
                )
                return False

        if is_transaction_exception(transaction.id):
            logger.warning(
                "Transaction %s forcibly applied because it has been added as an "
                "exception",
                transaction.id,
            )
            return True
        sender = self.find_by_public_key(transaction.sender_public_key)
        return transaction.can_be_applied_to_wallet(sender,
                                                    self.database.wallets,
                                                    block_height)
Example #47
class RedisClient(object):
    """
    Redis client

    Proxies are stored in Redis as a hash:
    the key is ip:port and the value is a dict of the proxy's attributes;

    """
    def __init__(self, **kwargs):
        """
        init
        :param host: host
        :param port: port
        :param password: password
        :param db: db
        :return:
        """
        self.name = ""
        kwargs.pop("username")
        self.__conn = Redis(connection_pool=BlockingConnectionPool(
            decode_responses=True, **kwargs))

    def get(self, proxy_str=None):
        """
        Return one proxy
        :return:
        """
        proxies = self.__conn.hkeys(self.name)
        if not proxies:
            return None
        if proxy_str is None:
            proxy = choice(proxies)
        else:
            proxy = proxy_str
        return self.__conn.hget(self.name, proxy)

    def put(self, proxy_obj):
        """
        Put a proxy into the hash; use changeTable to select the hash name
        :param proxy_obj: Proxy obj
        :return:
        """
        data = self.__conn.hset(self.name, proxy_obj.proxy, proxy_obj.to_json)
        return data

    def pop(self):
        """
        Pop one proxy
        :return: dict {proxy: value}
        """
        proxies = self.__conn.hkeys(self.name)
        if proxies:
            proxy = proxies[0]
            proxy_info = self.__conn.hget(self.name, proxy)
            self.__conn.hdel(self.name, proxy)
            return proxy_info
        return False

    def delete(self, proxy_str):
        """
        Remove the given proxy; use changeTable to select the hash name
        :param proxy_str: proxy str
        :return:
        """
        return self.__conn.hdel(self.name, proxy_str)

    def exists(self, proxy_str):
        """
        Check whether the given proxy exists; use changeTable to select the hash name
        :param proxy_str: proxy str
        :return:
        """
        return self.__conn.hexists(self.name, proxy_str)

    def update(self, proxy_obj):
        """
        Update the proxy's attributes
        :param proxy_obj:
        :return:
        """
        return self.__conn.hset(self.name, proxy_obj.proxy, proxy_obj.to_json)

    def getAll(self):
        """
        Return all proxies as a dict; use changeTable to select the hash name
        :return:
        """
        item_dict = self.__conn.hgetall(self.name)
        return item_dict

    def clear(self):
        """
        Clear all proxies; use changeTable to select the hash name
        :return:
        """
        return self.__conn.delete(self.name)

    def getCount(self):
        """
        Return the number of proxies
        :return:
        """
        return self.__conn.hlen(self.name)

    def changeTable(self, name):
        """
        Switch the hash being operated on
        :param name:
        :return:
        """
        self.name = name
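
A brief usage sketch for the proxy pool above, assuming a running Redis. `FakeProxy` is a hypothetical stand-in for the project's Proxy object; anything with a `proxy` attribute and a `to_json` property works.

class FakeProxy(object):
    def __init__(self, proxy):
        self.proxy = proxy

    @property
    def to_json(self):
        return '{"proxy": "%s", "fail_count": 0}' % self.proxy

client = RedisClient(host="localhost", port=6379, password=None, db=0, username=None)
client.changeTable("use_proxy")          # select the hash to operate on
client.put(FakeProxy("127.0.0.1:8080"))  # stored as hash field ip:port -> attribute json
print(client.get())                      # a random proxy's attribute json
print(client.getCount())                 # 1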
Example #48
        task = pickle.loads(r.blpop("parse_tasks")[1])
        try:
            parsed = parse_messages(task["data"])
            filtered = list(bf.filter_messages(parsed))
            for message in filtered:
                message["user_id"] = task["user_id"]
                message["date"] = message["time"].strftime("%Y-%m-%d")
                message["time"] = message["time"].strftime("%Y-%m-%d %H:%M:%S")
                if not db.messages.find_one({
                        "$and": [{
                            "user_id": message["user_id"]
                        }, {
                            "qq": message["qq"]
                        }, {
                            "time": message["time"]
                        }]
                }):
                    db.messages.insert_one(message)
                    if message["author"] == message["qq"]:
                        r.rpush("nick_queries", message["qq"])
                else:
                    logger.debug("Skipped message from QQ:{}".format(
                        message["qq"]))
            r.delete(f"user.{task['user_id']}.dates")
            logger.info(
                f"Processed chat log from {task['user_id']}: parsed {len(parsed)} messages, {len(filtered)} remained after filtering."
            )
        except Exception:
            logger.error("Exception at {}".format(time()))
            logger.error(format_exc())
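
The loop above is the consumer half of a pickle-over-Redis work queue. The producer side is a single rpush of a pickled dict; a minimal sketch, with the payload fields (`user_id`, `data`) the consumer expects:

import pickle
from redis import Redis

r = Redis()
task = {"user_id": 42, "data": "raw chat log text..."}  # hypothetical payload
r.rpush("parse_tasks", pickle.dumps(task))              # blpop on the consumer side unblocks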
Example #49
    def flush_redis(self):
        redis = Redis(
            host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB
        )
        redis.delete("links")
        self.client = Client()
Example #50
class HotResque(object):
    """Simple FIFO message queue stored in a Redis list. Example:

    >>> from HotResque import HotResque
    >>> queue = HotResque("myqueue", host="localhost", port=6379, db=0)
    
    :param name: name of the queue
    :param serializer: the class or module to serialize msgs with, must have
        methods or functions named ``dumps`` and ``loads``,
        `pickle <http://docs.python.org/library/pickle.html>`_ is the default,
        use ``None`` to store messages in plain text (suitable for strings,
        integers, etc)
    :param kwargs: additional kwargs to pass to :class:`Redis`, most commonly
        :attr:`host`, :attr:`port`, :attr:`db`

    ==============================================
    
    To fetch a migration that is sitting in the queue:

    >>> from hotresque import HotResque
    >>> a = HotResque("queue:migrations")
    >>> a.name_queue = "resque"
    >>> c = a.get() 

    HotResque's GET returns a dictionary containing all of the data.

    Contents of "c":
    {
        u'port_dest': 443, 
        u'host_dest': u'destino.teste.com', 
        u'username_dest': None,
        u'password_dest': None, 
        u'migration_id': 9, 
        u'port_orig': 443, 
        u'password_orig': u'teste123', 
        u'host_orig': u'origem.teste.com', 
        u'username_orig': u'*****@*****.**'
     }

    >>> c['port_dest']
    443
    >>> c['username_dest']

    To set the migration status:

    >>> import json
    >>> from hotresque import HotResque
    >>> a = HotResque("queue:migrations_report")
    >>> a.name_queue = "resque"
    >>> resp = {"class":"MigrationReport", "args" : [json.dumps({"migration_id" : 5, "state":"ok|error" , "message":"mensagem..."}) ]}
    >>> a.put(resp)

    """
    def __init__(self, name, serializer=json, **kwargs):
        self.name = name
        self.serializer = serializer
        self.__redis = Redis(**kwargs)
        self.name_queue = "hotresque"

    def __len__(self):
        return self.__redis.llen(self.key)

    def name_queue():
        doc = "The name_queue property."

        def fget(self):
            return self._name_queue

        def fset(self, value):
            self._name_queue = value

        def fdel(self):
            del self._name_queue

        return locals()

    name_queue = property(**name_queue())

    @property
    def key(self):
        """Return the key name used to store this queue in Redis."""
        return '%s:%s' % (self.name_queue, self.name)

    def clear(self):
        """Clear the queue of all messages, deleting the Redis key."""
        self.__redis.delete(self.key)

    def consume(self, **kwargs):
        """Return a generator that yields whenever a message is waiting in the
        queue. Will block otherwise. Example:
        
        >>> for msg in queue.consume(timeout=1):
        ...     print msg
        my message
        another message
        
        :param kwargs: any arguments that :meth:`~HotResque.HotResque.get` can
            accept (:attr:`block` will default to ``True`` if not given)
        """
        kwargs.setdefault('block', True)
        try:
            while True:
                msg = self.get(**kwargs)
                if msg is None:
                    break
                yield msg
        except KeyboardInterrupt:
            print
            return

    def get(self, block=False, timeout=None):
        """Return a message from the queue. Example:
    
        >>> queue.get()
        'my message'
        >>> queue.get()
        'another message'
        
        :param block: whether or not to wait until a msg is available in
            the queue before returning; ``False`` by default
        :param timeout: when using :attr:`block`, if no msg is available
            for :attr:`timeout` in seconds, give up and return ``None``
        """
        if block:
            if timeout is None:
                timeout = 0
            msg = self.__redis.blpop(self.key, timeout=timeout)
            if msg is not None:
                msg = msg[1]
        else:
            msg = self.__redis.lpop(self.key)
        if msg is not None and self.serializer is not None:
            msg = self.serializer.loads(msg)
            msg = json.loads(msg['args'][0])
        return msg

    def put(self, *msgs):
        """Put one or more messages onto the queue. Example:
        
        >>> queue.put("my message")
        >>> queue.put("another message")
        
        To put messages onto the queue in bulk, which can be significantly
        faster if you have a large number of messages:
        
        >>> queue.put("my message", "another message", "third message")
        """
        if self.serializer is not None:
            msgs = map(self.serializer.dumps, msgs)
        self.__redis.rpush(self.key, *msgs)

    def worker(self, *args, **kwargs):
        """Decorator for using a function as a queue worker. Example:
        
        >>> @queue.worker(timeout=1)
        ... def printer(msg):
        ...     print msg
        >>> printer()
        my message
        another message
        
        You can also use it without passing any keyword arguments:
        
        >>> @queue.worker
        ... def printer(msg):
        ...     print msg
        >>> printer()
        my message
        another message
        
        :param kwargs: any arguments that :meth:`~HotResque.HotResque.get` can
            accept (:attr:`block` will default to ``True`` if not given)
        """
        def decorator(worker):
            @wraps(worker)
            def wrapper(*args):
                for msg in self.consume(**kwargs):
                    worker(*args + (msg, ))

            return wrapper

        if args:
            return decorator(*args)
        return decorator
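
HotResque's get assumes Resque's payload convention: each queue entry is a JSON envelope with "class" and "args", and the first arg is itself a JSON string. A standalone sketch of that round-trip with plain redis-py; the key name mirrors HotResque.key:

import json
from redis import Redis

r = Redis()
key = "resque:queue:migrations_report"  # name_queue + ':' + name

payload = {"class": "MigrationReport",
           "args": [json.dumps({"migration_id": 5, "state": "ok"})]}
r.rpush(key, json.dumps(payload))       # what HotResque.put effectively stores

msg = json.loads(r.lpop(key))           # outer Resque envelope
data = json.loads(msg["args"][0])       # inner payload, as HotResque.get returns it
print(data["migration_id"])             # 5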
Example #51
class Rediz:
    cache: ClearableCache  # type hint for IDE's pleasure only

    def __init__(self, config: Config = config):

        # globals
        self.server = config.redis_server
        self.port = config.redis_port
        self.key = config.redis_key
        self.ttl = config.redis_task_ttl
        self.timeout = config.redis_task_timeout
        self.task_result_ttl = config.redis_task_result_ttl
        self.routes = routes.routes
        self.core_q = config.redis_core_q
        # config check if TLS required
        if config.redis_tls_enabled:
            self.base_connection = Redis(
                                        host=self.server,
                                        port=self.port,
                                        password=self.key,
                                        ssl=True,
                                        ssl_cert_reqs='required',
                                        ssl_keyfile=config.redis_tls_key_file,
                                        ssl_certfile=config.redis_tls_cert_file,
                                        ssl_ca_certs=config.redis_tls_ca_cert_file,
                                        socket_connect_timeout=config.redis_socket_connect_timeout,
                                        socket_keepalive=config.redis_socket_keepalive
                                        )
        else:
            self.base_connection = Redis(
                                        host=self.server,
                                        port=self.port,
                                        password=self.key,
                                        socket_connect_timeout=config.redis_socket_connect_timeout,
                                        socket_keepalive=config.redis_socket_keepalive
                                        )
#        self.base_q = Queue(self.core_q, connection=self.base_connection)
        self.networked_queuedb = config.redis_queue_store
        self.redis_pinned_store = config.redis_pinned_store

        self.local_queuedb = {}
        self.local_queuedb[config.redis_fifo_q] = {}
        self.local_queuedb[config.redis_fifo_q]["queue"] = Queue(config.redis_fifo_q, connection=self.base_connection)

        # init networked db for processes queues
        net_db_exists = self.base_connection.get(self.networked_queuedb)
        if not net_db_exists:
            null_network_db = json.dumps({"netpalm-db": "queue-val"})
            self.base_connection.set(self.networked_queuedb, null_network_db)

        # init pinned db
        pinned_db_exists = self.base_connection.get(self.redis_pinned_store)
        if not pinned_db_exists:
            null_pinned_db = json.dumps([])
            self.base_connection.set(self.redis_pinned_store, null_pinned_db)

        self.cache_enabled = config.redis_cache_enabled
        self.cache_timeout = config.redis_cache_default_timeout
        # we MUST have a prefix, else ".clear()" will drop ALL keys in redis (including those used for the queues).
        self.key_prefix = str(config.redis_cache_key_prefix).strip()
        if not self.key_prefix:
            self.key_prefix = "NOPREFIX"
        if self.cache_enabled:
            log.info(f"Enabling cache!")
            self.cache = ClearableCache(self.base_connection, default_timeout=self.cache_timeout,
                                        key_prefix=self.key_prefix)
        else:
            log.info(f"Disabling cache!")
            # noinspection PyTypeChecker
            self.cache = DisabledCache()
        self.extn_update_log = ExtnUpdateLog(self.base_connection, config.redis_update_log)

    def append_network_queue_db(self, qn):
        """appends to the networked queue db"""
        result = self.base_connection.get(self.networked_queuedb)
        tmpdb = json.loads(result)
        tmpdb[qn] = True
        jsresult = json.dumps(tmpdb)
        self.base_connection.set(self.networked_queuedb, jsresult)

    def append_local_queue_db(self, qn):
        """appends to the local queue db"""
        self.local_queuedb[qn] = {}
        self.local_queuedb[qn]["queue"] = Queue(qn, connection=self.base_connection)
        return self.local_queuedb[qn]["queue"]

    def exists_in_local_queue_db(self, qn):
        q_exists_in_local_db = self.local_queuedb.get(qn, False)
        return q_exists_in_local_db

    def worker_is_alive(self, q):
        """checks if a worker exists on a given queue"""
        try:
            queue = Queue(q, connection=self.base_connection)
            workers = Worker.all(queue=queue)
            if len(workers) >= 1:
                return True
            else:
                log.info(f"worker required for {q}")
                return False
        except Exception as e:
            log.error(f"worker_is_alive: {e}")
            return False

    def getqueue(self, host):
        """
            checks whether a queue and a worker exist
            across the controller, redis and worker node.
            creates a local queue if required
        """
        # checks that a centralised db / queue exists and creates an empty db if one does not exist
        try:
            # check the redis db store for a queue
            result = self.base_connection.get(self.networked_queuedb)
            jsresult = json.loads(result)
            res = jsresult.get(host, False)
            # if exists on the networked db, check whether you have a local connection
            if res:
                if not self.worker_is_alive(host):
                    return False
                # create a local connection if required
                if not self.exists_in_local_queue_db(qn=host):
                    self.append_local_queue_db(qn=host)
                return True
            else:
                return False

        except Exception as e:
            return e

    def get_redis_meta_template(self):
        """template for redis meta data"""
        meta_template = {
            "errors": [],
            "enqueued_elapsed_seconds": None,
            "started_elapsed_seconds": None,
            "total_elapsed_seconds": None,
            "result": ""
        }
        return meta_template

    def create_queue_worker(self, pinned_container_queue, pinned_worker_qname):
        """
            creates a local queue on the worker and executes an rpc to create a
            pinned worker on a remote container
        """
        from netpalm.netpalm_pinned_worker import pinned_worker_constructor
        try:
            log.info(f"create_queue_worker: creating queue and worker {pinned_worker_qname}")
            meta_template = self.get_redis_meta_template()
            self.append_network_queue_db(qn=pinned_worker_qname)
            self.local_queuedb[pinned_container_queue]["queue"].enqueue_call(
                func=pinned_worker_constructor, args=(pinned_worker_qname,),
                meta=meta_template, ttl=self.ttl, result_ttl=self.task_result_ttl)
            r = self.append_local_queue_db(qn=pinned_worker_qname)
            return r
        except Exception as e:
            return e

    def route_and_create_q_worker(self, hst):
        """routes a process to the correct container."""
        qexists = self.getqueue(hst)
        if not qexists:
            # check for process availability:
            pinned_hosts = self.fetch_pinned_store()
            capacity = False
            # find first available container with process capacity
            for host in pinned_hosts:
                if host["count"] < host["limit"]:
                    # create in the local db if required
                    if not self.exists_in_local_queue_db(qn=host["pinned_listen_queue"]):
                        self.append_local_queue_db(qn=host["pinned_listen_queue"])
                    self.create_queue_worker(
                                            pinned_container_queue=host["pinned_listen_queue"],
                                            pinned_worker_qname=hst
                                            )
                    capacity = True
                    break
            # throw exception if no capacity found
            if not capacity:
                err = """Not enough pinned worker process capacity: kill pinned
                 processes or spin up more pinned workers!"""
                log.error(err)
                raise Exception(f"{err}")

    def render_task_response(self, task_job):
        """formats and returns the task rpc jobs result"""
        created_at = str(task_job.created_at)
        enqueued_at = str(task_job.enqueued_at)
        started_at = str(task_job.started_at)
        ended_at = str(task_job.ended_at)

        try:

            current_time = datetime.datetime.utcnow()
            created_parsed_time = datetime.datetime.strptime(created_at, "%Y-%m-%d %H:%M:%S.%f")

            # if enqueued but not started calculate time
            if enqueued_at != "None" and enqueued_at and started_at == "None":
                parsed_time = datetime.datetime.strptime(enqueued_at, "%Y-%m-%d %H:%M:%S.%f")
                task_job.meta["enqueued_elapsed_seconds"] = (current_time - parsed_time).seconds
                task_job.save()

            # if finished, calculate the total elapsed time from creation
            if ended_at != "None" and ended_at:
                parsed_time = datetime.datetime.strptime(ended_at, "%Y-%m-%d %H:%M:%S.%f")
                task_job.meta["total_elapsed_seconds"] = (parsed_time - created_parsed_time).seconds
                task_job.save()

            elif ended_at == "None":
                task_job.meta["total_elapsed_seconds"] = (current_time - created_parsed_time).seconds
                task_job.save()

            # clean up vars for response
            created_at = None if created_at == "None" else created_at
            enqueued_at = None if enqueued_at == "None" else enqueued_at
            started_at = None if started_at == "None" else started_at
            ended_at = None if ended_at == "None" else ended_at

        except Exception as e:
            log.error(f"render_task_response : {str(e)}")
        resultdata = Response(status="success", data={
            "task_id": task_job.get_id(),
            "created_on": created_at,
            "task_queue": task_job.description,
            "task_meta": {
                "enqueued_at": enqueued_at,
                "started_at": started_at,
                "ended_at": ended_at,
                "enqueued_elapsed_seconds": task_job.meta["enqueued_elapsed_seconds"],
                "total_elapsed_seconds": task_job.meta["total_elapsed_seconds"]
            },
            "task_status": task_job.get_status(),
            "task_result": task_job.result,
            "task_errors": task_job.meta["errors"]
        }).dict()
        return resultdata

    def sendtask(self, q, exe, **kwargs):
        meta_template = self.get_redis_meta_template()
        task = self.local_queuedb[q]["queue"].enqueue_call(func=self.routes[exe], description=q, ttl=self.ttl,
                                                           result_ttl=self.task_result_ttl, kwargs=kwargs["kwargs"],
                                                           meta=meta_template, timeout=self.timeout)
        resultdata = self.render_task_response(task)
        return resultdata

    def execute_task(self, method, **kwargs):
        """main entry point for rpc tasks"""
        kw = kwargs.get("kwargs", False)
        connectionargs = kw.get("connection_args", False)
        host = False
        if connectionargs:
            host = kw["connection_args"].get("host", False)
        queue_strategy = kw.get("queue_strategy", False)
        if queue_strategy == "pinned":
            self.route_and_create_q_worker(hst=host)
            r = self.sendtask(q=host, exe=method, kwargs=kw)
        else:
            r = self.sendtask(q=config.redis_fifo_q, exe=method, kwargs=kw)
        return r

    def execute_service_task(self, metho, **kwargs):
        """service wrapper for execute task method"""
        log.info(kwargs)
        kw = kwargs.get("kwargs")
        resul = self.execute_task(method=metho, kwargs=kw)
        serv = self.create_service_instance(raw_data=kw)
        if serv:
            resul["data"]["service_id"] = serv
        return resul

    def fetchsubtask(self, parent_task_object):
        """fetches nested subtasks for service driven tasks"""
        try:
            status = parent_task_object["data"]["task_status"]
            log.info(f'fetching subtask: {parent_task_object["data"]["task_id"]}')
            task_errors = []
            for j in range(len(parent_task_object["data"]["task_result"])):
                tempres = Job.fetch(parent_task_object["data"]["task_result"][j]["data"]["data"]["task_id"], connection=self.base_connection)
                temprespobj = self.render_task_response(tempres)
                if status != "started" or status != "queued":
                    if temprespobj["data"]["task_status"] == "started":
                        parent_task_object["data"]["task_status"] = temprespobj["data"]["task_status"]
                if temprespobj["data"]["task_status"] == "failed":
                    task_errors.append({
                        parent_task_object["data"]["task_result"][j]["host"]: {
                            "task_id": parent_task_object["data"]["task_result"][j]["data"]["data"]["task_id"],
                            "task_errors": temprespobj["data"]["task_errors"]
                        }
                        })
                parent_task_object["data"]["task_result"][j]["data"].update(temprespobj)
            if len(task_errors) >= 1:
                parent_task_object["data"]["task_errors"] = task_errors
            return parent_task_object

        except Exception as e:
            return e

    def fetchtask(self, task_id):
        """gets a job result and renders it"""
        log.info(f"fetching task: {task_id}")
        try:
            task = Job.fetch(task_id, connection=self.base_connection)
            response_object = self.render_task_response(task)
            if "task_id" in str(response_object["data"]["task_result"]) and "operation" in str(response_object["data"]["task_result"]):
                response_object = self.fetchsubtask(parent_task_object=response_object)
            return response_object
        except Exception as e:
            return e

    def getjoblist(self, q):
        """provides a list of all jobs in the queue"""
        try:
            self.getqueue(q)
            # if single host lookup
            if q:
                if self.exists_in_local_queue_db(qn=q):
                    t = self.local_queuedb[q]["queue"].get_job_ids()
                    if t:
                        response_object = {
                            "status": "success",
                            "data": {
                                "task_id": t
                            }
                        }
                        return response_object
                    else:
                        return False
                else:
                    return False
            # multi host lookup
            elif not q:
                response_object = {
                    "status": "success",
                    "data": {
                        "task_id": []
                    }
                }
                for i in self.local_queuedb:
                    response_object["data"]["task_id"].append(self.local_queuedb[i]["queue"].get_job_ids())
                return response_object
        except Exception as e:
            return e

    def getjobliststatus(self, q):
        """provides a breakdown of all jobs in the queue"""
        log.info(f"getting jobs and status: {q}")
        try:
            if q:
                self.getqueue(q)
                task = self.local_queuedb[q]["queue"].get_job_ids()
                response_object = {
                    "status": "success",
                    "data": {
                        "task_id": []
                    }
                }
                # get startedjobs
                startedjobs = self.getstartedjobs(self.local_queuedb[q]["queue"])
                for job in startedjobs:
                    task.append(job)

                # get finishedjobs
                finishedjobs = self.getfinishedjobs(self.local_queuedb[q]["queue"])
                for job in finishedjobs:
                    task.append(job)

                # get failedjobs
                failedjobs = self.getfailedjobs(self.local_queuedb[q]["queue"])
                for job in failedjobs:
                    task.append(job)

                if task:
                    for job in task:
                        try:
                            jobstatus = Job.fetch(job, connection=self.base_connection)
                            jobdata = self.render_task_response(jobstatus)
                            response_object["data"]["task_id"].append(jobdata)
                        except Exception as e:
                            return e
                return response_object
        except Exception as e:
            return e

    def getstartedjobs(self, q):
        """returns list of started redis jobs"""
        log.info(f"getting started jobs: {q}")
        try:
            registry = StartedJobRegistry(q, connection=self.base_connection)
            response_object = registry.get_job_ids()
            return response_object
        except Exception as e:
            return e

    def getfinishedjobs(self, q):
        """returns list of finished redis jobs"""
        log.info(f"getting finished jobs: {q}")
        try:
            registry = FinishedJobRegistry(q, connection=self.base_connection)
            response_object = registry.get_job_ids()
            return response_object
        except Exception as e:
            return e

    def getfailedjobs(self, q):
        """returns list of failed redis jobs"""
        log.info(f"getting failed jobs: {q}")
        try:
            registry = FailedJobRegistry(q, connection=self.base_connection)
            response_object = registry.get_job_ids()
            return response_object
        except Exception as e:
            return e

    def send_broadcast(self, msg: str):
        """publishes a message to all workers"""
        log.info(f"sending broadcast: {msg}")
        try:
            self.base_connection.publish(config.redis_broadcast_q, msg)
            return {
                "result": "Message Sent"
            }

        except Exception as e:
            return e

    def clear_cache_for_host(self, cache_key: str):
        """poisions a cache for a specific host"""
        if not cache_key.count(":") >= 2:
            log.error(f"{cache_key=} doesn't seem to be a valid cache key!")
        host_port = cache_key.split(":")[:2]  # first 2 segments
        modified_cache_key = ":".join(host_port)
        log.info(f"deleting {modified_cache_key=}")
        return self.cache.clear_keys(modified_cache_key)

    def get_workers(self):
        """returns stats about all running rq workers"""
        try:
            workers = Worker.all(connection=self.base_connection)
            result = []
            for w in workers:
                w_bd = str(w.birth_date)
                w_lhb = str(w.last_heartbeat)
                birth_d = datetime.datetime.strptime(w_bd, "%Y-%m-%d %H:%M:%S.%f")
                last_hb = datetime.datetime.strptime(w_lhb, "%Y-%m-%d %H:%M:%S.%f")
                result.append(WorkerResponse(
                    hostname=w.hostname,
                    pid=w.pid,
                    name=w.name,
                    last_heartbeat=last_hb,
                    birth_date=birth_d,
                    successful_job_count=w.successful_job_count,
                    failed_job_count=w.failed_job_count,
                    total_working_time=w.total_working_time
                ).dict())
            return result
        except Exception as e:
            log.error(f"get_workers: {e}")
            return e

    def kill_worker(self, worker_name=False):
        """kills a worker by its name and updates the pinned worker db"""
        running_workers = self.get_workers()
        killed = False
        for w in running_workers:
            if w["name"] == worker_name:
                killed = True
                kill_message = {
                    "type": "kill_worker_pid",
                    "kwargs": {
                        "hostname": w["hostname"],
                        "pid": w["pid"]
                        }
                    }
                self.send_broadcast(json.dumps(kill_message))

                # update pinned db
                r = self.base_connection.get(self.redis_pinned_store)
                rjson = json.loads(r)
                for container in rjson:
                    if container["hostname"] == w["hostname"]:
                        container["count"] -= 1
                self.base_connection.set(
                    self.redis_pinned_store,
                    json.dumps(rjson)
                    )

        if not killed:
            raise Exception(f"worker {worker_name} not found")

    def create_service_instance(self, raw_data):
        """creates a service id and stores it in the DB with the service
        payload"""
        u_uid = uuid.uuid4()
        sid = f"{1}_{u_uid}_service_instance"
        exists = self.base_connection.get(sid)
        if not exists:
            raw_json = json.dumps(raw_data)
            self.base_connection.set(sid, raw_json)
            return f"{u_uid}"
        else:
            return False

    def fetch_service_instance(self, sid):
        """returns ALL data from the latest copy of the latest service"""
        sid_parsed = f"1_{sid}_service_instance"
        exists = self.base_connection.get(sid_parsed)
        if not exists:
            return False
        else:
            return exists

    def fetch_service_instance_args(self, sid):
        """returns the args ONLY from the latest copy of the latest service"""
        service_inst_result = self.fetch_service_instance(sid)
        if service_inst_result:
            # scrub credentials
            service_inst_json = json.loads(service_inst_result)["args"]
            json_scrub_dict = ["$.username", "$.password", "$.key"]
            for scrub_match in json_scrub_dict:
                jsonpath_expr = parse(scrub_match)
                jsonpath_expr.find(service_inst_json)
                jsonpath_expr.update(service_inst_json, "*******")
            return service_inst_json
        else:
            return False

    def delete_service_instance(self, sid):
        """gets the service instance and deletes it from the db and network"""
        sid_parsed = f"1_{sid}_service_instance"
        res = json.loads(self.fetch_service_instance(sid))
        res["operation"] = "delete"
        result = self.execute_task(method="render_service", kwargs=res)
        self.base_connection.delete(sid_parsed)
        return result

    def redeploy_service_instance(self, sid):
        """redeploys the service instance to the network"""
        sid_parsed = f"1_{sid}_service_instance"
        res = json.loads(self.fetch_service_instance(sid))
        res["operation"] = "create"
        result = self.execute_task(method="render_service", kwargs=res)
        return result

    def retrieve_service_instance(self, sid):
        """validates the service instances state against the network"""
        sid_parsed = f"1_{sid}_service_instance"
        res = json.loads(self.fetch_service_instance(sid))
        res["operation"] = "retrieve"
        result = self.execute_task(method="render_service", kwargs=res)
        return result

    def validate_service_instance(self, sid):
        """validates the service instances state against the network"""
        sid_parsed = f"1_{sid}_service_instance"
        res = json.loads(self.fetch_service_instance(sid))
        res["operation"] = "validate"
        result = self.execute_task(method="render_service", kwargs=res)
        return result

    def get_service_instances(self):
        """retrieves all service instances in the redis store"""
        result = []
        for sid in self.base_connection.scan_iter("*_service_instance"):
            sid_str = sid.decode("utf-8")
            parsed_sid = sid_str.replace('1_', '').replace('_service_instance', '')
            sid_data = json.loads(self.fetch_service_instance(parsed_sid))
            if sid_data:
                appendres = {
                    "service_model": sid_data["service_model"],
                    "service_id": parsed_sid
                    }
                result.append(appendres)
        return result

    def fetch_pinned_store(self):
        """returns ALL data from the pinned store"""
        exists = self.base_connection.get(self.redis_pinned_store)
        result = json.loads(exists)
        return result

    def purge_container_from_pinned_store(self, name):
        """force purge a specific container from the pinned store"""
        r = self.base_connection.get(config.redis_pinned_store)
        rjson = json.loads(r)
        idex = 0
        for container in rjson:
            if container["hostname"] == name:
                rjson.pop(idex)
                self.base_connection.set(
                    config.redis_pinned_store,
                    json.dumps(rjson)
                    )
                break
            idex += 1

    def deregister_worker(self, container):
        """finds and deregisters an rq worker"""
        # purge all workers still running on this container
        workers = Worker.all(connection=self.base_connection)
        for worker in workers:
            if worker.hostname == f"{container}":
                worker.register_death()
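
Several methods above (kill_worker, purge_container_from_pinned_store, fetch_pinned_store) share one convention: the pinned store is a single Redis string key holding a JSON list of per-container records. A minimal sketch of that read-modify-write cycle, with a hypothetical key name and a record shape modeled on the fields the code reads:

import json
from redis import Redis

r = Redis()
PINNED_STORE = "netpalm_pinned_store"  # hypothetical; the class takes it from config

# initialise the store if missing, as Rediz.__init__ does
if not r.get(PINNED_STORE):
    r.set(PINNED_STORE, json.dumps([]))

# read-modify-write: register a container, then bump its process count
store = json.loads(r.get(PINNED_STORE))
store.append({"hostname": "worker-1", "count": 0, "limit": 4,
              "pinned_listen_queue": "pinned_q_worker-1"})
for container in store:
    if container["hostname"] == "worker-1":
        container["count"] += 1
r.set(PINNED_STORE, json.dumps(store))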
Example #52
class RedisPool(object):
    def __init__(self, host='localhost', port=6379, password=None, name=None):
        if name is None:
            name = uuid.uuid4()
        self.name = name
        self.redis = Redis(host=host, port=port, password=password)

    def add_job(self, job_name):
        assert self.redis.hget('pool_%s_jobs' % (self.name, ),
                               job_name) is None, "Job already exists"
        self.redis.hset('pool_%s_jobs' % (self.name, ), job_name, 'exists')

    def del_job(self, job_name):
        self.redis.hdel('pool_%s_jobs' % (self.name, ), job_name)

    def add_worker(self, worker_name):
        assert self.redis.hget('pool_%s_workers' % (self.name, ),
                               worker_name) is None, "Worker already exists"
        self.redis.hset('pool_%s_workers' % (self.name, ), worker_name,
                        'exists')

    def del_worker(self, worker_name):
        self.redis.hdel('pool_%s_workers' % (self.name, ), worker_name)

    def clear(self):
        self.redis.delete('pool_%s_queue' % (self.name, ))
        self.redis.delete('pool_%s_workers' % (self.name, ))
        self.redis.delete('pool_%s_jobs' % (self.name, ))

    def push_job(self, job_name, side='right'):
        if side == 'right':  # compare strings by value, not identity
            self.redis.rpush('pool_%s_queue' % (self.name), job_name)
        else:
            self.redis.lpush('pool_%s_queue' % (self.name), job_name)

    def pop_job(self, timeout=None):
        try:
            variable, item = self.redis.blpop('pool_%s_queue' % (self.name),
                                              timeout)
            return item
        except TypeError:
            # blpop returns None on timeout; unpacking None raises TypeError
            return None

    @property
    def workers(self):
        worker_names = self.redis.hgetall('pool_%s_workers' % self.name).keys()
        return [RedisWorker(self, worker_name) for worker_name in worker_names]

    @property
    def jobs(self):
        job_names = self.redis.hgetall('pool_%s_jobs' % self.name).keys()
        return [RedisJob(self, job_name) for job_name in job_names]

    def apply_async(self,
                    f,
                    args=None,
                    kwargs=None,
                    callback=None,
                    job_name=None):
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}

        job = RedisJob(self, job_name, f, args=args, kwargs=kwargs)
        job.enqueue()

        return job

    def close(self):
        """Close the queue to new members."""
        pass

    def terminate(self):
        """Immediately end all working jobs."""
        pass

    def join(self):
        """Wait for all processes to exit."""
        pass
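
A short usage sketch of the pool above, assuming a local Redis. RedisJob and RedisWorker live elsewhere in the project, so this sticks to the raw bookkeeping and queue methods:

pool = RedisPool(host="localhost", port=6379, name="demo")
pool.add_job("job-1")            # registered under hash pool_demo_jobs
pool.push_job("job-1")           # enqueued on list pool_demo_queue (right side)
print(pool.pop_job(timeout=1))   # b'job-1'
pool.clear()                     # drops the queue, workers and jobs keys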
Example #53
class RedisUsersConnection:

    _universal_fields = [
        'username', 'password', 'email', 'models-provider', 'first-name',
        'last-name', 'registration-datetime'
    ]
    _temp_fields = ['motivation']
    _appr_fields = ['access-rights-sdo', 'access-rights-vendor']

    def __init__(self, db: t.Optional[t.Union[int, str]] = None):
        config = create_config()
        self._redis_host = config.get('DB-Section', 'redis-host')
        self._redis_port = config.get('DB-Section', 'redis-port')
        if db is None:
            db = config.get('DB-Section', 'redis-users-db', fallback=2)
        self.redis = Redis(host=self._redis_host, port=self._redis_port,
                           db=db)  # pyright: ignore

        self.log_directory = config.get('Directory-Section', 'logs')
        self.LOGGER = log.get_logger(
            'redisUsersConnection',
            '{}/redisUsersConnection.log'.format(self.log_directory))

    def username_exists(self, username: str) -> bool:
        return self.redis.hexists('usernames', username)

    def get_field(self, id: t.Union[str, int], field: str) -> str:
        r = self.redis.get('{}:{}'.format(id, field))
        return (r or b'').decode()

    def set_field(self, id: t.Union[str, int], field: str, value: str) -> bool:
        return bool(self.redis.set('{}:{}'.format(id, field), value))

    def delete_field(self, id: t.Union[str, int], field: str) -> bool:
        return bool(self.redis.delete('{}:{}'.format(id, field)))

    def is_approved(self, id: t.Union[str, int]) -> bool:
        return self.redis.sismember('approved', id)

    def is_temp(self, id: t.Union[str, int]) -> bool:
        return self.redis.sismember('temp', id)

    def id_by_username(self, username: str) -> str:
        r = self.redis.hget('usernames', username)
        return (r or b'').decode()

    def create(self, temp: bool, **kwargs) -> int:
        self.LOGGER.info('Creating new user')
        id = self.redis.incr('new-id')
        self.redis.hset('usernames', kwargs['username'], id)
        if 'registration_datetime' not in kwargs:
            kwargs['registration_datetime'] = str(datetime.datetime.utcnow())
        for field in self._universal_fields:
            self.set_field(id, field, kwargs[field.replace('-', '_')])
        self.redis.sadd('temp' if temp else 'approved', id)
        if temp:
            for field in self._temp_fields:
                self.set_field(id, field, kwargs[field.replace('-', '_')])
        else:
            for field in self._appr_fields:
                self.set_field(id, field, kwargs[field.replace('-', '_')])
        return id

    def delete(self, id: t.Union[str, int], temp: bool):
        self.LOGGER.info('Deleting user with id {}'.format(id))
        self.redis.hdel('usernames', self.get_field(id, 'username'))
        for field in self._universal_fields:
            self.delete_field(id, field)
        self.redis.srem('temp' if temp else 'approved', id)
        if temp:
            for field in self._temp_fields:
                self.delete_field(id, field)
        else:
            for field in self._appr_fields:
                self.delete_field(id, field)

    def approve(self, id: t.Union[str, int], access_rights_sdo: str,
                access_rights_vendor: str):
        self.LOGGER.info('Approving user with id {}'.format(id))
        self.redis.srem('temp', id)
        self.set_field(id, 'access-rights-sdo', access_rights_sdo)
        self.set_field(id, 'access-rights-vendor', access_rights_vendor)
        self.redis.delete('{}:{}'.format(id, 'motivation'))
        self.redis.sadd('approved', id)

    def get_all(self, status) -> list:
        return [id.decode() for id in self.redis.smembers(status)]

    def get_all_fields(self, id: t.Union[str, int]) -> dict:
        r = {}
        for field in self._universal_fields:
            r[field] = self.get_field(id, field)
        if self.is_temp(id):
            for field in self._temp_fields:
                r[field] = self.get_field(id, field)
        elif self.is_approved(id):
            for field in self._appr_fields:
                r[field] = self.get_field(id, field)
        return r
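
The class above flattens each user record into one Redis string key per field ('{id}:{field}'), plus a 'usernames' hash for id lookup and 'temp'/'approved' sets for status. A minimal sketch of the same layout with plain redis-py (values are hypothetical):

from redis import Redis

r = Redis()
uid = r.incr('new-id')                    # allocate a user id
r.hset('usernames', 'alice', uid)         # username -> id lookup
r.set('{}:{}'.format(uid, 'email'), 'alice@example.com')
r.sadd('temp', uid)                       # starts out unapproved

# approval moves the id between sets and drops temp-only fields
r.srem('temp', uid)
r.delete('{}:{}'.format(uid, 'motivation'))
r.sadd('approved', uid)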
Example #54
class RedisAdapter:
    def __init__(self, redis_connection_params, prefix, app=None):
        self._redis_connection = Redis(**redis_connection_params)
        self.prefix = prefix
        self.init_app(app)

    def init_app(self, app):
        self.app = app

    def exists(self, key):
        key = f'{self.prefix}:{key}'

        try:
            result = self._redis_connection.exists(key)
        except RedisError as e:
            if self.app:
                self.app.logger.warning('Redis is down!')
                self.app.logger.exception(e)
            return False

        return result

    def get(self, key):
        key = f'{self.prefix}:{key}'

        try:
            value = self._redis_connection.get(key)
        except RedisError as e:
            if self.app:
                self.app.logger.warning('Redis is down!')
                self.app.logger.exception(e)
            return None

        if value is None:  # key missing: avoid pickle.loads(None)
            return None
        return pickle.loads(value)

    def set(self, key, value, timeout=None):
        key = f'{self.prefix}:{key}'

        if timeout is None:
            timeout = self.app.config['REDIS_TIMEOUT']
        value = pickle.dumps(value)
        try:
            self._redis_connection.setex(key, timeout, value)
        except RedisError as e:
            if self.app:
                self.app.logger.warning('Redis is down!')
                self.app.logger.exception(e)
            return False

        return True

    def invalidate(self, key):
        key = f'{self.prefix}:{key}'

        try:
            keys = self._redis_connection.keys(key)
            for k in keys:
                self._redis_connection.delete(k)
        except RedisError as e:
            if self.app:
                self.app.logger.warning('Redis is down!')
                self.app.logger.exception(e)
            return False

        return True
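
A brief usage sketch for the adapter above. Values round-trip through pickle, so any picklable object works; passing an explicit timeout avoids needing an app with REDIS_TIMEOUT configured:

adapter = RedisAdapter({"host": "localhost", "port": 6379}, prefix="myapp")
adapter.set("user:1", {"name": "alice"}, timeout=60)  # pickled and stored with a TTL
print(adapter.get("user:1"))                          # {'name': 'alice'}
adapter.invalidate("user:*")                          # pattern delete via KEYS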
Example #55
File: add_ac.py  Project: HASKADOG/nt_bot
from time import sleep

from pyrogram import Client
from redis import Redis

user = Client('admin')
redis = Redis()


with user:
    while True:
        a = redis.get('msg')
        if a:
            user.send_message('@WhileForInt', a.decode('UTF-8'))
            redis.delete('msg')
        sleep(1)  # avoid a busy loop hammering Redis


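The other half of this hand-off is whatever writes the msg key; a one-line producer sketch against the same Redis instance:

from redis import Redis

Redis().set('msg', 'hello from the producer')  # picked up and deleted by the loop above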
Example #56
class ResourceManager(StrSignatureMixin):
    '''
    ``ResourceManager`` is a class that manages computing resources such as the ``file_system`` and ``data_base``.
    '''
    def __init__(self,
                 store_path="~/autoflow",
                 file_system="local",
                 file_system_params=frozendict(),
                 db_type="sqlite",
                 db_params=frozendict(),
                 redis_params=frozendict(),
                 max_persistent_estimators=50,
                 persistent_mode="fs",
                 compress_suffix="bz2"):
        '''

        Parameters
        ----------
        store_path: str
            A path to store files belonging to AutoFlow, such as metadata, model files and database files.
        file_system: str
            Indicator-string about which file system or storage system will be used.

            Available options list below:
                * ``local``
                * ``hdfs``
                * ``s3``

            ``local`` is default value.
        file_system_params: dict
            Specific file_system configuration.
        db_type: str
            Indicator-string about which database will be used.

            Available options list below:
                * ``sqlite``
                * ``postgresql``
                * ``mysql``

            ``sqlite`` is default value.
        db_params: dict
            Specific database configuration.
        redis_params: dict
            Redis configuration.
        max_persistent_estimators: int
            Maximal number of models that can be persisted for a single task.

            If this number is exceeded, the worst performing model file will be deleted,

            and the corresponding database record will also be deleted.
        persistent_mode: str
            Indicator-string about which persistent mode will be used.

            Available options list below:
                * ``db`` - serialize the entity to bytes and store it in the database directly.
                * ``fs`` - serialize the entity to bytes, write it to a pickle file and upload it to the storage system (or save it locally).
        compress_suffix: str
            Compressed-file suffix; default is ``bz2``.
        '''
        # --logger-------------------
        self.logger = get_logger(self)
        # --preprocessing------------
        file_system_params = dict(file_system_params)
        db_params = dict(db_params)
        redis_params = dict(redis_params)
        # ---file_system------------
        directory = os.path.split(generic_fs.__file__)[0]
        file_system2cls = find_components(generic_fs.__package__, directory,
                                          FileSystem)
        self.file_system_type = file_system
        if file_system not in file_system2cls:
            raise Exception(f"Invalid file_system {file_system}")
        self.file_system: FileSystem = file_system2cls[file_system](
            **file_system_params)
        if self.file_system_type == "local":
            store_path = os.path.expandvars(os.path.expanduser(store_path))
        self.store_path = store_path
        # ---data_base------------
        assert db_type in ("sqlite", "postgresql", "mysql")
        self.db_type = db_type
        self.db_params = dict(db_params)
        if db_type == "sqlite":
            assert self.file_system_type == "local"
        # ---redis----------------
        self.redis_params = redis_params
        # ---max_persistent_model---
        self.max_persistent_estimators = max_persistent_estimators
        # ---persistent_mode-------
        self.persistent_mode = persistent_mode
        assert self.persistent_mode in ("fs", "db")
        # ---compress_suffix------------
        self.compress_suffix = compress_suffix
        # ---post_process------------
        self.store_path = store_path
        self.file_system.mkdir(self.store_path)
        self.is_init_experiments_db = False
        self.is_init_tasks_db = False
        self.is_init_hdls_db = False
        self.is_init_trials_db = False
        self.is_init_redis = False
        self.is_master = False
        # --some specific path based on file_system---
        self.datasets_dir = self.file_system.join(self.store_path, "datasets")
        self.databases_dir = self.file_system.join(self.store_path,
                                                   "databases")
        self.parent_trials_dir = self.file_system.join(self.store_path,
                                                       "trials")
        self.parent_experiments_dir = self.file_system.join(
            self.store_path, "experiments")
        for dir_path in [
                self.datasets_dir, self.databases_dir,
                self.parent_experiments_dir, self.parent_trials_dir
        ]:
            self.file_system.mkdir(dir_path)
        # --db-----------------------------------------
        self.Datebase = get_db_class_by_db_type(self.db_type)
        # --JSONField-----------------------------------------
        if self.db_type == "sqlite":
            from playhouse.sqlite_ext import JSONField
            self.JSONField = JSONField
        elif self.db_type == "postgresql":
            from playhouse.postgres_ext import JSONField
            self.JSONField = JSONField
        elif self.db_type == "mysql":
            from playhouse.mysql_ext import JSONField
            self.JSONField = JSONField

    def __reduce__(self):
        self.close_redis()
        self.close_experiments_table()
        self.close_tasks_table()
        self.close_hdls_table()
        self.close_trials_table()
        return super(ResourceManager, self).__reduce__()

    def update_db_params(self, database):
        db_params = dict(self.db_params)
        if self.db_type == "sqlite":
            db_params["database"] = self.file_system.join(
                self.databases_dir, f"{database}.db")
        elif self.db_type == "postgresql":
            pass
        elif self.db_type == "mysql":
            pass
        else:
            raise NotImplementedError
        return db_params

    def estimate_new_id(self, Dataset, id_field):
        # fixme: estimates the next auto-increment primary key id, but this looks unreliable
        try:
            records = Dataset.select(getattr(Dataset, id_field)). \
                where(getattr(Dataset, id_field)). \
                order_by(-getattr(Dataset, id_field)). \
                limit(1)
            if len(records) == 0:
                estimated_id = 1
            else:
                estimated_id = getattr(records[0], id_field) + 1
        except Exception as e:
            self.logger.error(f"Database Error:\n{e}")
            estimated_id = 1
        return estimated_id

    def persistent_evaluated_model(self, info: Dict,
                                   trial_id) -> Tuple[str, str]:
        self.trial_dir = self.file_system.join(self.parent_trials_dir,
                                               self.task_id, self.hdl_id)
        self.file_system.mkdir(self.trial_dir)
        model_path = self.file_system.join(
            self.trial_dir, f"{trial_id}.{self.compress_suffix}")
        if info["intermediate_result"] is not None:
            intermediate_result_path = self.file_system.join(
                self.trial_dir, f"{trial_id}_inter-res.{self.compress_suffix}")
        else:
            intermediate_result_path = ""
        self.file_system.dump_pickle(info["models"], model_path)
        if intermediate_result_path:
            self.file_system.dump_pickle(info["intermediate_result"],
                                         intermediate_result_path)
        return model_path, intermediate_result_path

    def get_ensemble_needed_info(self, task_id,
                                 hdl_id) -> Tuple[MLTask, Any, Any]:
        self.task_id = task_id
        self.hdl_id = hdl_id
        self.init_tasks_table()
        task_record = self.TasksModel.select().where(
            self.TasksModel.task_id == task_id)[0]
        ml_task_str = task_record.ml_task
        ml_task = eval(ml_task_str)
        if self.persistent_mode == "fs":
            Xy_train_path = task_record.Xy_train_path
            Xy_train = self.file_system.load_pickle(Xy_train_path)
            Xy_test_path = task_record.Xy_test_path
            Xy_test = self.file_system.load_pickle(Xy_test_path)
        elif self.persistent_mode == "db":
            Xy_train = self.TasksModel.Xy_train
            Xy_test = self.TasksModel.Xy_test
        else:
            raise NotImplementedError
        return ml_task, Xy_train, Xy_test

    def load_best_estimator(self, ml_task: MLTask):
        # todo: run the analysis program at the end?
        self.init_trials_table()
        # order (not group) by loss and cost_time so the best trial comes first
        record = self.TrialsModel.select().order_by(
            self.TrialsModel.loss, self.TrialsModel.cost_time).limit(1)[0]
        if self.persistent_mode == "fs":
            models = self.file_system.load_pickle(record.models_path)
        else:
            models = record.models_bin
        if ml_task.mainTask == "classification":
            estimator = VoteClassifier(models)
        else:
            estimator = MeanRegressor(models)
        return estimator

    def load_best_dhp(self):
        trial_id = self.get_best_k_trials(1)[0]
        record = self.TrialsModel.select().where(
            self.TrialsModel.trial_id == trial_id)[0]
        return record.dict_hyper_param

    def get_best_k_trials(self, k):
        self.init_trials_table()
        trial_ids = []
        records = self.TrialsModel.select().order_by(
            self.TrialsModel.loss, self.TrialsModel.cost_time).limit(k)
        for record in records:
            trial_ids.append(record.trial_id)
        return trial_ids

    def load_estimators_in_trials(
            self, trials: Union[List, Tuple]) -> Tuple[List, List, List]:
        self.init_trials_table()
        records = self.TrialsModel.select().where(
            self.TrialsModel.trial_id << trials)
        estimator_list = []
        y_true_indexes_list = []
        y_preds_list = []
        for record in records:
            exists = True
            if self.persistent_mode == "fs":
                if not self.file_system.exists(record.models_path):
                    exists = False
                else:
                    estimator_list.append(load(record.models_path))
            else:
                estimator_list.append(record.models_bin)
            if exists:
                y_true_indexes_list.append(record.y_true_indexes)
                y_preds_list.append(record.y_preds)
        return estimator_list, y_true_indexes_list, y_preds_list

    def set_is_master(self, is_master):
        self.is_master = is_master

    # ----------runhistory------------------------------------------------------------------
    @property
    def runhistory_db_params(self):
        return self.update_db_params(self.current_tasks_db_name)

    @property
    def runhistory_table_name(self):
        return f"runhistory_{self.hdl_id}"

    # ----------database name------------------------------------------------------------------

    @property
    def meta_records_db_name(self):
        return "meta_records"

    @property
    def current_tasks_db_name(self):
        # return f"{self.task_id}-{self.hdl_id}"
        return f"task_{self.task_id}"

    # ----------redis------------------------------------------------------------------

    def connect_redis(self):
        if self.is_init_redis:
            return True
        try:
            self.redis_client = Redis(**self.redis_params)
            self.is_init_redis = True
            return True
        except Exception as e:
            self.logger.error(f"Redis Error:\n{e}")
            return False

    def close_redis(self):
        self.redis_client = None
        self.is_init_redis = False

    def clear_pid_list(self):
        self.redis_delete("autoflow_pid_list")

    def push_pid_list(self):
        if self.connect_redis():
            self.redis_client.rpush("autoflow_pid_list", os.getpid())

    def get_pid_list(self):
        if self.connect_redis():
            l = self.redis_client.lrange("autoflow_pid_list", 0, -1)
            return list(map(lambda x: int(x.decode()), l))
        else:
            return []

    def redis_set(self, name, value, ex=None, px=None, nx=False, xx=False):
        if self.connect_redis():
            self.redis_client.set(name, value, ex, px, nx, xx)

    def redis_get(self, name):
        if self.connect_redis():
            return self.redis_client.get(name)
        else:
            return None

    def redis_delete(self, name):
        if self.connect_redis():
            self.redis_client.delete(name)

    # ----------experiments_model------------------------------------------------------------------
    def get_experiments_model(self) -> pw.Model:
        class Experiments(pw.Model):
            experiment_id = pw.IntegerField(primary_key=True)
            general_experiment_timestamp = pw.DateTimeField(
                default=datetime.datetime.now)
            current_experiment_timestamp = pw.DateTimeField(
                default=datetime.datetime.now)
            hdl_id = pw.CharField(default="")
            task_id = pw.CharField(default="")
            hdl_constructors = self.JSONField(default=[])
            hdl_constructor = pw.TextField(default="")
            raw_hdl = self.JSONField(default={})
            hdl = self.JSONField(default={})
            tuners = self.JSONField(default=[])
            tuner = pw.TextField(default="")
            should_calc_all_metric = pw.BooleanField(default=True)
            data_manager_bin = PickleFiled(default=0)
            data_manager_path = pw.TextField(default="")
            column_descriptions = self.JSONField(default={})
            column2feature_groups = self.JSONField(default={})
            dataset_metadata = self.JSONField(default={})
            metric = pw.CharField(default="")
            splitter = pw.CharField(default="")
            ml_task = pw.CharField(default="")
            should_store_intermediate_result = pw.BooleanField(default=False)
            fit_ensemble_params = pw.TextField(default="auto")
            additional_info = self.JSONField(default={})
            user = pw.CharField(default=getuser)

            class Meta:
                database = self.experiments_db

        self.experiments_db.create_tables([Experiments])
        return Experiments

    def get_experiment_id_by_task_id(self, task_id):
        self.init_tasks_table()
        return self.TasksModel.select(self.TasksModel.experiment_id).where(
            self.TasksModel.task_id == task_id)[0].experiment_id

    def load_data_manager_by_experiment_id(self, experiment_id):
        self.init_experiments_table()
        experiment_id = int(experiment_id)
        record = self.ExperimentsModel.select().where(
            self.ExperimentsModel.experiment_id == experiment_id)[0]
        data_manager_bin = record.data_manager_bin
        data_manager_path = record.data_manager_path
        if self.persistent_mode == "fs":
            data_manager = self.file_system.load_pickle(data_manager_path)
        elif self.persistent_mode == "db":
            data_manager = data_manager_bin
        else:
            raise NotImplementedError
        return data_manager

    def insert_to_experiments_table(self, general_experiment_timestamp,
                                    current_experiment_timestamp,
                                    hdl_constructors, hdl_constructor, raw_hdl,
                                    hdl, tuners, tuner, should_calc_all_metric,
                                    data_manager, column_descriptions,
                                    dataset_metadata, metric, splitter,
                                    should_store_intermediate_result,
                                    fit_ensemble_params, additional_info):
        self.init_experiments_table()
        # estimate new experiment_id
        experiment_id = self.estimate_new_id(self.ExperimentsModel,
                                             "experiment_id")
        # TODO: should the data_manager's X/y payloads be deleted here?
        data_manager = deepcopy(data_manager)
        data_manager.X_train = None
        data_manager.X_test = None
        data_manager.y_train = None
        data_manager.y_test = None
        if self.persistent_mode == "fs":
            self.experiment_dir = self.file_system.join(
                self.parent_experiments_dir, str(experiment_id))
            self.file_system.mkdir(self.experiment_dir)
            data_manager_bin = 0
            data_manager_path = self.file_system.join(
                self.experiment_dir, f"data_manager.{self.compress_suffix}")
            self.file_system.dump_pickle(data_manager, data_manager_path)
        else:
            data_manager_path = ""
            data_manager_bin = data_manager
        experiment_record = self.ExperimentsModel.create(
            general_experiment_timestamp=general_experiment_timestamp,
            current_experiment_timestamp=current_experiment_timestamp,
            hdl_id=self.hdl_id,
            task_id=self.task_id,
            hdl_constructors=[str(item) for item in hdl_constructors],
            hdl_constructor=str(hdl_constructor),
            raw_hdl=raw_hdl,
            hdl=hdl,
            tuners=[str(item) for item in tuners],
            tuner=str(tuner),
            should_calc_all_metric=should_calc_all_metric,
            data_manager_bin=data_manager_bin,
            data_manager_path=data_manager_path,
            column_descriptions=column_descriptions,
            column2feature_groups=data_manager.column2feature_groups,  # todo
            dataset_metadata=dataset_metadata,
            metric=metric.name,
            splitter=str(splitter),
            ml_task=str(data_manager.ml_task),
            should_store_intermediate_result=should_store_intermediate_result,
            fit_ensemble_params=str(fit_ensemble_params),
            additional_info=additional_info)
        fetched_experiment_id = experiment_record.experiment_id
        if fetched_experiment_id != experiment_id:
            self.logger.warning("fetched_experiment_id != experiment_id")
        self.experiment_id = experiment_id

    def init_experiments_table(self):
        if self.is_init_experiments_db:
            return
        self.is_init_experiments_db = True
        self.experiments_db: pw.Database = self.Datebase(
            **self.update_db_params(self.meta_records_db_name))
        self.ExperimentsModel = self.get_experiments_model()

    def close_experiments_table(self):
        self.is_init_experiments_db = False
        self.experiments_db = None
        self.ExperimentsModel = None

    # ----------tasks_model------------------------------------------------------------------
    def get_tasks_model(self) -> pw.Model:
        class Tasks(pw.Model):
            # task_id = md5(X_train, y_train, X_test, y_test, splitter, metric)
            task_id = pw.CharField(primary_key=True)
            metric = pw.CharField(default="")
            splitter = pw.CharField(default="")
            ml_task = pw.CharField(default="")
            specific_task_token = pw.CharField(default="")
            # Xy_train
            Xy_train_hash = pw.CharField(default="")
            Xy_train_path = pw.TextField(default="")
            Xy_train_bin = pw.BitField(default=0)
            # Xy_test
            Xy_test_hash = pw.CharField(default="")
            Xy_test_path = pw.TextField(default="")
            Xy_test_bin = pw.BitField(default=0)

            class Meta:
                database = self.tasks_db

        self.tasks_db.create_tables([Tasks])
        return Tasks

    def insert_to_tasks_table(self, data_manager: DataManager, metric: Scorer,
                              splitter, specific_task_token):
        self.init_tasks_table()
        Xy_train_hash = get_hash_of_Xy(data_manager.X_train,
                                       data_manager.y_train)
        Xy_test_hash = get_hash_of_Xy(data_manager.X_test, data_manager.y_test)
        metric_str = metric.name
        splitter_str = str(splitter)
        ml_task_str = str(data_manager.ml_task)
        # ---task_id----------------------------------------------------
        m = hashlib.md5()
        get_hash_of_Xy(data_manager.X_train, data_manager.y_train, m)
        get_hash_of_Xy(data_manager.X_test, data_manager.y_test, m)
        get_hash_of_str(metric_str, m)
        get_hash_of_str(splitter_str, m)
        get_hash_of_str(ml_task_str, m)
        get_hash_of_str(specific_task_token, m)
        task_hash = m.hexdigest()
        task_id = task_hash
        records = self.TasksModel.select().where(
            self.TasksModel.task_id == task_id)
        # ---store_task_record----------------------------------------------------
        if len(records) == 0:
            # ---store_datasets----------------------------------------------------
            Xy_train = [data_manager.X_train, data_manager.y_train]
            Xy_test = [data_manager.X_test, data_manager.y_test]
            if self.persistent_mode == "fs":
                Xy_train_path = self.file_system.join(
                    self.datasets_dir,
                    f"{Xy_train_hash}.{self.compress_suffix}")
                self.file_system.dump_pickle(Xy_train, Xy_train_path)
                Xy_train_bin = 0
            else:
                Xy_train_path = ""
                Xy_train_bin = Xy_train
            if Xy_test_hash:
                if self.persistent_mode == "fs":
                    Xy_test_path = self.file_system.join(
                        self.datasets_dir,
                        f"{Xy_test_hash}.{self.compress_suffix}")
                    self.file_system.dump_pickle(Xy_test, Xy_test_path)
                    Xy_test_bin = 0
                else:
                    Xy_test_path = ""
                    Xy_test_bin = Xy_test
            else:
                Xy_test_path = ""
                Xy_test_bin = 0

            self.TasksModel.create(
                task_id=task_id,
                metric=metric_str,
                splitter=splitter_str,
                ml_task=ml_task_str,
                specific_task_token=specific_task_token,
                # Xy_train
                Xy_train_hash=Xy_train_hash,
                Xy_train_path=Xy_train_path,
                Xy_train_bin=Xy_train_bin,
                # Xy_test
                Xy_test_hash=Xy_test_hash,
                Xy_test_path=Xy_test_path,
                Xy_test_bin=Xy_test_bin,
            )
        self.task_id = task_id

    def init_tasks_table(self):
        if self.is_init_tasks_db:
            return
        self.is_init_tasks_db = True
        self.tasks_db: pw.Database = self.Datebase(
            **self.update_db_params(self.meta_records_db_name))
        self.TasksModel = self.get_tasks_model()

    def close_tasks_table(self):
        self.is_init_tasks_db = False
        self.tasks_db = None
        self.TasksModel = None

    # ----------hdls_model------------------------------------------------------------------
    def get_hdls_model(self) -> pw.Model:
        class HDLs(pw.Model):
            hdl_id = pw.CharField(primary_key=True)
            hdl = self.JSONField(default={})

            class Meta:
                database = self.hdls_db

        self.hdls_db.create_tables([HDLs])
        return HDLs

    def insert_to_hdls_table(self, hdl):
        self.init_hdls_table()
        hdl_hash = get_hash_of_dict(hdl)
        hdl_id = hdl_hash
        records = self.HDLsModel.select().where(
            self.HDLsModel.hdl_id == hdl_id)
        if len(records) == 0:
            self.HDLsModel.create(hdl_id=hdl_id, hdl=hdl)
        self.hdl_id = hdl_id

    def init_hdls_table(self):
        if self.is_init_hdls_db:
            return
        self.is_init_hdls_db = True
        self.hdls_db: pw.Database = self.Datebase(
            **self.update_db_params(self.current_tasks_db_name))
        self.HDLsModel = self.get_hdls_model()

    def close_hdls_table(self):
        self.is_init_hdls_db = False
        self.hdls_db = None
        self.HDLsModel = None

    # ----------trials_model------------------------------------------------------------------

    def get_trials_model(self) -> pw.Model:
        class Trials(pw.Model):
            trial_id = pw.IntegerField(primary_key=True)
            config_id = pw.CharField(default="")
            task_id = pw.CharField(default="")
            hdl_id = pw.CharField(default="")
            experiment_id = pw.IntegerField(default=0)
            estimator = pw.CharField(default="")
            loss = pw.FloatField(default=65535)
            losses = self.JSONField(default=[])
            test_loss = self.JSONField(default=[])
            all_score = self.JSONField(default={})
            all_scores = self.JSONField(default=[])
            test_all_score = self.JSONField(default={})
            models_bin = PickleFiled(default=0)
            models_path = pw.TextField(default="")
            y_true_indexes = PickleFiled(default=0)
            y_preds = PickleFiled(default=0)
            y_test_true = PickleFiled(default=0)
            y_test_pred = PickleFiled(default=0)
            smac_hyper_param = PickleFiled(default=0)
            dict_hyper_param = self.JSONField(default={})  # todo: json field
            cost_time = pw.FloatField(default=65535)
            status = pw.CharField(default="SUCCESS")
            failed_info = pw.TextField(default="")
            warning_info = pw.TextField(default="")
            intermediate_result_path = pw.TextField(default="")
            intermediate_result_bin = PickleFiled(default=b'')
            timestamp = pw.DateTimeField(default=datetime.datetime.now)
            user = pw.CharField(default=getuser)
            pid = pw.IntegerField(default=os.getpid)

            class Meta:
                database = self.trials_db

        self.trials_db.create_tables([Trials])
        return Trials

    def init_trials_table(self):
        if self.is_init_trials_db:
            return
        self.is_init_trials_db = True
        self.trials_db: pw.Database = self.Datebase(
            **self.update_db_params(self.current_tasks_db_name))
        self.TrialsModel = self.get_trials_model()

    def close_trials_table(self):
        self.is_init_trials_db = False
        self.trials_db = None
        self.TrialsModel = None

    def insert_to_trials_table(self, info: Dict):
        self.init_trials_table()
        config_id = info.get("config_id")
        if self.persistent_mode == "fs":
            # TODO: handle the edge case of identical configs under different tasks
            models_path, intermediate_result_path = \
                self.persistent_evaluated_model(info, config_id)
            models_bin = None
            intermediate_result_bin = None
        else:
            models_path = ""
            intermediate_result_path = ""
            models_bin = info["models"]
            intermediate_result_bin = info["intermediate_result"]
        self.TrialsModel.create(
            config_id=config_id,
            task_id=self.task_id,
            hdl_id=self.hdl_id,
            experiment_id=self.experiment_id,
            estimator=info.get("estimator", ""),
            loss=info.get("loss", 65535),
            losses=info.get("losses", []),
            test_loss=info.get("test_loss", 65535),
            all_score=info.get("all_score", {}),
            all_scores=info.get("all_scores", []),
            test_all_score=info.get("test_all_score", {}),
            models_bin=models_bin,
            models_path=models_path,
            y_true_indexes=info.get("y_true_indexes"),
            y_preds=info.get("y_preds"),
            y_test_true=info.get("y_test_true"),
            y_test_pred=info.get("y_test_pred"),
            smac_hyper_param=info.get("program_hyper_param"),
            dict_hyper_param=info.get("dict_hyper_param", {}),
            cost_time=info.get("cost_time", 65535),
            status=info.get("status", "failed"),
            failed_info=info.get("failed_info", ""),
            warning_info=info.get("warning_info", ""),
            intermediate_result_path=intermediate_result_path,
            intermediate_result_bin=intermediate_result_bin,
        )

    def delete_models(self):
        if hasattr(self, "sync_dict"):
            exit_processes = self.sync_dict.get("exit_processes", 3)
            records = 0
            for key, value in self.sync_dict.items():
                if isinstance(key, int):
                    records += value
            if records >= exit_processes:
                return False
        # only the master process performs the actual deletion
        if not self.is_master:
            return True
        self.init_trials_table()
        estimators = []
        for record in self.TrialsModel.select().group_by(
                self.TrialsModel.estimator):
            estimators.append(record.estimator)
        for estimator in estimators:
            should_delete = self.TrialsModel.select().where(
                self.TrialsModel.estimator == estimator).order_by(
                    self.TrialsModel.loss, self.TrialsModel.cost_time).offset(
                        self.max_persistent_estimators)
            if len(should_delete):
                if self.persistent_mode == "fs":
                    for record in should_delete:
                        models_path = record.models_path
                        self.logger.info(
                            f"Deleting expired model at path: {models_path}")
                        self.file_system.delete(models_path)
                self.TrialsModel.delete().where(
                    self.TrialsModel.trial_id.in_(
                        should_delete.select(
                            self.TrialsModel.trial_id))).execute()
        return True
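
# --- usage sketch (illustrative, not part of the original source) ---
# Assumes `manager` is an already-configured ResourceManager whose
# `redis_params` point at a reachable Redis server; the constructor is not
# shown in this excerpt, so the wiring below is an assumption.
def _demo_redis_helpers(manager):
    manager.push_pid_list()                      # register this worker's PID
    print(manager.get_pid_list())                # e.g. [12345]
    manager.redis_set("heartbeat", "1", ex=30)   # key expires after 30 s
    print(manager.redis_get("heartbeat"))        # b'1' while the key lives
    manager.clear_pid_list()                     # drop the shared PID list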
예제 #57
0
class RedisClient(object):
    """
    Redis client

    Redis中固定3个hash:;

    """
    def __init__(self, **kwargs):
        """
        kwargs: 'CONN_DB_INFO'配置信息
        """
        # CONN_DB_INFO配置结构如下
        # 'CONN_DB_INFO' : {'DBTYPE':'mysql',
        #                   'CONN': {'host':x, 'user':x,
        #                            'passwd':x, 'db':x, 'charset':'utf8'},},
        assert kwargs['DBTYPE'] == 'redis', (
            'DBtype error, {} Not `mysql`'.format(kwargs.get('DBTYPE')))
        cnn = kwargs['CONN']
        self.conn_info = {}
        if cnn.get('host'):
            self.conn_info['host'] = cnn.get('host')
        if cnn.get('port'):
            self.conn_info['port'] = cnn.get('port')
        if cnn.get('passwd'):
            self.conn_info['password'] = cnn.get('passwd')

        self.item_hash = 'tauction_item'
        self.item_url_scraped_hash = 'url_state1'  # already scraped, state == 1
        self.item_url_scraping_hash = 'url_negativel'  # being scraped by the crawler, state == -1
        self.item_url_hash = 'ur0'  # pending hand-off to the crawler, state == 0

        self.client = None
        self.state = None

    def get_basic_infos(self):
        """
        about state, host, db
        return: {'host':, 'state': ,}
        """
        return {'host': self.conn_info['host'], 'state': self.state}

    def open(self):
        """open redis client"""
        if self.state == 'opened':
            return

        self.client = Redis(connection_pool=BlockingConnectionPool(
            **self.conn_info))
        if self.client:
            self.state = 'opened'

        logging.info("redis,host:%s, state:%s", self.conn_info.get('host'),
                     self.state)

    def close(self):
        """close redis client"""
        if self.state == 'closed':
            return

        self.state = 'closed'
        if self.client:
            logging.info("数据库连接关闭,host:%s", self.conn_info.get('host'))
            self.client.close()

    def escape_string(self, value, mapping=None) -> str:
        """escape_string escapes *value* but not surround it with quotes."""
        return json.dumps(value)

    def insert_tauction_item(self, key, **kwargs) -> int:
        """
        Write an auction item detail record and mark its URL as scraped.
        Returns 1 on success.
        params:
           key:
           dict keys: url, atten, rec, notice, intro, attachs, bian,
            video, images, preferred, state, spidername
        """
        assert self.client
        # store the detail record
        value = json.dumps(kwargs)
        self.put(self.item_hash, key, value)
        # move the URL out of the pending/scraping hashes into "scraped"
        if self.exists(self.item_url_hash, key):
            self.delete(self.item_url_hash, key)  # defensive cleanup
        self.delete(self.item_url_scraping_hash, key)
        num = self.put(self.item_url_scraped_hash, key, value)
        return num

    def insert_tauctionitem_url(self, key, **kwargs) -> int:
        """
        写标的url, 如果不提供状态state,写入均按照初始状态<0>写入

        返回值:
        -1  如果标的url已经历史入库过
        0   历史库没有,但入库失败
        1   入库成功
        params:
            key:
            kwargs keys: url, spidername, state
        """
        assert self.client

        if (self.exists(self.item_url_scraped_hash, key)
                or self.exists(self.item_url_scraping_hash, key)
                or self.exists(self.item_url_hash, key)):
            return -1

        state = 0
        num = 0
        if kwargs.get('state'):
            # treat an invalid state as state 0
            state = kwargs.get('state')
            if state == 1:
                num = self.put(self.item_url_scraped_hash, key,
                               json.dumps(kwargs))
            elif state == -1:
                num = self.put(self.item_url_scraping_hash, key,
                               json.dumps(kwargs))
            else:
                kwargs['state'] = 0
                num = self.put(self.item_url_hash, key, json.dumps(kwargs))
        else:
            kwargs['state'] = 0
            num = self.put(self.item_url_hash, key, json.dumps(kwargs))
        return num

    def fetch_auction_item_urls(self, spider, maxnum) -> dict:
        """
        抓取最多max个未访问url,返回结构是{id:url}结构的字典

        params:
           spider: spider name
           maxnum: maximun urls got from db
        """
        assert maxnum > 0 and self.client
        samples = {}
        all_sets = self.getall(self.item_url_hash)

        if all_sets:
            if len(all_sets) <= maxnum:
                samples = all_sets
            else:
                # randomly sample the requested number of keys
                sample_keys = random.sample(list(all_sets), maxnum)
                samples = {
                    k: v
                    for k, v in all_sets.items() if k in sample_keys
                }
            # move the sampled URLs to the "scraping" state
            for key, value in samples.items():
                self.delete(self.item_url_hash, key)
                self.put(self.item_url_scraping_hash, key, value)

            return {k: json.loads(v).get('url') for k, v in samples.items()}
        return {}

    def get(self, hashname, key) -> str:
        """method"""
        #py3 redis.hget返回时byte类型
        data = self.client.hget(name=hashname, key=key)
        if data:
            return data.decode('utf-8')
        else:
            return None

    def put(self, hashname, key, value) -> int:
        """method"""
        num = self.client.hset(hashname, key, value)
        return num

    def delete(self, hashname, *key):
        """delete        """
        self.client.hdel(hashname, *key)

    def exists(self, hashname, key):
        """method"""
        return self.client.hexists(hashname, key)

    def update(self, hashname, key, value):
        """method"""
        self.client.hset(hashname, key, value)

    def getall(self, hashname) -> dict:
        """Return every field of the hash as a {str: str} dict."""
        item_dict = self.client.hgetall(hashname)
        return {
            k.decode('utf-8'): v.decode('utf-8')
            for k, v in item_dict.items()
        }

    def clear(self, hashname):
        """method"""
        return self.client.delete(hashname)

    def getnumber(self, hashname):
        """method"""
        return self.client.hlen(hashname)
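
# --- usage sketch (illustrative, not part of the original source) ---
# Walks a URL through the intended life-cycle: pending (state 0) ->
# scraping (state -1) -> scraped (state 1). The host and the item names
# below are placeholder assumptions.
def _demo_redis_client():
    client = RedisClient(DBTYPE='redis',
                         CONN={'host': 'localhost', 'port': 6379})
    client.open()
    client.insert_tauctionitem_url('item-1', url='http://example.com/item-1',
                                   spidername='demo')
    pending = client.fetch_auction_item_urls('demo', maxnum=10)  # -> scraping
    for key, url in (pending or {}).items():
        client.insert_tauction_item(key, url=url, spidername='demo')  # -> scraped
    client.close()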
예제 #58
0
class TestAuthLockout(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        """
        Setup our self objects for this test case
        """
        super().__init__(*args, **kwargs)

        self.username = "******"
        self.redis = Redis()
        self.sample_fail_dict = {
            "failure_count": 0,
            "failure_timestamps": bytes()
        }

    def setUp(self) -> None:
        """
        Plumb up our values in redis, ensure Redis is good, etc.
        :return:
        """
        self.redis.delete("naas_failures_brett")

    def test_tacacs_auth_lockout(self):
        """
        Tests for the tacacs_auth_lockout function
        :return:
        """

        # Test if no existing failures in cache, and we're not reporting one
        with self.subTest(
                msg="Checking failures, none yet reported for user."):
            self.assertEqual(tacacs_auth_lockout(username=self.username),
                             False)

        # Test if no existing failures in cache, and we _ARE_ reporting one
        with self.subTest(msg="Reporting first failure for user."):
            self.assertEqual(
                tacacs_auth_lockout(username=self.username,
                                    report_failure=True), False)

        # Bring the user's total up to 9 recorded failures:
        for _ in range(8):
            tacacs_auth_lockout(username=self.username, report_failure=True)

        # Test if 9 existing failures and checking (But not adding new failure)
        with self.subTest(
                msg="Checking failures, 9 reported so far for user."):
            self.assertEqual(tacacs_auth_lockout(username=self.username),
                             False)

        # Test if 9 existing failures and we report the tenth
        with self.subTest(
                msg="Checking failures, 9 reported, reporting 1 more."):
            self.assertEqual(
                tacacs_auth_lockout(username=self.username,
                                    report_failure=True), True)

        # Test if 10 failures and we are simply checking
        with self.subTest(
                msg="Checking failures, 10 reported so far for user."):
            self.assertEqual(tacacs_auth_lockout(username=self.username), True)

        # Test if 10 failures and we try to report another
        with self.subTest(
                msg=
                "Checking failures, 10 reported so far for user, trying to report another failure."
        ):
            self.assertEqual(
                tacacs_auth_lockout(username=self.username,
                                    report_failure=True), True)

        # Test "old" failures by stashing a 9 failures from _before_ ten minutes ago.
        self.stash_failures(failure_count=9, old=True)

        # Test if 9 existing failures from greater than 10 minutes ago:
        with self.subTest(
                msg="Checking failures, 9 reported > 10 minutes ago."):
            self.assertEqual(tacacs_auth_lockout(username=self.username),
                             False)

        # Test if 9 existing failures from greater than 10 minutes ago, and we're reporting a new failure:
        with self.subTest(
                msg=
                "Checking failures, 9 reported > 10 minutes ago, reporting 1 new failure."
        ):
            self.assertEqual(
                tacacs_auth_lockout(username=self.username,
                                    report_failure=True), False)

        # Now add 9 new failures
        for _ in range(9):
            tacacs_auth_lockout(username=self.username, report_failure=True)

        # Finally test that these failures "count" and we're locked out:
        with self.subTest(
                msg=
                "Testing lockout after old failures were removed but new ones came in."
        ):
            self.assertEqual(
                tacacs_auth_lockout(username=self.username,
                                    report_failure=True), True)

    def tearDown(self) -> None:
        """
        Delete our test entry in Redis
        :return:
        """

        self.redis.delete("naas_failures_brett")

    def stash_failures(self, failure_count: int, old: bool) -> None:
        """
        Will clear the test DB entry, and stash the given number of failures.  Can also be _OLDER_ than 10 minutes
        :param failure_count: How many failures are we stashing
        :param old: Are we stashing failures from prior to 10 minutes ago?
        :return:
        """

        # Clear out all the failures from previous tests
        self.redis.delete("naas_failures_brett")

        self.sample_fail_dict["failure_count"] = failure_count
        fail_timestamps = []
        for _ in range(failure_count):
            # Add the failures into the list
            fail_time = datetime.now()
            if old:
                fail_time = fail_time - timedelta(minutes=30)
            fail_timestamps.append(fail_time)
        self.sample_fail_dict["failure_timestamps"] = dumps(fail_timestamps)

        self.redis.hmset("naas_failures_brett", self.sample_fail_dict)
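
# --- illustrative sketch (the real tacacs_auth_lockout is not shown in this
# excerpt) --- a plausible implementation consistent with the tests above:
# failures live in the Redis hash "naas_failures_<username>", timestamps are
# pickled, entries older than 10 minutes are discarded, and 10 recent
# failures lock the account. The key name, window, and threshold are
# assumptions inferred from the tests.
from datetime import datetime, timedelta
from pickle import dumps, loads

def tacacs_auth_lockout_sketch(username: str, report_failure: bool = False,
                               redis=None) -> bool:
    redis = redis or Redis()
    key = f"naas_failures_{username}"
    raw = redis.hget(key, "failure_timestamps")
    timestamps = loads(raw) if raw else []  # trusted pickle, test data only
    cutoff = datetime.now() - timedelta(minutes=10)
    timestamps = [t for t in timestamps if t > cutoff]  # drop stale failures
    if report_failure:
        timestamps.append(datetime.now())
    redis.hset(key, mapping={"failure_count": len(timestamps),
                             "failure_timestamps": dumps(timestamps)})
    return len(timestamps) >= 10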
예제 #59
0
class _RedisKvDB(KeyValueStore):
    f"""
    Redis key-value pair database implementation of KeyValueStore
    
    Defines methods for getting, deleting and putting key value pairs
    
    :param host, port, db (see also https://github.com/andymccurdy/redis-py)
    Example:
    ```
        db = KeyValueDatabase.instance(provider='redis', host='localhost', port=6379, db=0)
    ```
    """
    def __init__(self,
                 host='xcube-gen-stage-redis',
                 port=6379,
                 db=0,
                 use_mocker: bool = False,
                 **kwargs):
        super().__init__()
        try:
            from redis import Redis
        except ImportError:
            raise api.ApiError(
                500, "Error: Cannot import redis. Please install first.")

        host = os.getenv('XCUBE_HUB_REDIS_HOST') or host
        port = int(os.getenv('XCUBE_HUB_REDIS_PORT') or port)  # env values are strings
        db = int(os.getenv('XCUBE_HUB_REDIS_DB') or db)

        if use_mocker is True or use_mocker == 1:
            self._db = _KvDBMocker()
        else:
            self._db = Redis(host=host, port=port, db=db, **kwargs)
            # self._db.ping()

    def get(self, key):
        """
        Get a key value
        :param key:
        :return:
        """

        from redis.exceptions import ConnectionError as RedisConnectionError
        try:
            val = self._db.get(key)
        except RedisConnectionError:
            raise api.ApiError(400, "System Error: redis cache not ready.")
        if isinstance(val, bytes):
            val = val.decode('utf-8')
        return val

    def set(self, key, value):
        """
        Set a key value
        :param value:
        :param key:
        :return:
        """

        from redis.exceptions import ConnectionError as RedisConnectionError
        try:
            return self._db.set(key, value)
        except RedisConnectionError:
            raise api.ApiError(400, "System Error: redis cache not ready.")

    def delete(self, key):
        """
        Delete a key
        :param key:
        :return:
        """

        from redis.exceptions import ConnectionError as RedisConnectionError
        try:
            return self._db.delete(key)
        except RedisConnectionError:
            raise api.ApiError(400, "System Error: redis cache not ready.")
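
# --- usage sketch (illustrative, not part of the original source) ---
# Assumes a Redis server reachable at localhost:6379; `use_mocker=True`
# would instead require the _KvDBMocker helper, which this excerpt omits.
def _demo_kvdb():
    db = _RedisKvDB(host='localhost', port=6379, db=0)
    db.set('job:42:status', 'running')
    print(db.get('job:42:status'))   # -> 'running' (bytes decoded to str)
    db.delete('job:42:status')
    print(db.get('job:42:status'))   # -> None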
예제 #60
0
from redis import Redis

r = Redis()
words = open("words_alpha.txt").readlines()
bigram_list = dict()

r.delete("words")
r.delete("letterpairs")
print(f"Processing {len(words)} words ", end='', flush=True)

count = 1
checkpoint = max(1, len(words) // 10)  # print a progress dot every ~10%

for word in words:
    if not count % checkpoint:
        print(".", end='', flush=True)
    count += 1
    word = word.rstrip().lower()
    if len(word) >= 2:
        r.sadd("words", word)
        # tally every adjacent letter pair (bigram) in the word
        for i in range(len(word) - 1):
            bigram = word[i:i + 2]
            bigram_list.setdefault(bigram, 0)
            bigram_list[bigram] += 1

print(f"Processing {len(bigram_list)} bigrams ", end="", flush=True)
count = 1
checkpoint = max(1, len(bigram_list) // 10)

for pair, freq in bigram_list.items():