class DBAdaptor(object):
    """Factory for database connections backed by one shared Redis pool.

    Connection settings come from the 'redis' section of the YAML config
    (via ``cper.read_yaml_file``); the resulting ``ConnectionPool`` lives
    for the lifetime of the adaptor.
    """

    def __init__(self):
        # NOTE(review): assumes the yaml section provides host/port/db/max_conn;
        # a missing key raises KeyError here — confirm against the config file.
        conf = cper.read_yaml_file('redis')
        self.redis_pool = ConnectionPool(host=conf['host'],
                                         port=conf['port'],
                                         max_connections=conf['max_conn'],
                                         db=conf['db'])

    def create_conn(self, db_type='influxdb'):
        """Return a DB wrapper for *db_type* ('redis', 'influxdb' or 'mysql').

        Unknown types print an error and return None (kept as-is for
        backward compatibility with existing callers).
        """
        if db_type == 'redis':
            return RedisDB(self.redis_pool)
        elif db_type == 'influxdb':
            return InfluxDB()
        elif db_type == 'mysql':
            return MysqlDB()
        else:
            print('Error! Please specify the type of database.')
            return None

    def saveall(self, data):
        # Interface hook; concrete adaptors are expected to override.
        pass

    def createtab(self):
        # Interface hook; concrete adaptors are expected to override.
        pass

    def get_data(self):
        print('factory')

    def __del__(self):
        # BUG FIX: if __init__ raised before redis_pool was assigned, the
        # original __del__ raised AttributeError during garbage collection.
        pool = getattr(self, 'redis_pool', None)
        if pool is not None:
            pool.disconnect()
def follow(request, followusername):
    """Make the logged-in user follow *followusername*.

    Adds *followusername* to the current user's follow set and the current
    user to the target's fans set in Redis, then redirects back to the blog
    content page whose id is stored in the session.
    """
    try:
        currentUser = request.user
    except (Users.DoesNotExist, KeyError):
        return HttpResponseRedirect(reverse('users:pleaselogin'))
    redirect = HttpResponseRedirect(
        reverse('blogs:content',
                kwargs={'blogId': request.session['currblogId']}))
    if not currentUser:
        # BUG FIX: the original fell through and implicitly returned None
        # (an invalid view response); redirect back to the content page.
        return redirect
    currentUsername = currentUser.username
    followkey = generateKey(currentUsername, RedisKey['FOLLOWKEY'])
    fanskey = generateKey(followusername, RedisKey['FANSKEY'])
    pool = ConnectionPool(host='localhost', port='6379', db=0)
    redis = StrictRedis(connection_pool=pool)
    try:
        # SISMEMBER on a missing key returns False, so the original
        # EXISTS branches (which duplicated the SADD pair) are redundant.
        if not redis.sismember(followkey, followusername):
            redis.sadd(followkey, followusername)
            redis.sadd(fanskey, currentUsername)
    finally:
        # BUG FIX: the original leaked the pool on the already-following
        # early return; always disconnect.
        pool.disconnect()
    return redirect
def getUserDataInfo(username):
    """Collect profile statistics for *username*.

    Returns a dict with follow/fans counts (Redis sets), published blog and
    comment counts (ORM), and the unread-message count (Redis list).
    """
    blogList = Blog.objects.filter(
        auther=User.objects.get(username=username)).filter(draft=False)
    followkey = generateKey(username, RedisKey['FOLLOWKEY'])
    fanskey = generateKey(username, RedisKey['FANSKEY'])
    messagekey = generateKey(username, RedisKey['UNREADMSGKEY'])
    # len() evaluates and caches the queryset; the original counted both
    # values with a manual `x = x + 1` loop.
    blogCount = len(blogList)
    commentCount = sum(blog.commentcount for blog in blogList)
    pool = ConnectionPool(host='localhost', port='6379', db=0)
    redis = StrictRedis(connection_pool=pool)
    try:
        # SCARD/LLEN return 0 for missing keys, so the EXISTS round-trips
        # in the original were redundant.
        followcount = redis.scard(followkey)
        fanscount = redis.scard(fanskey)
        msgcount = redis.llen(messagekey)
    finally:
        # Release connections even if a Redis call fails.
        pool.disconnect()
    return {
        'followcount': followcount,
        'fanscount': fanscount,
        'blogcount': blogCount,
        'commentcount': commentCount,
        'msgcount': msgcount
    }
def clearRedis(keys):
    """Delete the given *keys* from the local Redis instance.

    :param keys: iterable of Redis key names; missing keys are ignored.
    """
    pool = ConnectionPool(host='localhost', port='6379', db=0)
    redis = StrictRedis(connection_pool=pool)
    try:
        for key in keys:
            # DEL on a missing key is a no-op, so the original
            # EXISTS-then-DELETE was a redundant (and racy) round-trip.
            redis.delete(key)
    finally:
        # Release connections even if a delete fails.
        pool.disconnect()
class PoolRedisDispatcher(AbstractRedisDispatcher):
    """Dispatcher that hands out Redis clients from one shared connection pool."""

    # Class-level default, shadowed by the instance attribute set in __init__.
    redis_pool = None

    def __init__(self, *pool_args, **pool_kwargs):
        # All arguments are forwarded verbatim to redis.ConnectionPool.
        self.redis_pool = ConnectionPool(*pool_args, **pool_kwargs)
        # Use cooperative super() instead of a hard-coded base-class call.
        super(PoolRedisDispatcher, self).__init__()

    def get_redis(self):
        """Return a Redis client bound to the shared pool."""
        return Redis(connection_pool=self.redis_pool)

    def close(self):
        """Tear down every connection held by the pool."""
        self.redis_pool.disconnect()
def index(request):
    """Front page: paginated list of published blogs plus unread-message badge."""
    if request.user.is_authenticated:
        user = request.user
    else:
        user = get_user(request)
    username = request.user.username
    blogList = Blog.objects.filter(draft=False).order_by('title')
    # Pagination: 10 blogs per page; bad page numbers fall back sensibly.
    paginator = Paginator(blogList, 10)
    page = request.GET.get('page')
    try:
        blogs = paginator.page(page)
    except PageNotAnInteger:
        blogs = paginator.page(1)
    except EmptyPage:
        blogs = paginator.page(paginator.num_pages)
    kwargs = {}
    kwargs['searchlist'] = [{'title': u'标题'}, {'content': u'正文'}]
    kwargs['multichoice'] = True
    searchform = eschoicesearchForm(**kwargs)
    # Unread-message count lives in a per-user Redis list.
    pool = ConnectionPool(host='localhost', port='6379', db=0)
    redis = StrictRedis(connection_pool=pool)
    try:
        messagekey = generateKey(username, RedisKey['UNREADMSGKEY'])
        # LLEN returns 0 for a missing key, so the original EXISTS
        # round-trip was redundant.
        msgcount = redis.llen(messagekey)
    finally:
        # BUG FIX: release the pool even if a Redis call raises.
        pool.disconnect()
    content = {
        'blog_list': blogs,
        'curruser': user,
        'searchform': searchform,
        'msgcount': msgcount,
    }
    return render(request, 'myblog/index.html', content)
def thumbup(request):
    """Register a thumbs-up from the current user on the blog in the session.

    Records the user→blog vote in a Redis set, bumps the blog's thumb
    counter, and pushes an unread-message notification to the author, then
    redirects back to the content page.
    """
    try:
        currentUser = request.user
    except (KeyError, Users.DoesNotExist):
        return render(request, 'users/pleaselogin.html')
    blogId = request.session['currblogId']
    blog = Blog.objects.get(pk=blogId)
    auther = blog.auther.username
    userthumb_key = generateKey(currentUser.username, RedisKey['THUMBUPKEY'])
    blogthumb_key = generateKey(blogId, RedisKey['THUMBCOUNTKEY'])
    messagekey = generateKey(auther, RedisKey['UNREADMSGKEY'])
    pool = ConnectionPool(host='localhost', port='6379', db=0)
    # NOTE(review): the original passed decode_responses=True to the client,
    # which redis-py ignores when connection_pool is supplied; nothing in
    # this view reads a reply value, so the no-op kwarg was dropped.
    redis = StrictRedis(connection_pool=pool)
    try:
        # Each reader may thumb up a given blog at most once.
        if not redis.sismember(userthumb_key, blogId):
            redis.sadd(userthumb_key, blogId)
            # INCR treats a missing key as 0, so the original
            # SET(0)-then-INCR branch was redundant.
            redis.incr(blogthumb_key)
            message_content = (currentUser.username + u'点赞了博客' +
                               blog.title + u'于' +
                               str(datetime.datetime.now()))
            redis.lpush(messagekey, message_content)
    finally:
        # BUG FIX: release the pool even if a Redis call raises.
        pool.disconnect()
    return HttpResponseRedirect(
        reverse('blogs:content',
                kwargs={'blogId': request.session['currblogId']}))
class RedisBackend(DistributedBackend):
    """Distributed frontier backend keeping metadata, queue and states in Redis."""

    component_name = 'Redis Backend'

    def __init__(self, manager):
        self.manager = manager
        self._logger = logging.getLogger("redis_backend.backend")
        cfg = manager.settings
        self._min_hosts = cfg.get('BC_MIN_HOSTS')
        self._max_requests_per_host = cfg.get('BC_MAX_REQUESTS_PER_HOST')
        self.queue_partitions = cfg.get('SPIDER_FEED_PARTITIONS')
        self._logger.info("RedisBackend started with {} partitions".format(
            self.queue_partitions))
        self.pool = ConnectionPool(host=cfg.get('REDIS_HOST'),
                                   port=cfg.get('REDIS_PORT'),
                                   db=0)
        # Components are created lazily by _init() depending on the role.
        self._metadata = None
        self._queue = None
        self._states = None

    @classmethod
    def strategy_worker(cls, manager):
        """Alternate constructor for the strategy-worker role."""
        backend = cls(manager)
        backend._init(manager, "strategy_worker")
        return backend

    @classmethod
    def db_worker(cls, manager):
        """Alternate constructor for the db-worker role."""
        backend = cls(manager)
        backend._init(manager, "db_worker")
        return backend

    @classmethod
    def local(cls, manager):
        """Alternate constructor creating all components locally."""
        backend = cls(manager)
        backend._init(manager)
        return backend

    def _init(self, manager, typ="all"):
        # Build only the components this role needs.
        settings = manager.settings
        if typ in ("strategy_worker", "all"):
            self._states = RedisState(
                self.pool, settings.get('REDIS_STATE_CACHE_SIZE_LIMIT'))
        if typ in ("db_worker", "all"):
            drop_tables = settings.get('REDIS_DROP_ALL_TABLES')
            self._queue = RedisQueue(manager,
                                     self.pool,
                                     self.queue_partitions,
                                     delete_all_keys=drop_tables)
            self._metadata = RedisMetadata(self.pool, drop_tables)

    @property
    def metadata(self):
        return self._metadata

    @property
    def queue(self):
        return self._queue

    @property
    def states(self):
        return self._states

    def frontier_start(self):
        # Start only the components that were actually created.
        for part in (self.metadata, self.queue, self.states):
            if part:
                part.frontier_start()

    def frontier_stop(self):
        # Stop active components, then drop every pooled connection.
        for part in (self.metadata, self.queue, self.states):
            if part:
                part.frontier_stop()
        self.pool.disconnect()

    def add_seeds(self, seeds):
        self.metadata.add_seeds(seeds)

    def page_crawled(self, response):
        self.metadata.page_crawled(response)

    def links_extracted(self, request, links):
        self.metadata.links_extracted(request, links)

    def request_error(self, page, error):
        self.metadata.request_error(page, error)

    def finished(self):
        raise NotImplementedError

    def get_next_requests(self, max_next_requests, **kwargs):
        """Pull up to *max_next_requests* per partition from the queue."""
        collected = []
        self._logger.debug("Querying queue table.")
        for partition_id in set(kwargs.pop('partitions', [])):
            batch = self.queue.get_next_requests(
                max_next_requests,
                partition_id,
                min_hosts=self._min_hosts,
                max_requests_per_host=self._max_requests_per_host)
            collected.extend(batch)
            self._logger.debug("Got %d requests for partition id %d",
                               len(batch), partition_id)
        return collected
def content(request, blogId):
    """Render a blog's content page.

    Caches the title, tracks the read count, and reports thumb state via
    Redis; remembers recently-read blog ids in a short-lived cookie so a
    quick refresh does not bump the read count.
    """
    pool = ConnectionPool(host='localhost', port='6379', db=0)
    # NOTE(review): decode_responses is ignored by redis-py when a
    # connection_pool is supplied, so replies arrive as bytes — hence the
    # explicit .decode() calls below.
    redis = StrictRedis(connection_pool=pool, decode_responses=True)
    try:
        currentusername = request.user.username
        blog = Blog.objects.get(id=blogId)
        title_key = generateKey(blogId, RedisKey['TITLEKEY'])
        readcount_key = generateKey(blogId, RedisKey['READCOUNTKEY'])
        # Cache the title in Redis on first view, then always read it back.
        if not redis.exists(title_key):
            redis.set(title_key, blog.title)
        blog_title = redis.get(title_key).decode()
        countOfThumb = 0
        blogthumb_key = generateKey(blogId, RedisKey['THUMBCOUNTKEY'])
        if redis.exists(blogthumb_key):
            countOfThumb = redis.get(blogthumb_key).decode()
        userthumb_key = generateKey(currentusername, RedisKey['THUMBUPKEY'])
        thumbflag = 'F'
        if redis.exists(userthumb_key) and redis.sismember(userthumb_key, blogId):
            thumbflag = 'T'
        comment = Comment.objects.filter(attachedblog=blog)
        request.session['currblogId'] = blogId
        blogContent = {
            'blog_title': blog_title,
            'content': blog.content,
            'comment_list': comment,
            'countOfThumb': countOfThumb,
            'thumbupflag': thumbflag,
            'auther': blog.auther,
            'curruser': request.user
        }
        blogContent = {**blogContent, **getUserDataInfo(blog.auther.username)}
        readblog_key = generateKey(currentusername, RedisKey['READBLOGKEY'])
        readblogIdlist = []
        response = render(request, 'blogs/content.html', blogContent)

        def _bump_readcount():
            # Seed the counter from the stored model value on first use,
            # then INCR (the original duplicated this in both branches).
            if not redis.exists(readcount_key):
                redis.set(readcount_key, blog.readcount)
            redis.incr(readcount_key)

        if readblog_key in request.COOKIES:
            readblogIdlist = request.COOKIES.get(readblog_key).split(',')
            # Only count a read if this blog is not in the recent-read cookie.
            if blogId not in readblogIdlist:
                _bump_readcount()
        else:
            _bump_readcount()
        stored = redis.get(readcount_key)
        # BUG FIX: guard against a missing counter key — the original called
        # .decode() on None and crashed when the key was never seeded.
        if stored is not None:
            blog.readcount = stored.decode()
            blog.save()
    finally:
        # BUG FIX: the original disconnected the pool mid-function and then
        # kept issuing Redis commands, leaking the fresh connections those
        # commands created. Disconnect exactly once, after all Redis use.
        pool.disconnect()
    # Remember this blog in the per-user "recently read" cookie for 60 s.
    readblogIdlist.append(blogId)
    response.set_cookie(readblog_key, ','.join(readblogIdlist), 60)
    return response
        # NOTE(review): this chunk continues a try-block whose opening lines
        # are outside this view; indentation below is reconstructed.
        with conn.cursor() as cur:
            cur.execute(
                'UPDATE m2_package SET license = %s, success = %s WHERE index_id = %s',
                (','.join(artifact.licenses), True, index))
        conn.commit()
    except:
        # Roll back the failed transaction and re-raise to the caller.
        conn.rollback()
        raise
    finally:
        # Always return the connection to the postgres pool.
        pg_pool.putconn(conn)


def start():
    """Drain work items until *stop* is set or the queue is exhausted."""
    global stop
    while not stop:
        location = read_db(get_next())
        if location is None:
            # No more work queued.
            break
        try:
            artifact = Artifact(location[1:])
            set_licenses(location[0], artifact)
        except BaseException as e:
            # Log with traceback (exc_info=1) and stop on the first failure.
            logger.error("%s\n%s\n\n", location, e, exc_info=1)
            break


# Script entry: run the drain loop, then release both pools.
start()
redis_pool.disconnect()
pg_pool.closeall()
class RedisPool(object):
    """Unified wrapper over standalone, sentinel and cluster Redis deployments."""

    def __init__(self,
                 urls,
                 username=None,
                 password=None,
                 redis_mode=RedisMode.STANDALONE,
                 timeout=5,
                 master_name=None,
                 db=0,
                 decode_responses=True):
        """
        :param urls: redis address: ('hostname', 6379) or
                     [('hostname', 6379), ('hostname', 6378)] or
                     [{"host": "127.0.0.1", "port": "7000"}, {"host": "127.0.0.1", "port": "7001"}];
                     a string form "host:port" / "host:port;host:port" is also parsed below
        :param password: auth
        :param redis_mode: @see RedisMode
        :param timeout: socket timeout in seconds
        :param master_name: sentinel master name (sentinel mode only)
        :param db: logical database index
        :param decode_responses: decode replies to str instead of bytes
        """
        self.urls = urls
        self.username = username
        self.password = password
        self.redis_mode = redis_mode
        self.timeout = timeout
        self.master_name = master_name
        self.db = db
        # Lazily-created client / pool / cluster handles.
        self.__conn = None
        self.__pool = None
        self.__cluster = None
        if self.redis_mode == RedisMode.SENTINEL or self.redis_mode == 1:
            # Sentinel mode: accept "host:port;host:port" strings too.
            if isinstance(urls, str):
                urls = [url.split(":") for url in urls.split(";")]
            if not isinstance(urls, List) and not isinstance(urls, Tuple):
                raise TypeError(
                    "url : [('hostname', 6379),('hostname', 6378)]")
            sentinel = Sentinel(urls,
                                socket_timeout=self.timeout,
                                db=self.db,
                                username=username,
                                password=self.password,
                                decode_responses=decode_responses)
            self.__pool = SentinelConnectionPool(master_name,
                                                 sentinel,
                                                 password=self.password)
        elif self.redis_mode == RedisMode.CLUSTER or self.redis_mode == 2:
            # Cluster mode: parse "host:port;host:port" into node dicts.
            if isinstance(urls, str):

                def addr(url):
                    _host, _port = url.split(":")
                    return {"host": _host, "port": _port}

                urls = [addr(url) for url in urls.split(";")]
            if not isinstance(urls, List) and not isinstance(urls, Tuple):
                raise TypeError(
                    'url : [{"host": "127.0.0.1", "port": "7000"}, {"host": "127.0.0.1", "port": "7001"}]'
                )
            self.__cluster = RedisCluster(startup_nodes=urls,
                                          decode_responses=decode_responses,
                                          socket_timeout=self.timeout,
                                          db=self.db,
                                          username=username,
                                          password=self.password)
        elif self.redis_mode == RedisMode.STANDALONE or self.redis_mode == 0:
            # Standalone mode: a single "host:port" string or (host, port) pair.
            if isinstance(urls, str):
                urls = urls.split(":")
            if not isinstance(urls, List) and not isinstance(urls, Tuple):
                raise TypeError("url : ('hostname', 6379)")
            hostname, port = urls
            self.__pool = ConnectionPool(host=hostname,
                                         port=port,
                                         socket_timeout=self.timeout,
                                         password=self.password,
                                         db=self.db,
                                         username=username,
                                         decode_responses=decode_responses)
        else:
            raise TypeError('redis mode err')

    def __connection(self) -> Redis:
        # Lazily create (and cache) a client for pool-backed modes; cluster
        # mode manages its own client, so it is intentionally a no-op here.
        if not self.__conn:
            if self.redis_mode == RedisMode.SENTINEL or self.redis_mode == RedisMode.STANDALONE:
                self.__conn = Redis(connection_pool=self.__pool)
            elif self.redis_mode == RedisMode.CLUSTER:
                pass
            else:
                raise TypeError('redis mode err')
        return self.__conn

    def database(self) -> Union[Database, RedisCluster]:
        # Cluster mode exposes the RedisCluster client directly; other modes
        # wrap the pool in the project's Database helper.
        if self.redis_mode == RedisMode.CLUSTER or self.redis_mode == 2:
            return self.__cluster
        else:
            return Database(self.__pool)

    def graph(self, name):
        """
        RedisGraph api
        @see https://github.com/RedisGraph/redisgraph-py
        pip install redisgraph
        :param name: graph key name
        :return: redisgraph.Graph bound to the cached connection
        """
        if self.redis_mode == RedisMode.CLUSTER or self.redis_mode == 2:
            raise NotImplementedError("cluster is not supported")
        # Ensure the lazy connection exists before handing it to Graph.
        if not self.__conn:
            self.__connection()
        from redisgraph import Graph
        return Graph(name, self.__conn)

    def json(self):
        """
        RedisJson api
        @see https://github.com/RedisJSON/redisjson-py
        pip install rejson
        :return: project Json helper bound to the pool
        """
        if self.redis_mode == RedisMode.CLUSTER or self.redis_mode == 2:
            raise NotImplementedError("cluster is not supported")
        from .json import Json
        return Json(self.__pool)

    def search(self, index_name):
        """
        RediSearch api
        @see https://github.com/RediSearch/redisearch-py
        pip install redisearch
        :return: project Search helper bound to the pool
        """
        if self.redis_mode == RedisMode.CLUSTER or self.redis_mode == 2:
            raise NotImplementedError("cluster is not supported")
        from .search import Search
        return Search(index_name, self.__pool)

    def table(self, table_name):
        """
        Table-style access built on top of search().

        :param table_name: table (and search index) name
        :return: project Table helper
        """
        if self.redis_mode == RedisMode.CLUSTER or self.redis_mode == 2:
            raise NotImplementedError("cluster is not supported")
        from .table import Table
        return Table(self.search(table_name), table_name)

    def close(self):
        """
        Close the connection pool and any connection currently in use.
        :return:
        """
        if self.__pool:
            self.__pool.disconnect()
        if self.__cluster:
            self.__cluster.close()