class RedisClusterClient:
    """Thin key/value wrapper around a redis cluster connection.

    Exposes a minimal CRUD API: get / put (create-only) / update
    (overwrite-only) / remove / remove_all.
    """

    def __init__(self, host='127.0.0.1', port=6379):
        # decode_responses=True: values come back as str, not bytes.
        startup = [{"host": host, "port": port}]
        self.r = StrictRedisCluster(startup_nodes=startup,
                                    decode_responses=True,
                                    skip_full_coverage_check=True)

    def get(self, key):
        """Return the value stored at *key* (None when absent)."""
        return self.r.get(key)

    def put(self, key, value):
        """Create *key* only if it does not already exist (SET ... NX)."""
        return self.r.set(key, value, nx=True)

    def update(self, key, value):
        """Overwrite *key* only if it already exists (SET ... XX)."""
        return self.r.set(key, value, xx=True)

    def remove(self, key):
        """Delete *key*; returns the number of keys removed."""
        return self.r.delete(key)

    def remove_all(self):
        """Flush the current database."""
        return self.r.flushdb()
def migrate(srchost, dsthost, srccluster, dstcluster, db, flush):
    """Copy every key from a source redis (single node or cluster) to a
    destination redis, showing a progress bar.

    srchost/dsthost: host string for a single node, or a literal_eval'able
        list of startup-node dicts when the matching *cluster flag is set.
    srccluster/dstcluster: truthy when the corresponding endpoint is a cluster.
    db: database index for the non-cluster connections.
    flush: when true, FLUSHDB the destination before copying.
    """
    if srchost == dsthost:
        print('Source and destination must be different.')
        return

    # Connect to the source (cluster or single node).
    if srccluster:
        source_nodes = literal_eval(srchost)
        source = StrictRedisCluster(startup_nodes=source_nodes,
                                    decode_responses=True)
        logging.debug('source cluster info: %s', source.cluster_info())
    else:
        source = redis.Redis(srchost, db=db)

    # Connect to the destination (cluster or single node).
    if dstcluster:
        dest_nodes = literal_eval(dsthost)
        dest = StrictRedisCluster(startup_nodes=dest_nodes,
                                  decode_responses=True)
        logging.debug('dest cluster info: %s', dest.cluster_info())
    else:
        dest = redis.Redis(dsthost, db=db)

    if flush:
        dest.flushdb()

    # Total key count.  For a cluster, dbsize() returns one entry per node,
    # so sum over one representative per distinct epoch to avoid double
    # counting nodes that share a shard.
    if srccluster:
        representatives = {
            v['cluster_my_epoch']: k
            for k, v in source.cluster_info().items()
        }
        # FIX: use a separate name for the per-node dict instead of
        # rebinding `size` (and fix the `reprensentative` typo).
        node_sizes = source.dbsize()
        size = sum(node_sizes[representative]
                   for representative in representatives.values())
    else:
        size = source.dbsize()

    if size == 0:
        print('No keys found.')
        return

    progress_widgets = [
        '%d keys: ' % size, Percentage(), ' ', Bar(), ' ', ETA()
    ]
    pbar = ProgressBar(widgets=progress_widgets, maxval=size).start()

    COUNT = 2000  # scan size
    cnt = 0
    non_existing = 0
    already_existing = 0
    cursor = 0
    if srccluster:
        counter = 0
        keys = []
        # Iterate all the keys, migrating in batches of COUNT.
        for key in source.scan_iter(count=COUNT):
            counter += 1
            keys.append(key)
            if counter % COUNT == 0:
                already_existing, non_existing = handle_keys(
                    source, dest, keys, already_existing, non_existing)
                cnt += len(keys)
                pbar.update(min(size, cnt))
                keys = []
        # Handle the remaining partial batch.
        if len(keys) > 0:
            already_existing, non_existing = handle_keys(
                source, dest, keys, already_existing, non_existing)
            cnt += len(keys)
            pbar.update(min(size, cnt))
    else:
        while True:
            cursor, keys = source.scan(cursor, count=COUNT)
            already_existing, non_existing = handle_keys(
                source, dest, keys, already_existing, non_existing)
            # BUGFIX: count and show progress for this batch BEFORE the
            # cursor check — the original broke out first, so the final
            # batch was never reflected in the progress bar.
            cnt += len(keys)
            pbar.update(min(size, cnt))
            if cursor == 0:
                break
    pbar.finish()

    print('Keys disappeared on source during scan: %d' % non_existing)
    print('Keys already existing on destination: %d' % already_existing)
# NOTE(review): this chunk begins mid-expression — the head of the
# redis_nodes list literal (and any node entries before 6382) was cut off.
# The assignment below is reconstructed from the visible fragment (the
# name `redis_nodes` is grounded by its use in StrictRedisCluster below);
# confirm against the original file that no startup nodes were lost.
redis_nodes = [
    {'host': '192.168.230.218', 'port': 6382},
    {'host': '192.168.230.223', 'port': 6383},
    {'host': '192.168.230.223', 'port': 6384},
    {'host': '192.168.230.223', 'port': 6385},
]
r = StrictRedisCluster(startup_nodes=redis_nodes)
r.flushdb()


# Push URLs into redis.
def pushToRedis(name, valueList):
    """LPUSH every item of *valueList* onto the redis list *name*, 50 times."""
    for _ in range(50):  # loop index was unused; renamed to _
        for item in valueList:
            r.lpush(name, item)


name = "url"
urlList = ["https://www.baidu.com", "http://www.tybai.com/"]
# Enqueue the URLs onto the message queue.
pushToRedis(name, urlList)
class RedisClient(object):
    """Proxy-IP pool backed by a single redis hash.

    The hash is named *name*; its fields are the stored IPs and its
    values are written via HINCRBY with the current unix timestamp.
    """

    def __init__(self, name='common', db=0, host=None, port=None):
        # With no explicit host/port, pull connection settings from the
        # project config; a '|' in the host string marks a cluster whose
        # member hosts are pipe-separated.
        if host is None and port is None:
            self.config = DBConfigParser().get_config(
                server='redis_common_colony', key='colony')
            # self.config = DBConfigParser().get_config(server='redis_common_colony', key='105-62-93colony')
            self.host = self.config.get('host')
            self.port = self.config.get('port')
            self.db = self.config.get('db')
            if '|' in self.host:
                redis_nodes = [
                    {'host': str(node), 'port': self.port, 'db': self.db}
                    for node in self.host.split('|')
                ]
                self.conn = StrictRedisCluster(startup_nodes=redis_nodes)
            else:
                self.conn = redis.Redis(host=self.host, port=self.port,
                                        db=self.db)
        else:
            self.host = host
            self.port = port
            self.db = db
            self.conn = redis.Redis(host=self.host, port=self.port,
                                    db=self.db)
        self.name = name

    def get(self):
        """Return a random ip from the pool, or None when it is empty."""
        entries = self.conn.hgetall(name=self.name)
        if not entries:
            return None
        picked = random.choice(list(entries.keys()))
        if isinstance(picked, bytes):
            return picked.decode('utf-8')
        return picked

    def save(self, key):
        """Store *key* (dicts/lists are JSON-encoded first) in the pool,
        HINCRBY'ing its field by the current unix timestamp."""
        if isinstance(key, (dict, list)):
            key = json.dumps(key)
        return self.conn.hincrby(self.name, key, str(int(time.time())))

    def get_value(self, key):
        """Return the stored value for *key*, or None when missing/empty."""
        value = self.conn.hget(self.name, key)
        return value if value else None

    def pop(self):
        """Return a random ip and remove it from the pool."""
        key = self.get()
        if key:
            self.conn.hdel(self.name, key)
        return key

    def del_ip(self, key):
        """Delete the given ip from the pool."""
        self.conn.hdel(self.name, key)

    def del_all(self):
        """Flush the whole database backing this pool."""
        self.conn.flushdb()

    def get_all(self):
        """Return every ip in the pool (decoded to str on Python 3)."""
        stored = self.conn.hgetall(self.name).keys()
        if sys.version_info.major == 3:
            return [item.decode('utf-8') for item in stored]
        return stored

    def get_count(self):
        """Return the number of ips currently in the pool."""
        return self.conn.hlen(self.name)
# Benchmark driver.
# NOTE(review): assumes `rs` (redis connection), `keys` (list), and the
# helpers print_cluster_info/print_dbsize/do_set/do_get are defined earlier
# in the file — confirm; they are not visible in this chunk.
if len(sys.argv) < 3:
    logging.error("Invalid number of arguments.")
    logging.info("Usage: ./redis-app-benchmark.py <batch_size> <num_processes>")
    exit(1)

batch_size = int(sys.argv[1])
num_processes = int(sys.argv[2])

# Pre-generate one uuid key per planned operation.
for k in range(batch_size * num_processes):
    keys.append(uuid.uuid4())

try:
    response = rs.client_list()
    for r in response:
        # BUGFIX: client_list() yields dicts, so 'Client: ' + r raised
        # TypeError (str + dict); let logging format it instead.
        logging.info('Client: %s', r)
# BUGFIX: "except redis.ConnectionError, e:" is Python-2-only syntax;
# "as e" works on both 2.6+ and 3.
except redis.ConnectionError as e:
    # if you end up here, redis instance is down
    logging.error('connection error: ' + str(e))
    exit(1)

rs.flushdb()
print_cluster_info(rs)
print_dbsize(rs)
do_set(rs, batch_size, num_processes)
print_dbsize(rs)
do_get(rs, batch_size, num_processes)