def migrate(srchost, dsthost, srccluster, dstcluster, db, flush): if srchost == dsthost: print 'Source and destination must be different.' return if srccluster: source_nodes = literal_eval(srchost) source = StrictRedisCluster(startup_nodes=source_nodes, decode_responses=True) logging.debug('source cluster info: %s', source.cluster_info()) else: source = redis.Redis(srchost, db=db) if dstcluster: dest_nodes = literal_eval(dsthost) dest = StrictRedisCluster(startup_nodes=dest_nodes, decode_responses=True) logging.debug('dest cluster info: %s', dest.cluster_info()) else: dest = redis.Redis(dsthost, db=db) if flush: dest.flushdb() if srccluster: representatives = { v['cluster_my_epoch']: k for k, v in source.cluster_info().items() } size = source.dbsize() size = sum(size[reprensentative] for reprensentative in representatives.values()) else: size = source.dbsize() if size == 0: print 'No keys found.' return progress_widgets = [ '%d keys: ' % size, Percentage(), ' ', Bar(), ' ', ETA() ] pbar = ProgressBar(widgets=progress_widgets, maxval=size).start() COUNT = 2000 # scan size cnt = 0 non_existing = 0 already_existing = 0 cursor = 0 if srccluster: counter = 0 keys = [] # iterate all the keys for key in source.scan_iter(count=COUNT): counter += 1 keys.append(key) if counter % COUNT == 0: already_existing, non_existing = handle_keys( source, dest, keys, already_existing, non_existing) cnt += len(keys) pbar.update(min(size, cnt)) keys = [] # handle the remaining if len(keys) > 0: already_existing, non_existing = handle_keys( source, dest, keys, already_existing, non_existing) cnt += len(keys) pbar.update(min(size, cnt)) else: while True: cursor, keys = source.scan(cursor, count=COUNT) already_existing, non_existing = handle_keys( source, dest, keys, already_existing, non_existing) if cursor == 0: break cnt += len(keys) pbar.update(min(size, cnt)) pbar.finish() print 'Keys disappeared on source during scan:', non_existing print 'Keys already existing on destination:', 
already_existing
class RedisCluster:
    """Thin convenience wrapper around a StrictRedisCluster connection.

    Groups the raw client calls by Redis data type (string, hash, list, set)
    and keeps the underlying client in ``self.rc``.
    """

    def __init__(self):
        # Connects using the module-level StartupNodesServer node list.
        try:
            self.rc = StrictRedisCluster(startup_nodes=StartupNodesServer,
                                         decode_responses=True)
        except Exception:
            # Fix: was a bare "except:", which would also swallow
            # SystemExit/KeyboardInterrupt.
            traceback.print_exc()

    # --- generic key operations -------------------------------------

    def count_keys(self):
        # Number of keys in the current database.
        return self.rc.dbsize()

    def exists_key(self, key):
        return self.rc.exists(key)

    def delete_key(self, key):
        self.rc.delete(key)

    def rename_key(self, key1, key2):
        self.rc.rename(key1, key2)

    # --- string operations ------------------------------------------

    def set_key_value(self, key, value):
        self.rc.set(key, value)

    def get_key_value(self, key):
        # Returns None when the key does not exist.
        return self.rc.get(key)

    # --- hash operations --------------------------------------------

    def set_hash(self, key, mapping):
        # mapping is a dict; existing fields are overwritten.
        self.rc.hmset(key, mapping)

    def delete_hash_field(self, key, field):
        # Deletes the field whether or not it exists.
        self.rc.hdel(key, field)

    def exists_hash_field(self, key, field):
        # True when the hash contains the field.
        return self.rc.hexists(key, field)

    def get_hash_field(self, key, field):
        # Value of the field, or None when absent.
        return self.rc.hget(key, field)

    def get_hash_all_field(self, key):
        # All fields and values as a dict; empty dict for a missing key.
        return self.rc.hgetall(key)

    def increase_hash_field(self, key, field, increment):
        # Adds `increment` to the integer value stored at the field.
        self.rc.hincrby(key, field, increment)

    # --- list operations --------------------------------------------

    def rpush_into_lst(self, key, value):
        # Enqueue at the tail.
        self.rc.rpush(key, value)

    def lpush_into_lst(self, key, value):
        # Enqueue at the head.
        self.rc.lpush(key, value)

    def lpop_lst_item(self, key):
        # Pop the first element; None when the list is empty/missing.
        return self.rc.lpop(key)

    def blpop_lst_item(self, key):
        # Blocking pop from the head as a (key, value) tuple;
        # None after the 1-second timeout.
        return self.rc.blpop(key, timeout=1)

    def rpop_lst_item(self, key):
        # Pop the last element; None when the list is empty/missing.
        return self.rc.rpop(key)

    def brpop_lst_item(self, key):
        # Blocking pop from the tail as a (key, value) tuple;
        # None after the 1-second timeout.
        return self.rc.brpop(key, timeout=1)

    # --- set operations ---------------------------------------------

    def add_set(self, key, value):
        self.rc.sadd(key, value)

    def is_member(self, key, value):
        return self.rc.sismember(key, value)

    def pop_member(self, key):
        # Remove and return a random member; None when the set is empty.
        return self.rc.spop(key)

    def pop_members(self, key, num):
        # Return (without removing) up to `num` random members as a list;
        # empty list when the set is missing.
        return self.rc.srandmember(key, num)

    def remove_member(self, key, value):
        # Remove a specific member from the set.
        self.rc.srem(key, value)

    def get_all_members(self, key):
        # Return all members without removing them.
        return self.rc.smembers(key)

    def remove_into(self, key1, key2, value):
        # Move `value` from set key1 into set key2.
        self.rc.smove(key1, key2, value)

    def count_members(self, key):
        # Cardinality of the set.
        return self.rc.scard(key)
class Redis:
    """Unified read-mostly client over a single Redis node, a Redis Cluster,
    or a cluster whose node list is discovered from ZooKeeper.

    The connection-info object ``ci`` selects the backend via ``ci.type``:
    1 = single node, 2 = cluster (ci.host is a JSON node list),
    3 = cluster addresses read from the ZooKeeper node at ci.path.
    """

    def __init__(self, ci):
        log.debug('create connection = %s', ci)
        t = ci.type
        self.t = t
        if t == 1:
            log.debug('create redis connection.')
            self.conn = StrictRedis(host=ci.host, port=ci.port, db=ci.db)
        elif t == 2:
            log.debug('create redis cluster connection.')
            nodes = json.loads(ci.host)
            pool = ClusterConnectionPool(startup_nodes=nodes)
            self.conn = StrictRedisCluster(connection_pool=pool,
                                           decode_responses=True)
        elif t == 3:
            log.debug('create redis connection from zookeeper.')
            client = zk.Client(hosts=ci.host, read_only=True)
            node = client.get(ci.path)
            # Node payload is newline-separated "host:port" entries.
            arr = str(node[0], encoding='utf-8').split('\n')
            address = []
            for h in arr:
                # Fix: was "h is ''" — identity comparison on a string
                # literal; use equality.
                if h == '':
                    continue
                a = h.split(':')
                address.append({'host': a[0], 'port': int(a[1])})
            pool = ClusterConnectionPool(startup_nodes=address)
            self.conn = StrictRedisCluster(connection_pool=pool,
                                           decode_responses=True)
        else:
            raise AttributeError('illegal ConnInfo type.')
        if self.test():
            self.ci = ci
            log.info('connect redis(%s) success', ci.host)

    def test(self):
        """PING the connection; return True when every node answers."""
        log.debug('test connect redis(%s)', self.conn)
        good = False
        try:
            result = self.conn.ping()
            if self.t == 1:
                # Single node: ping() returns a plain boolean.
                good = result
            else:
                # Cluster: ping() returns {node: bool}; all must be truthy.
                for k in result:
                    v = result[k]
                    log.debug('test [%s] result : %s', k, v)
                    if not v:
                        return False
                good = True
        except Exception as e:
            log.error(e)
        finally:
            log.debug('redis connection is good[%s]', good)
        return good

    def db_size(self):
        return self.conn.dbsize()

    def scan_iter(self, match='*', count=None):
        # A non-wildcard pattern is wrapped for substring matching.
        # Fix: was "match is not '*'" — identity comparison on a string
        # literal; use inequality.
        if match != '*':
            match = '*' + match + '*'
        return self.conn.scan_iter(match=match, count=count)

    def get_str(self, key):
        log.debug('get str value by key: %s', key)
        return self.conn.get(key)

    def l_range(self, key):
        # Full list contents.
        start = 0
        end = -1
        log.debug('get list value from %d to %d by key: %s', start, end, key)
        return self.conn.lrange(key, start, end)

    def z_range(self, key):
        # Full sorted-set contents.
        start = 0
        end = -1
        log.debug('get sorted set value from %d to %d by key: %s',
                  start, end, key)
        return self.conn.zrange(key, start, end)

    def s_members(self, key):
        log.debug('get set value by key: %s', key)
        return self.conn.smembers(key)

    def h_get_all(self, key):
        log.debug('get hash value by key: %s', key)
        return self.conn.hgetall(key)

    def get(self, t, k):
        # Dispatch a fetch by the key's Redis type tag (bytes).
        f = self.__t_f_map__[t]
        return f(self, k)

    def type(self, key):
        log.debug('get type by key: %s', key)
        return self.conn.type(key)

    # Type tag (as returned by TYPE) -> fetch method.
    __t_f_map__ = {
        b'string': get_str,
        b'list': l_range,
        b'set': s_members,
        b'zset': z_range,
        b'hash': h_get_all,
    }
''' rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True) redis_cluster_info = rc.cluster_info() redis_nodes_info = rc.info() redis_nodes_config = rc.config_get() ''' redis ip and port ''' monitor_redis_node = "192.168.12.171:6379" monitor_key = "used_memory" DBsize = ["dbsize", "keys", "cluster_size"] if monitor_key in redis_nodes_config[monitor_redis_node]: print redis_nodes_config[monitor_key] elif monitor_key in monitor_cluster_key: print str(redis_cluster_info[monitor_redis_node][monitor_key]) elif monitor_key in DBsize: print rc.dbsize()[monitor_redis_node] else: try: print redis_nodes_info[monitor_redis_node][monitor_key] except: print monitor_key + " is not found" #print redis_nodes_config["192.168.12.172:6379"] #print redis_nodes_info["192.168.12.172:6379"] #for k in redis_cluster_info: # print redis_cluster_info[k] # print k +" " + str(redis_cluster_info[k]["cluster_size"])