def getRedisKeyValues(host, port, keyFilter):
    """Return up to 1024 ``[key, value]`` pairs whose key contains *keyFilter*.

    Connects to the Redis cluster reachable via ``host:port``; values are
    fetched through the module-level ``getValue`` helper. On any failure the
    function does not raise: it returns a single HTML-formatted error row
    (the UI renders these strings directly).
    """
    try:
        startup_nodes = [{"host": host, "port": port}]
        rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True)

        # Dump server INFO for debugging.
        info = rc.info()
        for key in info:
            print("%s: %s" % (key, info[key]))

        result = []
        for key in rc.keys():
            if len(result) >= 1024:  # cap the result size to keep the page usable
                break
            if keyFilter in key:
                # (key, value) pair; value type is resolved by getValue
                result.append([key, getValue(rc, key)])
        return result
    except Exception as e:
        # Surface the failure to the caller as an HTML-formatted error row.
        return [["<font color='red'>Error Info</font>", "<font color='red'>%s</font>" % (str(e) + "<br/>请检查ip和端口是否正确")]]
# ---- Esempio n. 2 ---- (scraper artifact; commented out so the file parses)
'''
    redis cluster info for keys
'''
# Keys reported by CLUSTER INFO (as opposed to per-node INFO / CONFIG GET).
monitor_cluster_key = [
    'cluster_state', 'cluster_slots_assigned', 'cluster_known_nodes',
    'cluster_slots_fail', 'cluster_stats_messages_received', 'cluster_size',
    'cluster_current_epoch', 'cluster_stats_messages_sent',
    'cluster_slots_pfail', 'cluster_my_epoch', 'cluster_slots_ok'
]
'''
    redis-py-cluster
'''

# NOTE(review): ``startup_nodes`` is not defined in this snippet — it must be
# supplied by surrounding code. TODO confirm.
rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True)
redis_cluster_info = rc.cluster_info()
redis_nodes_info = rc.info()
redis_nodes_config = rc.config_get()
'''
        redis ip and port
'''

monitor_redis_node = "192.168.12.171:6379"
monitor_key = "used_memory"
DBsize = ["dbsize", "keys", "cluster_size"]

if monitor_key in redis_nodes_config[monitor_redis_node]:
    # BUG FIX: index by node first, then by key — the condition above checks
    # the per-node mapping, so the lookup must match it (the old code did
    # ``redis_nodes_config[monitor_key]``, a wrong/failing lookup).
    print(redis_nodes_config[monitor_redis_node][monitor_key])
elif monitor_key in monitor_cluster_key:
    print(str(redis_cluster_info[monitor_redis_node][monitor_key]))
elif monitor_key in DBsize:
    print(rc.dbsize()[monitor_redis_node])

if __name__ == "__main__":
    hostIp = "10.230.136.108"

    # A single seed node is enough; the cluster client discovers the rest.
    startup_nodes = [{"host": hostIp, "port": "6419"}]
    rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True)

    # Fetch per-node INFO (kept for parity with the original; not printed).
    info = rc.info()

    print("-------------------")

    # Write one sample key of each structured type.
    # zset — old redis-py zadd signature: (name, score, member). TODO confirm
    # the installed redis-py still accepts this form (3.x switched to a mapping).
    rc.zadd("zset_", 9000, "zset.cc")
    # set
    rc.sadd("set_", "set.cc")
    # hash
    rc.hset("hash", "filed", "123456789")
# ---- Esempio n. 4 ---- (scraper artifact; commented out so the file parses)
class RedisMonitor(object):
    """Collect per-node INFO metrics from a Redis cluster and emit them
    through the project-level ``metrics`` facade.

    Relies on module-level names supplied elsewhere in this file:
    ``StrictRedisCluster``, ``metrics`` and ``logger``.
    """

    # INFO fields emitted by default. Dotted paths ("db0.keys") descend into
    # the nested dicts redis-py produces when parsing INFO output.
    DEFAULT_INDICES = ["redis_git_dirty", "arch_bits", "uptime_in_seconds", "uptime_in_days", "hz", "lru_clock",
                       "connected_clients", "client_longest_output_list", "client_biggest_input_buf", "blocked_clients",
                       "used_memory", "used_memory_rss", "used_memory_peak", "used_memory_peak_perc",
                       "used_memory_overhead",
                       "used_memory_startup", "used_memory_dataset", "used_memory_dataset_perc", "total_system_memory",
                       "used_memory_lua", "maxmemory", "mem_fragmentation_ratio", "active_defrag_running",
                       "lazyfree_pending_objects", "loading", "rdb_changes_since_last_save", "rdb_bgsave_in_progress",
                       "rdb_last_save_time", "rdb_last_bgsave_time_sec", "rdb_current_bgsave_time_sec",
                       "rdb_last_cow_size",
                       "aof_current_size", "aof_rewrite_buffer_length", "aof_pending_bio_fsync", "aof_delayed_fsync",
                       "aof_base_size", "aof_pending_rewrite", "aof_buffer_length", "aof_enabled",
                       "aof_rewrite_in_progress",
                       "aof_rewrite_scheduled", "aof_last_rewrite_time_sec", "aof_current_rewrite_time_sec",
                       "aof_last_cow_size", "total_connections_received", "total_commands_processed",
                       "instantaneous_ops_per_sec", "total_net_input_bytes", "total_net_output_bytes",
                       "instantaneous_input_kbps", "instantaneous_output_kbps", "rejected_connections", "sync_full",
                       "sync_partial_ok", "sync_partial_err", "expired_keys", "expired_stale_perc",
                       "expired_time_cap_reached_count", "evicted_keys", "keyspace_hits", "keyspace_misses",
                       "pubsub_channels",
                       "pubsub_channels", "pubsub_patterns", "latest_fork_usec", "migrate_cached_sockets",
                       "slave_expires_tracked_keys", "active_defrag_hits", "active_defrag_misses",
                       "active_defrag_key_hits",
                       "active_defrag_key_misses", "connected_slaves", "master_repl_offset", "second_repl_offset",
                       "repl_backlog_active", "repl_backlog_size", "repl_backlog_first_byte_offset",
                       "repl_backlog_histlen",
                       "used_cpu_sys", "used_cpu_user", "used_cpu_sys_children", "used_cpu_user_children",
                       "db0.avg_ttl",
                       "db0.expires", "db0.keys"
                       ]

    def __init__(self, config, filename, indices=DEFAULT_INDICES):
        """Connect to the cluster described by ``config['hosts']`` /
        ``config['password']`` and register the metric definitions.

        Exits the process (status 1) if the connection cannot be established.
        """
        self.indices = indices
        self.TAGKV = {}
        self.id_indice = "redis_build_id"
        self.id = None
        self.filename = filename
        self.hosts = config["hosts"]
        self.metrics_namespace_prefix = '' + '.rd.' + self.filename
        startup_nodes = [self._split_host(host) for host in self.hosts]
        try:
            self.client = StrictRedisCluster(startup_nodes=startup_nodes, password=config['password'])
            self.define_tagkv()
        except Exception as e:
            # BUG FIX: the old call passed the exception as a stray lazy-format
            # positional argument with no placeholder; format it explicitly.
            logger.info("Connect Error! %s", e)
            sys.exit(1)

    @staticmethod
    def _split_host(host):
        """Split an ``"ip:port"`` string into the dict shape StrictRedisCluster expects."""
        ip, port = host.split(":", 1)
        return {"host": ip, "port": port}

    def monitor(self):
        """Fetch INFO from every node and emit one store metric per configured
        index, tagged with the node's sanitized ``instance`` name.

        Any failure increments ``indices.monitor.error`` instead of raising.
        """
        metrics.define_counter("indices.monitor.error", self.metrics_namespace_prefix)
        try:
            info = self.client.info()

            for node_name in info:
                # Metric tags cannot contain '.' or ':'.
                cur_tagk = {"instance": node_name.replace(".", "_").replace(":", "_")}
                logger.info("cur_tagk={}".format(cur_tagk))
                node_value = info[node_name]
                for path in self.indices:
                    indices_value = self.find(node_value, path)
                    if indices_value is None:
                        logger.info("can not find path={}".format(path))
                        continue
                    metrics.emit_store(path, indices_value, prefix=self.metrics_namespace_prefix, tagkv=cur_tagk)

                    logger.info("key={},value={},tagv={}".format(path, indices_value, cur_tagk))

        except Exception as e:
            logger.warning(e)
            metrics.emit_counter("indices.monitor.error", 1, prefix=self.metrics_namespace_prefix,
                                 tagkv={"instance": self.filename})

    def define_tagkv(self):
        """Register the ``instance`` tag values (filename + sanitized hosts)
        and a store metric for every configured index."""
        tagv_arr = [self.filename]
        for host in self.hosts:
            # (The old code also split the host here into an unused dict.)
            tagv_arr.append(host.replace(".", "_").replace(":", "_"))
        metrics.define_tagkv('instance', tagv_arr)
        for index in self.indices:
            metrics.define_store(index, self.metrics_namespace_prefix)

    def find(self, temp, path):
        """Walk nested dict ``temp`` along the dot-separated ``path``.

        Returns the value at the path, or None when a segment is missing or an
        intermediate value is not a dict (the old code raised AttributeError
        in that case).
        """
        segments = path.split('.')
        logging.debug("segments={}".format(segments))
        for segment in segments:
            if not isinstance(temp, dict):
                # Hit a leaf before the path was exhausted.
                return None
            temp = temp.get(segment)
            logging.debug("segment={},temp={}".format(segment, temp))
            if temp is None:
                return None

        logging.debug("segment={}, find value={}".format(segment, temp))
        return temp