Example #1
0
 def __init__(self, cluster_name, timestamp, cinfo_data, cinfo_file):
     """Build a snapshot of one collectinfo capture.

     cluster_name/timestamp identify the capture; cinfo_data is the raw
     per-node dict, scrubbed of fault-injected namespace names before use.
     """
     # Snapshot identity.
     self.cluster_name = cluster_name
     self.timestamp = timestamp
     # Per-node containers, filled in by _initialize_nodes().
     self.node_names = {}
     self.nodes = {}
     self.node_lookup = LookupDict()
     # Scrub namespace/set names containing spaces before anything reads them.
     self.cinfo_data = self.ns_name_fault_check(cinfo_data)
     self.cinfo_file = cinfo_file
     self._initialize_nodes()
Example #2
0
    def __init__(self,
                 seed_nodes,
                 user=None,
                 password=None,
                 auth_mode=AuthMode.INTERNAL,
                 use_services_alumni=False,
                 use_services_alt=False,
                 ssl_context=None,
                 only_connect_seed=False,
                 timeout=5):
        """
        Connect to an Aerospike cluster and discover its nodes.

        Want to be able to support multiple nodes on one box (for testing)
        seed_nodes should be the form (address,port,tls) address can be fqdn or ip.

        All instances share ``cluster_state`` (Borg pattern), so only the
        first construction initializes the state and crawls the cluster.
        """

        # Alias this instance's attribute dict to the shared class-level
        # state: every Cluster instance sees the same attributes.
        self.__dict__ = self.cluster_state
        # A previous instance already initialized the shared state.
        if self.cluster_state != {}:
            return

        self._timeout = timeout

        self.user = user
        self.password = password
        self.auth_mode = auth_mode

        # Stored on the class so the flags are visible across instances.
        Cluster.use_services_alumni = use_services_alumni
        Cluster.use_services_alt = use_services_alt

        # self.nodes is a dict from Node ID -> Node objects
        self.nodes = {}

        # to avoid multiple entries of endpoints for same server we are keeping
        # this pointers
        self.aliases = {}

        # self.node_lookup is a dict of (fqdn, port) -> Node
        # and (ip, port) -> Node, and node.node_id -> Node
        self.node_lookup = LookupDict()

        self._seed_nodes = set(seed_nodes)
        self._live_nodes = set()
        self.ssl_context = ssl_context

        # crawl the cluster search for nodes in addition to the seed nodes.
        self.last_cluster_refresh_time = 0
        self.only_connect_seed = only_connect_seed
        self._refresh_cluster()

        # to avoid same label (NODE column) for multiple nodes we need to keep track
        # of available nodes name, if names are same then we can use ip:port
        self._same_name_nodes = False
Example #3
0
    def setUp(self):
        """Patch Cluster and redirect stdout so controller tests run offline.

        Cluster methods are replaced with stubs: ``_crawl`` becomes a
        no-op, ``call_node_method`` always reports an IOError for node
        "test", and ``get_node`` / ``get_node_displaynames`` return
        canned values.
        """
        self.cluster_patch = patch('lib.client.cluster.Cluster')
        #self.view_patch = patch('lib.view.CliView')

        # Silence controller output; the reset_stdout cleanup registered
        # below restores the real stream.  (The previous unused
        # ``real_stdoup`` local that captured sys.stdout was removed.)
        sys.stdout = StringIO()

        self.addCleanup(patch.stopall)
        self.addCleanup(reset_stdout)

        self.MockCluster = self.cluster_patch.start()
        #self.MockView = self.view_patch.start()
        Cluster._crawl = classmethod(lambda self: None)
        Cluster.call_node_method = classmethod(
            lambda self, nodes, method_name, *args, **kwargs:
            {"test": IOError("test error")})

        n = Node("172.99.99.99")
        Cluster.get_node = classmethod(lambda self, key: [n])

        pd = LookupDict()
        pd['test'] = 'test'

        Cluster.get_node_displaynames = classmethod(lambda self: pd)

        self.rc = BasicRootController()
    def setUp(self):
        """Build a LookupDict fixture mapping four IP strings to 1..4."""
        self.test_dict = t = LookupDict()

        addresses = ('192.168.0.11', '192.168.0.12',
                     '192.168.0.21', '192.168.0.22')
        for value, address in enumerate(addresses, 1):
            t[address] = value
Example #5
0
 def __init__(self, cluster_name, timestamp, cinfo_data, cinfo_file):
     """Capture one collectinfo snapshot for *cluster_name* at *timestamp*."""
     self.cluster_name = cluster_name
     self.timestamp = timestamp
     # Node containers are populated later by _initialize_nodes().
     self.nodes = {}
     self.node_names = {}
     self.node_lookup = LookupDict()
     # Remove fault-injected namespace names (containing spaces) up front.
     self.cinfo_data = self.ns_name_fault_check(cinfo_data)
     self.cinfo_file = cinfo_file
     self._initialize_nodes()
Example #6
0
class CollectinfoSnapshot(object):
    """Parsed view of one collectinfo capture of a cluster at one timestamp.

    Wraps the raw ``cinfo_data`` dict (node name -> {'as_stat': ...,
    'sys_stat': ...}) and exposes typed accessors.  Extraction is
    deliberately best-effort: collectinfo files are frequently partial, so
    parse failures degrade to empty results instead of raising.

    Fixes vs. the previous revision: ``get_sys_data`` guarded on the
    *builtin* ``type`` (always truthy) instead of ``stanza``; the Py2-only
    ``iteritems``/``itervalues`` calls (which made every accessor silently
    return {} under Py3 via the broad excepts) were replaced with the
    Py2/Py3-compatible ``items``/``values``; delete-while-iterating loops
    now snapshot keys with ``list()``.
    """

    def __init__(self, cluster_name, timestamp, cinfo_data, cinfo_file):
        self.cluster_name = cluster_name
        self.timestamp = timestamp
        # node name -> CollectinfoNode, populated by _initialize_nodes()
        self.nodes = {}
        # node name -> node name (identity map used as an ordered key set)
        self.node_names = {}
        # Scrub namespace/set names containing spaces before anything reads them.
        self.cinfo_data = self.ns_name_fault_check(cinfo_data)
        self.cinfo_file = cinfo_file
        self.node_lookup = LookupDict()
        self._initialize_nodes()

    def _initialize_nodes(self):
        """Create CollectinfoNode objects and fill in their metadata (best-effort)."""
        try:
            self._set_nodes(self.get_node_names())
            self._set_node_id()
            self._set_ip()
            self._set_xdr_build()
            self._set_asd_build()
            self._set_asd_version()
            self._set_cluster_name()
        except Exception:
            # A partial collectinfo must not prevent snapshot construction.
            pass

    def destroy(self):
        """Release the bulk of the snapshot's memory."""
        try:
            del self.timestamp
            del self.cinfo_data
            del self.cinfo_file
            del self.nodes
            del self.node_names
        except Exception:
            pass

    def ns_name_fault_check(self, value):
        """Delete namespace and set entries whose names contain spaces.

        Such names are artifacts of faulty collectinfo output; they are
        removed in place from both the config and statistics sections.
        Returns the (mutated) ``value``.
        """
        try:
            for node, node_data in value.items():
                if not node or not node_data:
                    continue
                if 'as_stat' not in node_data:
                    continue

                if 'config' in node_data['as_stat']:
                    if 'namespace' in node_data['as_stat']['config']:
                        # list() snapshots the keys: entries are deleted
                        # while iterating.
                        for ns in list(value[node]['as_stat']['config']['namespace'].keys()):
                            if ' ' in ns:
                                del value[node]['as_stat']['config']['namespace'][ns]

                if 'statistics' in node_data['as_stat']:
                    if 'namespace' in node_data['as_stat']['statistics']:
                        for ns in list(value[node]['as_stat']['statistics']['namespace'].keys()):
                            if ' ' in ns:
                                del value[node]['as_stat']['statistics']['namespace'][ns]
                                continue
                            if 'set' in value[node]['as_stat']['statistics']['namespace'][ns]:
                                for sets in list(value[node]['as_stat']['statistics']['namespace'][ns]['set'].keys()):
                                    if ' ' in sets:
                                        del value[node]['as_stat']['statistics']['namespace'][ns]['set'][sets]

        except Exception:
            pass
        return value

    def get_node_displaynames(self):
        """Return node name -> short display name (ip:port keys kept verbatim)."""
        node_names = {}

        for key in self.get_node_names():
            if util.is_valid_ip_port(key):
                node_names[key] = key
            else:
                node_names[key] = self.node_lookup.get_shortname(
                    key, min_prefix_len=20, min_suffix_len=5)

        return node_names

    def get_node_names(self):
        """Return a deep copy of the node-name identity map, building it lazily."""
        if not self.node_names:
            if self.cinfo_data:
                node_names = self.cinfo_data.keys()
            else:
                return {}

            for node_name in node_names:
                self.node_names[node_name] = node_name
        return copy.deepcopy(self.node_names)

    def get_data(self, type="", stanza=""):
        """Return {node: section data} extracted from each node's 'as_stat'.

        type: top-level section, e.g. 'config', 'statistics', 'histogram',
            'meta_data', 'summary'.  (The parameter name shadows the
            builtin but is kept for caller compatibility.)
        stanza: sub-section.  The pseudo-stanzas 'namespace', 'bin'/'bins',
            'set', 'sindex' and 'namespace_list' are flattened out of the
            per-namespace tree; an empty stanza returns the whole section.
        Any per-node failure yields {} for that node (best-effort parsing).
        """
        data = {}

        if not type or not self.cinfo_data:
            return data

        try:
            for node, node_data in self.cinfo_data.items():
                try:
                    if not node or not node_data:
                        continue

                    if 'as_stat' not in node_data or type not in node_data['as_stat']:
                        continue

                    if node not in data:
                        data[node] = {}

                    d = node_data['as_stat'][type]

                    if not stanza:
                        data[node] = copy.deepcopy(d)
                        continue

                    if stanza in ['namespace', 'bin', 'bins', 'set', 'sindex', 'namespace_list']:
                        d = d["namespace"]

                        if stanza == "namespace_list":
                            # list() keeps the Python 2 behaviour of
                            # returning a real list under Python 3.
                            data[node] = list(d.keys())
                            continue

                        for ns_name in d.keys():
                            try:
                                if stanza == "namespace":
                                    data[node][ns_name] = copy.deepcopy(
                                        d[ns_name]["service"])

                                elif stanza == "bin" or stanza == "bins":
                                    data[node][ns_name] = copy.deepcopy(
                                        d[ns_name][stanza])

                                elif stanza == "set":
                                    # Flattened entries keyed "<ns> <set>".
                                    for _name in d[ns_name][stanza]:
                                        _key = "%s %s" % (ns_name, _name)
                                        data[node][_key] = copy.deepcopy(
                                            d[ns_name][stanza][_name])

                                elif stanza == "sindex":
                                    # Flattened entries keyed "<ns> <set> <sindex>".
                                    for _name in d[ns_name][stanza]:
                                        try:
                                            set_name = d[ns_name][stanza][_name]["set"]
                                            _key = "%s %s %s" % (
                                                ns_name, set_name, _name)
                                        except Exception:
                                            continue

                                        data[node][_key] = copy.deepcopy(
                                            d[ns_name][stanza][_name])

                            except Exception:
                                pass

                    elif type == "meta_data" and stanza in ["endpoints", "services"]:
                        try:
                            data[node] = copy.deepcopy(d[stanza]).split(';')
                        except Exception:
                            # Not a ';'-joined string; keep the value as-is.
                            data[node] = copy.deepcopy(d[stanza])

                    elif type == "meta_data" and stanza == "edition":
                        edition = copy.deepcopy(d[stanza])
                        data[node] = util.convert_edition_to_shortform(edition)

                    elif type == "histogram" and stanza == "object-size":
                        if stanza in d:
                            data[node] = copy.deepcopy(d[stanza])

                        else:
                            # old collectinfo does not have object-size-logarithmic
                            # it should return objsz if server version is old
                            as_version = node_data['as_stat']['meta_data']['asd_build']
                            if not common.is_new_histogram_version(as_version) and 'objsz' in d:
                                data[node] = copy.deepcopy(d['objsz'])

                            else:
                                data[node] = {}

                    else:
                        data[node] = copy.deepcopy(d[stanza])

                except Exception:
                    data[node] = {}

        except Exception:
            pass

        return data

    def get_sys_data(self, stanza=""):
        """Return {node: data} for a 'sys_stat' section (e.g. 'meminfo')."""
        data = {}

        # BUG FIX: the guard previously tested the *builtin* ``type``
        # (always truthy); the intended check is on ``stanza``.
        if not stanza or not self.cinfo_data:
            return data

        try:
            for node, node_data in self.cinfo_data.items():
                try:
                    if not node or not node_data:
                        continue

                    if 'sys_stat' not in node_data or stanza not in node_data['sys_stat']:
                        continue

                    data[node] = node_data['sys_stat'][stanza]

                except Exception:
                    data[node] = {}

        except Exception:
            pass

        return data

    def get_node(self, node_key):
        """Return [CollectinfoNode] for node_key; a stub node if unknown."""
        if node_key in self.nodes:
            return [self.nodes[node_key]]
        else:
            return [CollectinfoNode(self.timestamp, node_key, node_key)]

    def get_configs(self, stanza=""):
        """Return {node: config data} for the given stanza."""
        return self.get_data(type="config", stanza=stanza)

    def get_statistics(self, stanza=""):
        """Return {node: statistics data} for the given stanza."""
        return self.get_data(type="statistics", stanza=stanza)

    def get_histograms(self, stanza=""):
        """Return {node: histogram data} for the given stanza."""
        return self.get_data(type="histogram", stanza=stanza)

    def get_summary(self, stanza=""):
        """Return {node: summary data} for the given stanza."""
        return self.get_data(type="summary", stanza=stanza)

    def get_expected_principal(self):
        """Return the node id expected to be the cluster principal.

        The principal is the highest node id; ids are compared zero-padded
        to 16 characters so the comparison is effectively numeric.
        """
        try:
            principal = "0"
            for n in self.nodes.values():
                if n.node_id == 'N/E':
                    # Unknown id: a lone node is trivially its own
                    # principal, otherwise we cannot tell.
                    if self._get_node_count() == 1:
                        return n.node_id
                    return "UNKNOWN_PRINCIPAL"
                if n.node_id.zfill(16) > principal.zfill(16):
                    principal = n.node_id
            return principal
        except Exception:
            return "UNKNOWN_PRINCIPAL"

    def get_xdr_build(self):
        """Return {node: xdr build string}."""
        xdr_build = {}
        try:
            for node in self.nodes:
                xdr_build[node] = self.nodes[node].xdr_build
        except Exception:
            pass
        return xdr_build

    def get_asd_build(self):
        """Return {node: asd build string}."""
        asd_build = {}
        try:
            for node in self.nodes:
                asd_build[node] = self.nodes[node].asd_build
        except Exception:
            pass
        return asd_build

    def get_asd_version(self):
        """Return {node: asd version/edition string}."""
        asd_version = {}
        try:
            for node in self.nodes:
                asd_version[node] = self.nodes[node].asd_version
        except Exception:
            pass
        return asd_version

    def get_cluster_name(self):
        """Return {node: cluster name}."""
        cluster_name = {}
        try:
            for node in self.nodes:
                cluster_name[node] = self.nodes[node].cluster_name
        except Exception:
            pass
        return cluster_name

    def _set_nodes(self, nodes):
        """Register CollectinfoNode objects for every node name."""
        for node in nodes:
            self.node_names[node] = node
            self.nodes[node] = CollectinfoNode(self.timestamp, node)
            self.node_lookup[node] = node

    def _get_node_count(self):
        """Return the number of known nodes."""
        return len(self.nodes)

    def _set_node_id(self):
        """Copy each node's id from the capture metadata (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_node_id(
                    self.cinfo_data[node]['as_stat']['meta_data']['node_id'])
            except Exception:
                pass

    def _set_ip(self):
        """Copy each node's ip from the capture metadata (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_ip(
                    self.cinfo_data[node]['as_stat']['meta_data']['ip'])
            except Exception:
                pass

    def _set_xdr_build(self):
        """Copy each node's xdr build from the capture metadata (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_xdr_build(
                    self.cinfo_data[node]['as_stat']['meta_data']['xdr_build'])
            except Exception:
                pass

    def _set_asd_build(self):
        """Copy each node's asd build from the capture metadata (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_asd_build(
                    self.cinfo_data[node]['as_stat']['meta_data']['asd_build'])
            except Exception:
                pass

    def _set_asd_version(self):
        """Copy each node's edition from the capture metadata (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_asd_version(
                    self.cinfo_data[node]['as_stat']['meta_data']['edition'])
            except Exception:
                pass

    def _set_cluster_name(self):
        """Stamp every node with this snapshot's cluster name (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_cluster_name(self.cluster_name)
            except Exception:
                pass
Example #7
0
class CollectinfoSnapshot(object):
    """Parsed view of one collectinfo capture of a cluster at one timestamp.

    Wraps the raw ``cinfo_data`` dict (node name -> {'as_stat': ...,
    'sys_stat': ...}) and exposes typed accessors.  Extraction is
    deliberately best-effort: collectinfo files are frequently partial, so
    parse failures degrade to empty results instead of raising.

    Fixes vs. the previous revision: ``get_sys_data`` guarded on the
    *builtin* ``type`` (always truthy) instead of ``stanza``; the Py2-only
    ``iteritems``/``itervalues`` calls (which made every accessor silently
    return {} under Py3 via the broad excepts) were replaced with the
    Py2/Py3-compatible ``items``/``values``; delete-while-iterating loops
    now snapshot keys with ``list()``.
    """

    def __init__(self, cluster_name, timestamp, cinfo_data, cinfo_file):
        self.cluster_name = cluster_name
        self.timestamp = timestamp
        # node name -> CollectinfoNode, populated by _initialize_nodes()
        self.nodes = {}
        # node name -> node name (identity map used as an ordered key set)
        self.node_names = {}
        # Scrub namespace/set names containing spaces before anything reads them.
        self.cinfo_data = self.ns_name_fault_check(cinfo_data)
        self.cinfo_file = cinfo_file
        self.node_lookup = LookupDict()
        self._initialize_nodes()

    def _initialize_nodes(self):
        """Create CollectinfoNode objects and fill in their metadata (best-effort)."""
        try:
            self._set_nodes(self.get_node_names())
            self._set_node_id()
            self._set_ip()
            self._set_xdr_build()
            self._set_asd_build()
            self._set_asd_version()
            self._set_cluster_name()
        except Exception:
            # A partial collectinfo must not prevent snapshot construction.
            pass

    def destroy(self):
        """Release the bulk of the snapshot's memory."""
        try:
            del self.timestamp
            del self.cinfo_data
            del self.cinfo_file
            del self.nodes
            del self.node_names
        except Exception:
            pass

    def ns_name_fault_check(self, value):
        """Delete namespace and set entries whose names contain spaces.

        Such names are artifacts of faulty collectinfo output; they are
        removed in place from both the config and statistics sections.
        Returns the (mutated) ``value``.
        """
        try:
            for node, node_data in value.items():
                if not node or not node_data:
                    continue
                if 'as_stat' not in node_data:
                    continue

                if 'config' in node_data['as_stat']:
                    if 'namespace' in node_data['as_stat']['config']:
                        # list() snapshots the keys: entries are deleted
                        # while iterating.
                        for ns in list(value[node]['as_stat']['config']['namespace'].keys()):
                            if ' ' in ns:
                                del value[node]['as_stat']['config']['namespace'][ns]

                if 'statistics' in node_data['as_stat']:
                    if 'namespace' in node_data['as_stat']['statistics']:
                        for ns in list(value[node]['as_stat']['statistics']['namespace'].keys()):
                            if ' ' in ns:
                                del value[node]['as_stat']['statistics']['namespace'][ns]
                                continue
                            if 'set' in value[node]['as_stat']['statistics']['namespace'][ns]:
                                for sets in list(value[node]['as_stat']['statistics']['namespace'][ns]['set'].keys()):
                                    if ' ' in sets:
                                        del value[node]['as_stat']['statistics']['namespace'][ns]['set'][sets]

        except Exception:
            pass
        return value

    def get_node_displaynames(self):
        """Return node name -> short display name (ip:port keys kept verbatim)."""
        node_names = {}

        for key in self.get_node_names():
            if util.is_valid_ip_port(key):
                node_names[key] = key
            else:
                node_names[key] = self.node_lookup.get_shortname(
                    key, min_prefix_len=20, min_suffix_len=5)

        return node_names

    def get_node_names(self):
        """Return a deep copy of the node-name identity map, building it lazily."""
        if not self.node_names:
            if self.cinfo_data:
                node_names = self.cinfo_data.keys()
            else:
                return {}

            for node_name in node_names:
                self.node_names[node_name] = node_name
        return copy.deepcopy(self.node_names)

    def get_data(self, type="", stanza=""):
        """Return {node: section data} extracted from each node's 'as_stat'.

        type: top-level section, e.g. 'config', 'statistics', 'histogram',
            'meta_data', 'summary'.  (The parameter name shadows the
            builtin but is kept for caller compatibility.)
        stanza: sub-section.  The pseudo-stanzas 'namespace', 'bin'/'bins',
            'set', 'sindex' and 'namespace_list' are flattened out of the
            per-namespace tree; an empty stanza returns the whole section.
        Any per-node failure yields {} for that node (best-effort parsing).
        """
        data = {}

        if not type or not self.cinfo_data:
            return data

        try:
            for node, node_data in self.cinfo_data.items():
                try:
                    if not node or not node_data:
                        continue

                    if 'as_stat' not in node_data or type not in node_data['as_stat']:
                        continue

                    if node not in data:
                        data[node] = {}

                    d = node_data['as_stat'][type]

                    if not stanza:
                        data[node] = copy.deepcopy(d)
                        continue

                    if stanza in ['namespace', 'bin', 'bins', 'set', 'sindex', 'namespace_list']:
                        d = d["namespace"]

                        if stanza == "namespace_list":
                            # list() keeps the Python 2 behaviour of
                            # returning a real list under Python 3.
                            data[node] = list(d.keys())
                            continue

                        for ns_name in d.keys():
                            try:
                                if stanza == "namespace":
                                    data[node][ns_name] = copy.deepcopy(
                                        d[ns_name]["service"])

                                elif stanza == "bin" or stanza == "bins":
                                    data[node][ns_name] = copy.deepcopy(
                                        d[ns_name][stanza])

                                elif stanza == "set":
                                    # Flattened entries keyed "<ns> <set>".
                                    for _name in d[ns_name][stanza]:
                                        _key = "%s %s" % (ns_name, _name)
                                        data[node][_key] = copy.deepcopy(
                                            d[ns_name][stanza][_name])

                                elif stanza == "sindex":
                                    # Flattened entries keyed "<ns> <set> <sindex>".
                                    for _name in d[ns_name][stanza]:
                                        try:
                                            set_name = d[ns_name][stanza][_name]["set"]
                                            _key = "%s %s %s" % (
                                                ns_name, set_name, _name)
                                        except Exception:
                                            continue

                                        data[node][_key] = copy.deepcopy(
                                            d[ns_name][stanza][_name])

                            except Exception:
                                pass

                    elif type == "meta_data" and stanza in ["endpoints", "services"]:
                        try:
                            data[node] = copy.deepcopy(d[stanza]).split(';')
                        except Exception:
                            # Not a ';'-joined string; keep the value as-is.
                            data[node] = copy.deepcopy(d[stanza])

                    elif type == "meta_data" and stanza == "edition":
                        edition = copy.deepcopy(d[stanza])
                        data[node] = util.convert_edition_to_shortform(edition)

                    elif type == "histogram" and stanza == "object-size":
                        if stanza in d:
                            data[node] = copy.deepcopy(d[stanza])

                        else:
                            # old collectinfo does not have object-size-logarithmic
                            # it should return objsz if server version is <= SERVER_OLD_HISTOGRAM_LAST_VERSION
                            as_version = node_data['as_stat']['meta_data']['asd_build']
                            if not LooseVersion(as_version) > LooseVersion(constants.SERVER_OLD_HISTOGRAM_LAST_VERSION):
                                data[node] = copy.deepcopy(d['objsz'])

                            else:
                                data[node] = {}

                    else:
                        data[node] = copy.deepcopy(d[stanza])

                except Exception:
                    data[node] = {}

        except Exception:
            pass

        return data

    def get_sys_data(self, stanza=""):
        """Return {node: data} for a 'sys_stat' section (e.g. 'meminfo')."""
        data = {}

        # BUG FIX: the guard previously tested the *builtin* ``type``
        # (always truthy); the intended check is on ``stanza``.
        if not stanza or not self.cinfo_data:
            return data

        try:
            for node, node_data in self.cinfo_data.items():
                try:
                    if not node or not node_data:
                        continue

                    if 'sys_stat' not in node_data or stanza not in node_data['sys_stat']:
                        continue

                    data[node] = node_data['sys_stat'][stanza]

                except Exception:
                    data[node] = {}

        except Exception:
            pass

        return data

    def get_node(self, node_key):
        """Return [CollectinfoNode] for node_key; a stub node if unknown."""
        if node_key in self.nodes:
            return [self.nodes[node_key]]
        else:
            return [CollectinfoNode(self.timestamp, node_key, node_key)]

    def get_configs(self, stanza=""):
        """Return {node: config data} for the given stanza."""
        return self.get_data(type="config", stanza=stanza)

    def get_statistics(self, stanza=""):
        """Return {node: statistics data} for the given stanza."""
        return self.get_data(type="statistics", stanza=stanza)

    def get_histograms(self, stanza=""):
        """Return {node: histogram data} for the given stanza."""
        return self.get_data(type="histogram", stanza=stanza)

    def get_summary(self, stanza=""):
        """Return {node: summary data} for the given stanza."""
        return self.get_data(type="summary", stanza=stanza)

    def get_expected_principal(self):
        """Return the node id expected to be the cluster principal.

        The principal is the highest node id; ids are compared zero-padded
        to 16 characters so the comparison is effectively numeric.
        """
        try:
            principal = "0"
            for n in self.nodes.values():
                if n.node_id == 'N/E':
                    # Unknown id: a lone node is trivially its own
                    # principal, otherwise we cannot tell.
                    if self._get_node_count() == 1:
                        return n.node_id
                    return "UNKNOWN_PRINCIPAL"
                if n.node_id.zfill(16) > principal.zfill(16):
                    principal = n.node_id
            return principal
        except Exception:
            return "UNKNOWN_PRINCIPAL"

    def get_xdr_build(self):
        """Return {node: xdr build string}."""
        xdr_build = {}
        try:
            for node in self.nodes:
                xdr_build[node] = self.nodes[node].xdr_build
        except Exception:
            pass
        return xdr_build

    def get_asd_build(self):
        """Return {node: asd build string}."""
        asd_build = {}
        try:
            for node in self.nodes:
                asd_build[node] = self.nodes[node].asd_build
        except Exception:
            pass
        return asd_build

    def get_asd_version(self):
        """Return {node: asd version/edition string}."""
        asd_version = {}
        try:
            for node in self.nodes:
                asd_version[node] = self.nodes[node].asd_version
        except Exception:
            pass
        return asd_version

    def get_cluster_name(self):
        """Return {node: cluster name}."""
        cluster_name = {}
        try:
            for node in self.nodes:
                cluster_name[node] = self.nodes[node].cluster_name
        except Exception:
            pass
        return cluster_name

    def _set_nodes(self, nodes):
        """Register CollectinfoNode objects for every node name."""
        for node in nodes:
            self.node_names[node] = node
            self.nodes[node] = CollectinfoNode(self.timestamp, node)
            self.node_lookup[node] = node

    def _get_node_count(self):
        """Return the number of known nodes."""
        return len(self.nodes)

    def _set_node_id(self):
        """Copy each node's id from the capture metadata (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_node_id(
                    self.cinfo_data[node]['as_stat']['meta_data']['node_id'])
            except Exception:
                pass

    def _set_ip(self):
        """Copy each node's ip from the capture metadata (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_ip(
                    self.cinfo_data[node]['as_stat']['meta_data']['ip'])
            except Exception:
                pass

    def _set_xdr_build(self):
        """Copy each node's xdr build from the capture metadata (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_xdr_build(
                    self.cinfo_data[node]['as_stat']['meta_data']['xdr_build'])
            except Exception:
                pass

    def _set_asd_build(self):
        """Copy each node's asd build from the capture metadata (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_asd_build(
                    self.cinfo_data[node]['as_stat']['meta_data']['asd_build'])
            except Exception:
                pass

    def _set_asd_version(self):
        """Copy each node's edition from the capture metadata (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_asd_version(
                    self.cinfo_data[node]['as_stat']['meta_data']['edition'])
            except Exception:
                pass

    def _set_cluster_name(self):
        """Stamp every node with this snapshot's cluster name (best-effort)."""
        for node in self.nodes:
            try:
                self.nodes[node].set_cluster_name(self.cluster_name)
            except Exception:
                pass
Example #8
0
class Cluster(object):
    # Kinda like a singleton... All instantiated classes will share the same
    # state (Borg pattern: __init__ rebinds each instance's __dict__ to this
    # class-level dict, so every Cluster() sees and mutates the same attrs).
    cluster_state = {}
    # Which flavour of service list to request from nodes; set cluster-wide
    # by __init__.
    use_services_alumni = False
    use_services_alt = False
    # Serializes _refresh_cluster/_crawl across threads.
    crawl_lock = threading.Lock()

    def __init__(self,
                 seed_nodes,
                 user=None,
                 password=None,
                 use_services_alumni=False,
                 use_services_alt=False,
                 ssl_context=None,
                 only_connect_seed=False,
                 timeout=5):
        """
        Build (or re-use) the shared cluster state and crawl the cluster.

        Want to be able to support multiple nodes on one box (for testing)
        seed_nodes should be the form (address,port,tls) address can be fqdn or ip.

        user/password are forwarded to each Node for authentication.
        only_connect_seed restricts the crawl to the seed nodes only.
        timeout is the per-node network timeout in seconds.
        """

        # Borg pattern: every instance shares cluster_state.  If the shared
        # dict is already populated, a previous instance has finished
        # initializing, so skip re-initialization (and the crawl).
        self.__dict__ = self.cluster_state
        if self.cluster_state != {}:
            return

        self._timeout = timeout

        self.user = user
        self.password = password
        # stored on the class so all Node creations see the same flags
        Cluster.use_services_alumni = use_services_alumni
        Cluster.use_services_alt = use_services_alt

        # self.nodes is a dict from Node ID -> Node objects
        self.nodes = {}

        # to avoid multiple entries of endpoints for same server we are keeping
        # this pointers
        self.aliases = {}

        # self.node_lookup is a dict of (fqdn, port) -> Node
        # and (ip, port) -> Node, and node.node_id -> Node
        self.node_lookup = LookupDict()

        # seeds are kept separately so a later crawl can restart from them
        self._original_seed_nodes = set(seed_nodes)
        self._seed_nodes = set(seed_nodes)
        self._live_nodes = set()
        self.ssl_context = ssl_context

        # crawl the cluster search for nodes in addition to the seed nodes.
        self.last_cluster_refresh_time = 0
        self.only_connect_seed = only_connect_seed
        self._refresh_cluster()

    def __str__(self):
        nodes = self.nodes.values()
        if len(nodes) == 0:
            return ""

        online = [n.key for n in filter(lambda n: n.alive, nodes)]
        offline = [n.key for n in filter(lambda n: not n.alive, nodes)]

        retval = "Found %s nodes" % (len(nodes))
        if online:
            retval += "\nOnline:  %s" % (", ".join(online))
        if offline:
            retval += "\nOffline: %s" % (", ".join(offline))

        return retval

    def get_node_displaynames(self):
        """Return {node key: display name} for every known node.

        Plain ip:port socket names are used verbatim; FQDN-based names are
        shortened through the lookup dict so they stay readable in tables.
        """
        display = {}
        for key, node in self.nodes.items():
            sock = node.sock_name(use_fqdn=True)
            if commonutil.is_valid_ip_port(sock):
                display[key] = sock
            else:
                display[key] = self.node_lookup.get_shortname(
                    sock, min_prefix_len=20, min_suffix_len=5)

        return display

    def get_node_names(self):
        node_names = {}
        for node_key, node in self.nodes.iteritems():
            node_names[node_key] = node.sock_name(use_fqdn=True)

        return node_names

    def get_expected_principal(self):
        try:
            principal = "0"
            for k in self.nodes.keys():
                n = self.nodes[k]
                if n.node_id.zfill(16) > principal.zfill(16):
                    principal = n.node_id
            return principal
        except Exception as e:
            print e
            return ''

    def get_live_nodes(self):
        """Return the set of (ip, port, tls_name) tuples of alive nodes.

        Maintained by _refresh_node_liveliness after each crawl.
        """
        return self._live_nodes

    def get_visibility_error_nodes(self):
        """Return keys of alive nodes whose peers list disagrees with the
        cluster's live-node set.

        NOTE(review): the `!= 1` check appears to assume a node never lists
        itself as a peer, so a healthy node is missing exactly one live
        endpoint (itself) -- confirm against Node.peers semantics.
        """
        visible = self.get_live_nodes()
        cluster_visibility_error_nodes = []
        for k in self.nodes.keys():
            node = self.nodes[k]
            if not node.alive:
                # in case of using alumni services, we might have offline nodes
                # which can't detect online nodes
                continue
            # flatten nested endpoint lists into a single endpoint list
            peers = util.flatten(node.peers)
            # live endpoints this node does not report as peers
            not_visible = set(visible) - set(peers)
            if len(not_visible) != 1:
                cluster_visibility_error_nodes.append(node.key)

        return cluster_visibility_error_nodes

    def get_down_nodes(self):
        """Return keys of endpoints the alumni lists remember but the
        current peers lists no longer contain (nodes believed down)."""
        down = []
        for node in self.nodes.values():
            try:
                if not node.alive:
                    # offline nodes (possible with alumni services) cannot
                    # tell us who is currently up
                    continue
                alumni = set(util.flatten(node.get_alumni_peers()))
                current = set(util.flatten(node.get_peers()))
                for endpoint in alumni - current:
                    key = Node.create_key(endpoint[0], endpoint[1])
                    if key not in down:
                        down.append(key)
            except Exception:
                pass

        return down

    def update_aliases(self, aliases, endpoints, key):
        for e in endpoints:
            try:
                addr = e[0]
                port = e[1]
                node_key = Node.create_key(addr, port)
                if len(aliases) == 0 or not node_key in aliases:
                    # same node's service addresses not added already
                    aliases[node_key] = key
                else:
                    # same node's service addresses added already
                    # Ex. NIC down IP available in service list
                    # Avoid creation of two nodes
                    aliases_node_key = aliases[node_key]
                    if aliases_node_key != key:
                        node = self.nodes[aliases_node_key]
                        if not node.alive:
                            aliases[node_key] = key
            except Exception:
                pass

    def find_new_nodes(self):
        """Refresh all known nodes and return endpoints still to visit.

        Rebuilds self.aliases from the refreshed service addresses.  On the
        first call (no nodes yet) the original seed nodes are returned.
        """
        added_endpoints = []
        peers = []
        aliases = {}
        if self.nodes:
            # NOTE(review): self.nodes is popped while iterating .keys();
            # safe on Python 2 where keys() returns a copy -- confirm
            # before running under Python 3.
            for node_key in self.nodes.keys():
                node = self.nodes[node_key]
                node.refresh_connection()
                if node.key != node_key:
                    # change in service list
                    self.nodes.pop(node_key)
                    self.update_node(node)
                _endpoints = node.service_addresses
                self.update_aliases(aliases, _endpoints, node.key)
                added_endpoints = added_endpoints + _endpoints
                if not self.only_connect_seed:
                    peers = peers + node.peers
        else:
            # first crawl: start from the seeds given at construction time
            peers = self._original_seed_nodes

        if not added_endpoints:
            return peers
        else:
            # IPv6 addresses are not available in service list we need
            # to check those missing endpoints and add into aliases list
            # following set operation removes only single IPv4 addresses
            # which are present in both list( for old server code < 3.10)
            # But it will keep peers-list as it is, so we will check it again
            # while crawling and update missing endpoints(IPv6) to aliases
            nodes_to_add = list(set(peers) - set(added_endpoints))
            self.aliases = copy.deepcopy(aliases)

        return nodes_to_add

    def _crawl(self):
        """
        Find all the nodes in the cluster and add them to self.nodes.

        Breadth-first crawl: register every unvisited endpoint in parallel,
        then (unless only_connect_seed) collect each live node's peers and
        visit those too, until no unvisited endpoints remain.
        """
        nodes_to_add = self.find_new_nodes()
        if not nodes_to_add or len(nodes_to_add) == 0:
            return
        try:
            all_services = set()
            visited = set()
            unvisited = set(nodes_to_add)

            while unvisited - visited:
                l_unvisited = list(unvisited)
                # connect/register this round's endpoints in parallel
                nodes = util.concurrent_map(self._register_node, l_unvisited)
                live_nodes = [
                    node for node in nodes
                    if (node is not None and node.alive and node not in visited
                        )
                ]
                visited |= unvisited
                unvisited.clear()

                if not self.only_connect_seed:
                    # gather peer endpoints of live nodes for the next round
                    services_list = util.concurrent_map(
                        self._get_services, live_nodes)
                    for node, services in zip(live_nodes, services_list):
                        if isinstance(services, Exception):
                            continue
                        all_services.update(set(services))
                        all_services.add((node.ip, node.port, node.tls_name))
                unvisited = all_services - visited
            self._refresh_node_liveliness()
        except Exception:
            # best effort: a failed crawl keeps the previous node view
            pass
        finally:
            # always prune node entries no longer backed by an alias
            self.clear_node_list()

    def clear_node_list(self):
        # remove old entries from self.nodes
        # helps to remove multiple entries of same node ( in case of service
        # list change or node is up after going down)
        service_nodes = set(self.aliases.values())
        for n in self.nodes.keys():
            if n not in service_nodes:
                self.nodes.pop(n)

    def _refresh_node_liveliness(self):
        live_nodes = [node for node in self.nodes.itervalues() if node.alive]
        self._live_nodes.clear()
        self._live_nodes.update(
            ((node.ip, node.port, node.tls_name) for node in live_nodes))

    def update_node(self, node):
        self.nodes[node.key] = node
        # add node to lookup
        self.node_lookup[node.sock_name(use_fqdn=True)] = node
        self.node_lookup[node.sock_name()] = node
        if node.alive:
            self.node_lookup[node.node_id] = node

    def get_node(self, node):
        """Resolve *node* (ip:port, fqdn:port or node id) via the lookup dict.

        May return a list for a partial/ambiguous key (see call_node_method).
        """
        return self.node_lookup[node]

    def _register_node(self, addr_port_tls):
        if not addr_port_tls:
            return None
        if not isinstance(addr_port_tls, tuple):
            return None
        if not isinstance(addr_port_tls[0], tuple):
            return self._create_node(addr_port_tls, force=True)

        new_node = None
        for i, a_p_t in enumerate(addr_port_tls):
            if i == len(addr_port_tls) - 1:
                new_node = self._create_node(a_p_t, force=True)
            else:
                new_node = self._create_node(a_p_t)
            if not new_node:
                continue
            else:
                break
        self.update_aliases(self.aliases, addr_port_tls, new_node.key)
        return new_node

    def is_present_as_alias(self, addr, port, aliases=None):
        """Return True if endpoint (addr, port) already has an alias entry.

        Falls back to self.aliases when *aliases* is falsy (note: an
        explicitly passed empty dict is treated the same as None).
        """
        lookup = aliases if aliases else self.aliases
        return Node.create_key(addr, port) in lookup

    def get_node_for_alias(self, addr, port):
        """Return the Node registered for endpoint (addr, port), or None."""
        try:
            if not self.is_present_as_alias(addr, port):
                return None
            node_key = self.aliases[Node.create_key(addr, port)]
            return self.nodes[node_key]
        except Exception:
            return None

    def _create_node(self, addr_port_tls, force=False):
        """
        Instantiate and return a new node

        If cannot instantiate node, return None.
        Creates a new node if:
           1) key(addr,port) is not available in self.aliases

        With force=True an offline node is still returned; otherwise an
        offline node is closed and None is returned so the caller can try
        the node's other endpoints.
        """
        try:
            # tuple of length 3 for server version >= 3.10.0 (with tls name)
            addr, port, tls_name = addr_port_tls
        except Exception:
            try:
                # tuple of length 2 for server version < 3.10.0 ( without tls
                # name)
                addr, port = addr_port_tls
                tls_name = None
            except Exception:
                print "ip_port is expected to be a tuple of len 2, " + \
                    "instead it is of type %s and str value of %s" % (
                        type(addr_port_tls), str(addr_port_tls))
                return None
        try:
            if self.is_present_as_alias(addr, port):
                # Alias entry already added for this endpoint
                n = self.get_node_for_alias(addr, port)
                if n:
                    # Node already added for this endpoint
                    # No need to check for offline/online as we already did
                    # this while finding new nodes to add
                    return n
                # else
                # Will create node again

            # if not existing:
            # Node() connects during construction; auth/ssl settings come
            # from the shared cluster configuration.
            new_node = Node(addr,
                            port,
                            tls_name=tls_name,
                            timeout=self._timeout,
                            user=self.user,
                            password=self.password,
                            consider_alumni=Cluster.use_services_alumni,
                            use_services_alt=Cluster.use_services_alt,
                            ssl_context=self.ssl_context)

            if not new_node:
                return new_node
            if not new_node.alive:
                if not force:
                    # Check other endpoints
                    new_node.close()
                    return None
            self.update_node(new_node)
            self.update_aliases(self.aliases, new_node.service_addresses,
                                new_node.key)
            return new_node
        except Exception:
            # any failure (connection, auth, bad endpoint) yields None
            return None

    @staticmethod
    def _get_services(node):
        """
        Given a node object return its services list / peers list
        """
        try:
            return node.peers
        except Exception:
            return []

    def need_to_refresh_cluster(self):
        """True when the last crawl is older than CLUSTER_REFRESH_INTERVAL."""
        age = time() - self.last_cluster_refresh_time
        return age > CLUSTER_REFRESH_INTERVAL

    def _refresh_cluster(self):
        with Cluster.crawl_lock:
            try:
                if self.need_to_refresh_cluster():
                    self._crawl()
                    self.last_cluster_refresh_time = time()
            except Exception as e:
                print e
                raise e

    def call_node_method(self, nodes, method_name, *args, **kwargs):
        """
        Run *method_name* across a set of nodes and collect the results.

        nodes is either the string 'all' (every known node) or a list of
        keys/ips/fqdns resolvable via get_node().  Returns a dict of
        {node.key: method result}.  Raises TypeError for any other *nodes*
        value and IOError when no node could be resolved.
        """
        # a stale view would dispatch to dead or missing nodes
        if self.need_to_refresh_cluster():
            self._refresh_cluster()

        if nodes == 'all':
            use_nodes = self.nodes.values()
        elif isinstance(nodes, list):
            use_nodes = []
            for node in nodes:
                try:
                    # get_node may resolve one key to several nodes (list)
                    node_list = self.get_node(node)
                    if isinstance(node_list, list):
                        use_nodes.extend(self.get_node(node))
                    else:
                        use_nodes.append(self.get_node(node))
                except Exception:  # Ignore ambiguous and key errors
                    continue
        else:
            raise TypeError("nodes should be 'all' or list found %s" %
                            type(nodes))
        if len(use_nodes) == 0:
            raise IOError('Unable to find any Aerospike nodes')
        # fan the method call out in parallel; results keyed by node key
        return dict(
            util.concurrent_map(
                lambda node: (node.key, getattr(node, method_name)
                              (*args, **kwargs)), use_nodes))

    def is_XDR_enabled(self, nodes='all'):
        """Return per-node is_XDR_enabled results, keyed by node key."""
        return self.call_node_method(nodes, 'is_XDR_enabled')

    def is_feature_present(self, feature, nodes='all'):
        """Return per-node is_feature_present(feature) results, keyed by node key."""
        return self.call_node_method(nodes, 'is_feature_present', feature)

    def get_IP_to_node_map(self):
        if self.need_to_refresh_cluster():
            self._refresh_cluster()
        node_map = {}
        for a in self.aliases.keys():
            try:
                node_map[a] = self.nodes.get(self.aliases[a]).node_id
            except Exception:
                pass
        return node_map

    def get_node_to_IP_map(self):
        if self.need_to_refresh_cluster():
            self._refresh_cluster()
        ip_map = {}
        for a in self.aliases.keys():
            try:
                id = self.nodes.get(self.aliases[a]).node_id
                if id in ip_map:
                    ip_map[id] = ip_map[id] + ", " + a
                else:
                    ip_map[id] = a
            except Exception:
                pass
        return ip_map

    def __getattr__(self, name):
        regex = re.compile("^info.*$|^xdr.*$")
        if regex.match(name):

            def info_func(*args, **kwargs):
                if 'nodes' not in kwargs:
                    nodes = 'all'
                else:
                    nodes = kwargs['nodes']
                    del kwargs['nodes']

                return self.call_node_method(nodes, name, *args, **kwargs)

            return info_func
        else:
            raise AttributeError("Cluster has not attribute '%s'" % (name))

    def close(self):
        for node_key in self.nodes.keys():
            try:
                node = self.nodes[node_key]
                node.close()
            except Exception:
                pass
        self.nodes = None
        self.node_lookup = None