def agent_changed(rid=None, unit=None):
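    """Handle a change on the agent relation.

    Once the shared-db, amqp and identity-service relations are complete,
    add the remote agent's and the local unit's hostname/address pairs to
    the SSH known hosts cache.
    """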
    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return
    if 'amqp' not in CONFIGS.complete_contexts():
        juju_log('amqp relation incomplete. Peer not ready?')
        return
    if 'identity-service' not in CONFIGS.complete_contexts():
        juju_log('identity-service relation incomplete. Peer not ready?')
        return

    juju_log('**********rid is %s' % str(rid))
    juju_log('**********unit is %s' % str(unit))
    rel_settings = relation_get(rid=rid, unit=unit)
    agent_hostname = rel_settings.get('hostname')
    agent_hostaddress = rel_settings.get('hostaddress')
    juju_log('**********agent_hostname is %s' % agent_hostname)
    juju_log('**********agent_hostaddress is %s' % agent_hostaddress)
    host = unit_get('private-address')
    hostname = get_hostname(host)
    hostaddress = get_host_ip(host)
    juju_log('**********host is %s' % host)
    juju_log('**********hostname is %s' % hostname)
    juju_log('**********hostaddress is %s' % hostaddress)
    hosts = [agent_hostname, hostname, agent_hostaddress, hostaddress]
    for host in hosts:
        if host:
            add_known_host(host, unit=unit, user='******')


def ssh_compute_add(public_key, rid=None, unit=None, user=None):
    # If remote compute node hands us a hostname, ensure we have a
    # known hosts entry for its IP, hostname and FQDN.
    private_address = relation_get(rid=rid, unit=unit,
                                   attribute='private-address')
    hosts = [private_address]

    if not is_ipv6(private_address):
        if relation_get('hostname'):
            hosts.append(relation_get('hostname'))

        if not is_ip(private_address):
            hosts.append(get_host_ip(private_address))
            hosts.append(private_address.split('.')[0])
        else:
            hn = get_hostname(private_address)
            hosts.append(hn)
            hosts.append(hn.split('.')[0])

    for host in list(set(hosts)):
        add_known_host(host, unit, user)

    if not ssh_authorized_key_exists(public_key, unit, user):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key, unit, user)
Example #3
def ssh_compute_add(public_key, rid=None, unit=None, user=None):
    # If remote compute node hands us a hostname, ensure we have a
    # known hosts entry for its IP, hostname and FQDN.
    private_address = relation_get(rid=rid,
                                   unit=unit,
                                   attribute='private-address')
    hosts = [private_address]

    if not is_ipv6(private_address):
        if relation_get('hostname'):
            hosts.append(relation_get('hostname'))

        if not is_ip(private_address):
            hosts.append(get_host_ip(private_address))
            short = private_address.split('.')[0]
            if ns_query(short):
                hosts.append(short)
        else:
            hn = get_hostname(private_address)
            if hn:
                hosts.append(hn)
                short = hn.split('.')[0]
                if ns_query(short):
                    hosts.append(short)

    for host in list(set(hosts)):
        add_known_host(host, unit, user)

    if not ssh_authorized_key_exists(public_key, unit, user):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key, unit, user)
Example #4
def agent_joined(relation_id=None):
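    """Initialise SSH keys and publish this unit's hostname and IP address
    on the agent relation.
    """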
    initialize_ssh_keys()
    host = unit_get('private-address')
    settings = {
        'hostname': get_hostname(host),
        'hostaddress': get_host_ip(host)
    }

    relation_set(relation_id=relation_id, **settings)
Example #5
def cluster_with():
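    """Attempt to cluster this RabbitMQ node with one of its peers."""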
    vers = rabbit_version()
    if vers >= '3.0.1-1':
        cluster_cmd = 'join_cluster'
        cmd = [RABBITMQ_CTL, 'set_policy', 'HA',
               r'^(?!amq\.).*', '{"ha-mode": "all"}']
        subprocess.check_call(cmd)
    else:
        cluster_cmd = 'cluster'
    out = subprocess.check_output([RABBITMQ_CTL, 'cluster_status'])
    current_host = subprocess.check_output(['hostname']).strip()

    # check all peers and try to cluster with them
    available_nodes = []
    first_hostname = utils.relation_get('host')
    if first_hostname:
        available_nodes.append(first_hostname)

    for r_id in (utils.relation_ids('cluster') or []):
        for unit in (utils.relation_list(r_id) or []):
            address = utils.relation_get('private_address',
                                         rid=r_id,
                                         unit=unit)
            if address is not None:
                node = get_hostname(address, fqdn=False)
                if current_host != node:
                    available_nodes.append(node)

    # iterate over all the nodes, join to the first available
    for node in available_nodes:
        utils.juju_log('INFO',
                       'Clustering with remote rabbit host (%s).' % node)
        for line in out.split('\n'):
            if re.search(node, line):
                utils.juju_log('INFO',
                               'Host already clustered with %s.' % node)
                return

        try:
            cmd = [RABBITMQ_CTL, 'stop_app']
            subprocess.check_call(cmd)
            cmd = [RABBITMQ_CTL, cluster_cmd, 'rabbit@%s' % node]
            subprocess.check_call(cmd)
            cmd = [RABBITMQ_CTL, 'start_app']
            subprocess.check_call(cmd)
            utils.juju_log('INFO', 'Host clustered with %s.' % node)
            return
        except Exception:
            # clustering failed, continue to the next node
            pass

    # error, no nodes available for clustering
    utils.juju_log('ERROR', 'No nodes available for clustering')
    sys.exit(1)
Example #6
def cluster_with():
    vers = rabbit_version()
    if vers >= '3.0.1-1':
        cluster_cmd = 'join_cluster'
        cmd = [RABBITMQ_CTL, 'set_policy', 'HA',
               r'^(?!amq\.).*', '{"ha-mode": "all"}']
        subprocess.check_call(cmd)
    else:
        cluster_cmd = 'cluster'
    out = subprocess.check_output([RABBITMQ_CTL, 'cluster_status'])
    current_host = subprocess.check_output(['hostname']).strip()

    # check all peers and try to cluster with them
    available_nodes = []
    first_hostname = utils.relation_get('host')
    if first_hostname:
        available_nodes.append(first_hostname)

    for r_id in (utils.relation_ids('cluster') or []):
        for unit in (utils.relation_list(r_id) or []):
            address = utils.relation_get('private_address',
                                         rid=r_id, unit=unit)
            if address is not None:
                node = get_hostname(address, fqdn=False)
                if current_host != node:
                    available_nodes.append(node)

    # iterate over all the nodes, join to the first available
    for node in available_nodes:
        utils.juju_log('INFO',
                       'Clustering with remote rabbit host (%s).' % node)
        for line in out.split('\n'):
            if re.search(node, line):
                utils.juju_log('INFO',
                               'Host already clustered with %s.' % node)
                return

        try:
            cmd = [RABBITMQ_CTL, 'stop_app']
            subprocess.check_call(cmd)
            cmd = [RABBITMQ_CTL, cluster_cmd, 'rabbit@%s' % node]
            subprocess.check_call(cmd)
            cmd = [RABBITMQ_CTL, 'start_app']
            subprocess.check_call(cmd)
            utils.juju_log('INFO', 'Host clustered with %s.' % node)
            return
        except Exception:
            # clustering failed, continue to the next node
            pass

    # error, no nodes available for clustering
    utils.juju_log('ERROR', 'No nodes available for clustering')
    sys.exit(1)


def get_node_hostname(ip_addr):
    ''' Resolve IP address to hostname '''
    try:
        nodename = get_hostname(ip_addr, fqdn=False)
    except Exception:
        log('Cannot resolve hostname for %s using DNS servers' % ip_addr,
            level=WARNING)
        log('Falling back to use socket.gethostname()', level=WARNING)
        # If the private-address is not resolvable using DNS
        # then use the current hostname
        nodename = socket.gethostname()
    log('local nodename: %s' % nodename, level=INFO)
    return nodename


def get_node_hostname(ip_addr):
    ''' Resolve IP address to hostname '''
    try:
        nodename = get_hostname(ip_addr, fqdn=False)
    except Exception:
        log('Cannot resolve hostname for %s using DNS servers' % ip_addr,
            level=WARNING)
        log('Falling back to use socket.gethostname()',
            level=WARNING)
        # If the private-address is not resolvable using DNS
        # then use the current hostname
        nodename = socket.gethostname()
    log('local nodename: %s' % nodename, level=INFO)
    return nodename
Example #9
def ssh_compute_add_host_and_key(public_key,
                                 hostname,
                                 private_address,
                                 application_name,
                                 user=None):
    """Add a compute nodes ssh details to local cache.

    Collect various hostname variations and add the corresponding host keys to
    the local known hosts file. Finally, add the supplied public key to the
    authorized_key file.

    :param public_key: Public key.
    :type public_key: str
    :param hostname: Hostname to collect host keys from.
    :type hostname: str
    :param private_address: Corresponding private address for hostname.
    :type private_address: str
    :param application_name: Name of application, e.g. nova-compute-something.
    :type application_name: str
    :param user: The user that the SSH assets are for.
    :type user: str
    """
    # If remote compute node hands us a hostname, ensure we have a
    # known hosts entry for its IP, hostname and FQDN.
    hosts = [private_address]

    if not is_ipv6(private_address):
        if hostname:
            hosts.append(hostname)

        if is_ip(private_address):
            hn = get_hostname(private_address)
            if hn:
                hosts.append(hn)
                short = hn.split('.')[0]
                if ns_query(short):
                    hosts.append(short)
        else:
            hosts.append(get_host_ip(private_address))
            short = private_address.split('.')[0]
            if ns_query(short):
                hosts.append(short)

    for host in list(set(hosts)):
        add_known_host(host, application_name, user)

    if not ssh_authorized_key_exists(public_key, application_name, user):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key, application_name, user)


def get_local_nodename():
    '''Resolve local nodename into something that's universally addressable'''
    ip_addr = get_host_ip(unit_get('private-address'))
    log('getting local nodename for ip address: %s' % ip_addr, level=INFO)
    try:
        nodename = get_hostname(ip_addr, fqdn=False)
    except Exception:
        log('Cannot resolve hostname for %s using DNS servers' % ip_addr,
            level='WARNING')
        log('Falling back to use socket.gethostname()',
            level='WARNING')
        # If the private-address is not resolvable using DNS
        # then use the current hostname
        nodename = socket.gethostname()
    log('local nodename: %s' % nodename, level=INFO)
    return nodename


def ssh_compute_add_host_and_key(public_key, hostname, private_address,
                                 application_name, user=None):
    """Add a compute nodes ssh details to local cache.

    Collect various hostname variations and add the corresponding host keys to
    the local known hosts file. Finally, add the supplied public key to the
    authorized_key file.

    :param public_key: Public key.
    :type public_key: str
    :param hostname: Hostname to collect host keys from.
    :type hostname: str
    :param private_address: Corresponding private address for hostname.
    :type private_address: str
    :param application_name: Name of application, e.g. nova-compute-something.
    :type application_name: str
    :param user: The user that the SSH assets are for.
    :type user: str
    """
    # If remote compute node hands us a hostname, ensure we have a
    # known hosts entry for its IP, hostname and FQDN.
    hosts = [private_address]

    if not is_ipv6(private_address):
        if hostname:
            hosts.append(hostname)

        if is_ip(private_address):
            hn = get_hostname(private_address)
            if hn:
                hosts.append(hn)
                short = hn.split('.')[0]
                if ns_query(short):
                    hosts.append(short)
        else:
            hosts.append(get_host_ip(private_address))
            short = private_address.split('.')[0]
            if ns_query(short):
                hosts.append(short)

    for host in list(set(hosts)):
        add_known_host(host, application_name, user)

    if not ssh_authorized_key_exists(public_key, application_name, user):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key, application_name, user)
Example #12
def agent_changed(rid=None, unit=None):
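    """Handle a change on the agent relation for a VSM agent node.

    Once the shared-db and amqp relations are complete, install the
    controller's SSH public key, add this host to /etc/hosts, update the
    server manifest with the controller address and token/tenant, and
    restart the vsm-agent and vsm-physical services.
    """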
    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return
    if 'amqp' not in CONFIGS.complete_contexts():
        juju_log('amqp relation incomplete. Peer not ready?')
        return

    with open('/etc/manifest/server.manifest') as server_manifest:
        flag = 'token-tenant' in server_manifest.read()

    if flag:
        rel_settings = relation_get(rid=rid, unit=unit)
        key = rel_settings.get('ssh_public_key')
        juju_log("**********key is %s" % str(key))
        if not key:
            juju_log('peer did not publish key?')
            return
        ssh_controller_key_add(key, rid=rid, unit=unit)
        host = unit_get('private-address')
        hostname = get_hostname(host)
        hostaddress = get_host_ip(host)
        juju_log("**********host is %s" % host)
        juju_log("**********hostname is %s" % hostname)
        juju_log("**********hostaddress is %s" % hostaddress)
        with open('/etc/hosts', 'a') as hosts:
            hosts.write('%s  %s' % (hostaddress, hostname) + '\n')

        token_tenant = rel_settings.get('token_tenant')
        juju_log("**********token_tenant is %s" % token_tenant)
        rsync(
            charm_dir() + '/files/server.manifest',
            '/etc/manifest/server.manifest'
        )
        c_hostaddress = rel_settings.get('hostaddress')
        juju_log("**********controller_hostaddress is %s" % c_hostaddress)
        subprocess.check_call(['sudo', 'sed', '-i',
                               's/^controller_ip/%s/g' % c_hostaddress,
                               '/etc/manifest/server.manifest'])
        subprocess.check_call(['sudo', 'sed', '-i',
                               's/token-tenant/%s/g' % token_tenant,
                               '/etc/manifest/server.manifest'])
        subprocess.check_call(['sudo', 'service', 'vsm-agent', 'stop'])
        subprocess.check_call(['sudo', 'service', 'vsm-agent', 'start'])
        subprocess.check_call(['sudo', 'service', 'vsm-physical', 'stop'])
        subprocess.check_call(['sudo', 'service', 'vsm-physical', 'start'])
        juju_log("**********start vsm-agent")
        juju_log("**********start vsm-physical")
def agent_joined(relation_id=None):
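    """Publish this unit's hostname, address, agent token and SSH public key
    on the agent relation.
    """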
    initialize_ssh_keys()
    host = unit_get('private-address')
    settings = {
        'hostname': get_hostname(host),
        'hostaddress': get_host_ip(host)
    }

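    # Derive the keystone host from the configured identity_uri and request
    # an agent token using the service admin credentials ('agent-token' is
    # assumed to print the token for the tenant).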
    keystone_host = auth_token_config(
        'identity_uri').split('/')[2].split(':')[0]
    admin_tenant_name = auth_token_config('admin_tenant_name')
    admin_user = auth_token_config('admin_user')
    admin_password = auth_token_config('admin_password')
    args = ['agent-token', admin_tenant_name, admin_user,
            admin_password, keystone_host]
    token_tenant = subprocess.check_output(args).strip('\n')
    settings['token_tenant'] = token_tenant

    settings['ssh_public_key'] = public_ssh_key()
    relation_set(relation_id=relation_id, **settings)


def ssh_compute_add(public_key, user=None):
    # If remote compute node hands us a hostname, ensure we have a
    # known hosts entry for its IP, hostname and FQDN.
    private_address = relation_get("private-address")
    hosts = [private_address]

    if not is_ip(private_address):
        hosts.append(get_host_ip(private_address))
        hosts.append(private_address.split(".")[0])
    else:
        hn = get_hostname(private_address)
        hosts.append(hn)
        hosts.append(hn.split(".")[0])

    for host in list(set(hosts)):
        if not ssh_known_host_key(host, user):
            add_known_host(host, user)

    if not ssh_authorized_key_exists(public_key, user):
        log("Saving SSH authorized key for compute host at %s." % private_address)
        add_authorized_key(public_key, user)
Example #15
def reassign_agent_resources():
    ''' Use agent scheduler API to detect down agents and re-schedule '''
    env = NetworkServiceContext()()
    if not env:
        log('Unable to re-assign resources at this time')
        return
    try:
        from quantumclient.v2_0 import client
    except ImportError:
        # Try to import neutronclient instead for havana+
        from neutronclient.v2_0 import client

    auth_url = '%(auth_protocol)s://%(keystone_host)s:%(auth_port)s/v2.0' % env
    quantum = client.Client(username=env['service_username'],
                            password=env['service_password'],
                            tenant_name=env['service_tenant'],
                            auth_url=auth_url,
                            region_name=env['region'])

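    # Build the list of short hostnames for this unit and its cluster peers;
    # only agents running on these hosts are used as rescheduling targets.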
    partner_gateways = [unit_private_ip().split('.')[0]]
    for partner_gateway in relations_of_type(reltype='cluster'):
        gateway_hostname = get_hostname(partner_gateway['private-address'])
        partner_gateways.append(gateway_hostname.partition('.')[0])

    agents = quantum.list_agents(agent_type=DHCP_AGENT)
    dhcp_agents = []
    l3_agents = []
    networks = {}
    for agent in agents['agents']:
        if not agent['alive']:
            log('DHCP Agent %s down' % agent['id'])
            for network in \
                    quantum.list_networks_on_dhcp_agent(
                        agent['id'])['networks']:
                networks[network['id']] = agent['id']
        else:
            if agent['host'].partition('.')[0] in partner_gateways:
                dhcp_agents.append(agent['id'])

    agents = quantum.list_agents(agent_type=L3_AGENT)
    routers = {}
    for agent in agents['agents']:
        if not agent['alive']:
            log('L3 Agent %s down' % agent['id'])
            for router in \
                    quantum.list_routers_on_l3_agent(
                        agent['id'])['routers']:
                routers[router['id']] = agent['id']
        else:
            if agent['host'].split('.')[0] in partner_gateways:
                l3_agents.append(agent['id'])

    if len(dhcp_agents) == 0 or len(l3_agents) == 0:
        log('Unable to relocate resources, there are %s dhcp_agents and '
            '%s l3_agents in this cluster' % (len(dhcp_agents),
                                              len(l3_agents)))
        return

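    # Round-robin the routers and networks stranded on dead agents across
    # the live agents.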
    index = 0
    for router_id in routers:
        agent = index % len(l3_agents)
        log('Moving router %s from %s to %s' %
            (router_id, routers[router_id], l3_agents[agent]))
        quantum.remove_router_from_l3_agent(l3_agent=routers[router_id],
                                            router_id=router_id)
        quantum.add_router_to_l3_agent(l3_agent=l3_agents[agent],
                                       body={'router_id': router_id})
        index += 1

    index = 0
    for network_id in networks:
        agent = index % len(dhcp_agents)
        log('Moving network %s from %s to %s' %
            (network_id, networks[network_id], dhcp_agents[agent]))
        quantum.remove_network_from_dhcp_agent(dhcp_agent=networks[network_id],
                                               network_id=network_id)
        quantum.add_network_to_dhcp_agent(dhcp_agent=dhcp_agents[agent],
                                          body={'network_id': network_id})
        index += 1
Example #16
def reassign_agent_resources():
    ''' Use agent scheduler API to detect down agents and re-schedule '''
    env = NetworkServiceContext()()
    if not env:
        log('Unable to re-assign resources at this time')
        return
    try:
        from quantumclient.v2_0 import client
    except ImportError:
        # Try to import neutronclient instead for havana+
        from neutronclient.v2_0 import client

    auth_url = '%(auth_protocol)s://%(keystone_host)s:%(auth_port)s/v2.0' % env
    quantum = client.Client(username=env['service_username'],
                            password=env['service_password'],
                            tenant_name=env['service_tenant'],
                            auth_url=auth_url,
                            region_name=env['region'])

    partner_gateways = [unit_private_ip().split('.')[0]]
    for partner_gateway in relations_of_type(reltype='cluster'):
        gateway_hostname = get_hostname(partner_gateway['private-address'])
        partner_gateways.append(gateway_hostname.partition('.')[0])

    agents = quantum.list_agents(agent_type=DHCP_AGENT)
    dhcp_agents = []
    l3_agents = []
    networks = {}
    for agent in agents['agents']:
        if not agent['alive']:
            log('DHCP Agent %s down' % agent['id'])
            for network in \
                    quantum.list_networks_on_dhcp_agent(
                        agent['id'])['networks']:
                networks[network['id']] = agent['id']
        else:
            if agent['host'].partition('.')[0] in partner_gateways:
                dhcp_agents.append(agent['id'])

    agents = quantum.list_agents(agent_type=L3_AGENT)
    routers = {}
    for agent in agents['agents']:
        if not agent['alive']:
            log('L3 Agent %s down' % agent['id'])
            for router in \
                    quantum.list_routers_on_l3_agent(
                        agent['id'])['routers']:
                routers[router['id']] = agent['id']
        else:
            if agent['host'].split('.')[0] in partner_gateways:
                l3_agents.append(agent['id'])

    if len(dhcp_agents) == 0 or len(l3_agents) == 0:
        log('Unable to relocate resources, there are %s dhcp_agents and '
            '%s l3_agents in this cluster' % (len(dhcp_agents),
                                              len(l3_agents)))
        return

    index = 0
    for router_id in routers:
        agent = index % len(l3_agents)
        log('Moving router %s from %s to %s' %
            (router_id, routers[router_id], l3_agents[agent]))
        quantum.remove_router_from_l3_agent(l3_agent=routers[router_id],
                                            router_id=router_id)
        quantum.add_router_to_l3_agent(l3_agent=l3_agents[agent],
                                       body={'router_id': router_id})
        index += 1

    index = 0
    for network_id in networks:
        agent = index % len(dhcp_agents)
        log('Moving network %s from %s to %s' %
            (network_id, networks[network_id], dhcp_agents[agent]))
        quantum.remove_network_from_dhcp_agent(dhcp_agent=networks[network_id],
                                               network_id=network_id)
        quantum.add_network_to_dhcp_agent(dhcp_agent=dhcp_agents[agent],
                                          body={'network_id': network_id})
        index += 1
Example #17
def reassign_agent_resources():
    """ Use agent scheduler API to detect down agents and re-schedule """
    env = NetworkServiceContext()()
    if not env:
        log("Unable to re-assign resources at this time")
        return
    try:
        from quantumclient.v2_0 import client
    except ImportError:
        """ Try to import neutronclient instead for havana+ """
        from neutronclient.v2_0 import client

    auth_url = "%(auth_protocol)s://%(keystone_host)s:%(auth_port)s/v2.0" % env
    quantum = client.Client(
        username=env["service_username"],
        password=env["service_password"],
        tenant_name=env["service_tenant"],
        auth_url=auth_url,
        region_name=env["region"],
    )

    partner_gateways = [unit_private_ip().split(".")[0]]
    for partner_gateway in relations_of_type(reltype="cluster"):
        gateway_hostname = get_hostname(partner_gateway["private-address"])
        partner_gateways.append(gateway_hostname.partition(".")[0])

    agents = quantum.list_agents(agent_type=DHCP_AGENT)
    dhcp_agents = []
    l3_agents = []
    networks = {}
    for agent in agents["agents"]:
        if not agent["alive"]:
            log("DHCP Agent %s down" % agent["id"])
            for network in quantum.list_networks_on_dhcp_agent(agent["id"])["networks"]:
                networks[network["id"]] = agent["id"]
        else:
            if agent["host"].partition(".")[0] in partner_gateways:
                dhcp_agents.append(agent["id"])

    agents = quantum.list_agents(agent_type=L3_AGENT)
    routers = {}
    for agent in agents["agents"]:
        if not agent["alive"]:
            log("L3 Agent %s down" % agent["id"])
            for router in quantum.list_routers_on_l3_agent(agent["id"])["routers"]:
                routers[router["id"]] = agent["id"]
        else:
            if agent["host"].split(".")[0] in partner_gateways:
                l3_agents.append(agent["id"])

    if len(dhcp_agents) == 0 or len(l3_agents) == 0:
        log(
            "Unable to relocate resources, there are %s dhcp_agents and "
            "%s l3_agents in this cluster"
            % (len(dhcp_agents), len(l3_agents))
        )
        return

    index = 0
    for router_id in routers:
        agent = index % len(l3_agents)
        log("Moving router %s from %s to %s" % (router_id, routers[router_id], l3_agents[agent]))
        quantum.remove_router_from_l3_agent(l3_agent=routers[router_id], router_id=router_id)
        quantum.add_router_to_l3_agent(l3_agent=l3_agents[agent], body={"router_id": router_id})
        index += 1

    index = 0
    for network_id in networks:
        agent = index % len(dhcp_agents)
        log("Moving network %s from %s to %s" % (network_id, networks[network_id], dhcp_agents[agent]))
        quantum.remove_network_from_dhcp_agent(dhcp_agent=networks[network_id], network_id=network_id)
        quantum.add_network_to_dhcp_agent(dhcp_agent=dhcp_agents[agent], body={"network_id": network_id})
        index += 1
Example #18
def cluster_with():
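    """Cluster this RabbitMQ node with one of its peers.

    Returns True if the node was clustered with a peer and False otherwise
    (already clustered, no peers available, or clustering did not succeed).
    """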
    log('Clustering with new node')
    if cmp_pkgrevno('rabbitmq-server', '3.0.1') >= 0:
        cluster_cmd = 'join_cluster'
    else:
        cluster_cmd = 'cluster'
    out = subprocess.check_output([RABBITMQ_CTL, 'cluster_status'])
    log('cluster status is %s' % str(out))

    # check if node is already clustered
    total_nodes = 1
    running_nodes = []
    m = re.search(r"\{running_nodes,\[(.*?)\]\}", out.strip(), re.DOTALL)
    if m is not None:
        running_nodes = m.group(1).split(',')
        running_nodes = [x.replace("'", '') for x in running_nodes]
        total_nodes = len(running_nodes)

    if total_nodes > 1:
        log('Node is already clustered, skipping')
        return False

    # check all peers and try to cluster with them
    available_nodes = []
    for r_id in relation_ids('cluster'):
        for unit in related_units(r_id):
            if config('prefer-ipv6'):
                address = relation_get('hostname',
                                       rid=r_id, unit=unit)
            else:
                address = relation_get('private-address',
                                       rid=r_id, unit=unit)
            if address is not None:
                try:
                    node = get_hostname(address, fqdn=False)
                except Exception:
                    log('Cannot resolve hostname for {} '
                        'using DNS servers'.format(address), level='WARNING')
                    log('Falling back to use socket.gethostname()',
                        level='WARNING')
                    # If the private-address is not resolvable using DNS
                    # then use the current hostname
                    node = socket.gethostname()

                available_nodes.append(node)

    if len(available_nodes) == 0:
        log('No nodes available to cluster with')
        return False

    # iterate over all the nodes, join to the first available
    num_tries = 0
    for node in available_nodes:
        log('Clustering with remote rabbit host (%s).' % node)
        if node in running_nodes:
            log('Host already clustered with %s.' % node)
            return False

        try:
            cmd = [RABBITMQ_CTL, 'stop_app']
            subprocess.check_call(cmd)
            cmd = [RABBITMQ_CTL, cluster_cmd, 'rabbit@%s' % node]
            subprocess.check_call(cmd)
            cmd = [RABBITMQ_CTL, 'start_app']
            subprocess.check_call(cmd)
            log('Host clustered with %s.' % node)
            return True
        except Exception:
            log('Failed to cluster with %s.' % node)
            # continue to the next node
            num_tries += 1
            if num_tries > config('max-cluster-tries'):
                log('Max tries number exhausted, exiting', level=ERROR)
                raise

    return False