def create_fence_nova_device(first_controller_node_ip, domainname):
    LOG.info("Create a seperate fence-nova stonith device.")

    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()

    ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs stonith create fence-nova fence_compute" + " auth-url=" +
        oc_auth_url + " login="******" passwd=" + oc_password +
        " tenant-name=" + oc_tenant_name + " domain=" + domainname +
        " record-only=1 op monitor interval=60s timeout=180s --force")

    # 15) Configure the required constraints for fence-nova
    ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs constraint location" +
        " fence-nova rule resource-discovery=never score=0" +
        " osprole eq controller")

    ssh_cmd(
        first_controller_node_ip, "heat-admin", "sudo pcs constraint order" +
        " promote galera-master then fence-nova require-all=false")

    ssh_cmd(
        first_controller_node_ip, "heat-admin", "sudo pcs constraint order" +
        " start fence-nova then nova-compute-clone")
Example #2
def create_flavors():
    print('Creating overcloud flavors...')

    flavors = [{
        "id": "1",
        "name": "m1.tiny",
        "memory": 512,
        "disk": 1,
        "cpus": 1
    }, {
        "id": "2",
        "name": "m1.small",
        "memory": 2048,
        "disk": 20,
        "cpus": 1
    }, {
        "id": "3",
        "name": "m1.medium",
        "memory": 4096,
        "disk": 40,
        "cpus": 2
    }, {
        "id": "4",
        "name": "m1.large",
        "memory": 8192,
        "disk": 80,
        "cpus": 4
    }, {
        "id": "5",
        "name": "m1.xlarge",
        "memory": 16384,
        "disk": 160,
        "cpus": 8
    }]

    os_auth_url, os_tenant_name, os_username, os_password = \
        CredentialHelper.get_overcloud_creds()

    kwargs = {
        'username': os_username,
        'password': os_password,
        'auth_url': os_auth_url,
        'project_id': os_tenant_name
    }
    n_client = nova_client.Client(2, **kwargs)

    existing_flavor_ids = []
    for existing_flavor in n_client.flavors.list(detailed=False):
        existing_flavor_ids.append(existing_flavor.id)

    for flavor in flavors:
        if flavor["id"] not in existing_flavor_ids:
            print('    Creating ' + flavor["name"])
            n_client.flavors.create(flavor["name"],
                                    flavor["memory"],
                                    flavor["cpus"],
                                    flavor["disk"],
                                    flavorid=flavor["id"])
        else:
            print('    Flavor ' + flavor["name"] + ' already exists')
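# A quick sanity check to run after create_flavors(); this helper is not part
# of the original listing and simply reuses the same novaclient call pattern
# shown above:
def list_overcloud_flavors():
    os_auth_url, os_tenant_name, os_username, os_password = \
        CredentialHelper.get_overcloud_creds()

    n_client = nova_client.Client(2,
                                  username=os_username,
                                  password=os_password,
                                  auth_url=os_auth_url,
                                  project_id=os_tenant_name)

    # Print each flavor with its RAM, disk, and vCPU sizing.
    for flavor in n_client.flavors.list():
        print('{}: {} MB RAM, {} GB disk, {} vCPUs'.format(
            flavor.name, flavor.ram, flavor.disk, flavor.vcpus))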
Example #3
    def create_aggregate(self):
        UC_AUTH_URL, UC_PROJECT_ID, UC_USERNAME, UC_PASSWORD = \
            CredentialHelper.get_overcloud_creds()
        # Create nova client object
        nova = nvclient.Client(
            2,
            UC_USERNAME,
            UC_PASSWORD,
            UC_PROJECT_ID,
            UC_AUTH_URL)
        hostname_list = self.get_dell_compute_nodes_hostnames(nova)
        self.edit_aggregate_environment_file(
            hostname_list)
        env_opts = \
            " -e ~/pilot/templates/create_aggregate_environment.yaml"

        cmd = self.overcloudrc + "openstack stack create " \
            " Dell_Aggregate" \
            " --template" \
            " ~/pilot/templates/createaggregate.yaml" \
            " {}" \
            "".format(env_opts)
        aggregate_create_status = os.system(cmd)
        if aggregate_create_status == 0:
            logger.info("Dell_Aggregate created")
        else:
            raise Exception(
                "Aggregate Dell_Aggregate could not be created..."
                " Exiting post deployment tasks")
def create_nova_evacuate_resource(first_controller_node_ip, domainname):
    LOG.info("Create the nova-evacuate active/passive resource.")

    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()

    ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs resource create nova-evacuate" +
        " ocf:openstack:NovaEvacuate auth_url=" + oc_auth_url + " username="******" password="******" tenant_name=" +
        oc_tenant_name + " domain=" + domainname +
        " op monitor interval=60s timeout=240s --force")
def populate_compute_nodes_resources(first_controller_node_ip, domainname):
    LOG.info("Populate the compute node resources within pacemaker.")

    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()

    ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs resource create nova-compute-checkevacuate" +
        " ocf:openstack:nova-compute-wait auth_url=" + oc_auth_url +
        " username="******" password="******" tenant_name=" + oc_tenant_name + " domain=" + domainname +
        " op start timeout=300 --clone interleave=true --disabled --force")

    ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs constraint location nova-compute-checkevacuate-clone" +
        " rule resource-discovery=exclusive score=0 osprole eq compute")

    ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs resource create nova-compute" +
        " systemd:openstack-nova-compute" +
        " op start timeout=200s op stop timeout=200s" +
        " --clone interleave=true --disabled --force")

    ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs constraint location nova-compute-clone" +
        " rule resource-discovery=exclusive score=0 osprole eq compute")

    ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs constraint order start" +
        " nova-compute-checkevacuate-clone" +
        " then nova-compute-clone require-all=true")

    ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs constraint order start nova-compute-clone" +
        " then nova-evacuate require-all=false")
Example #6
def finalize_overcloud():
    from os_cloud_config.utils import clients

    os_auth_url, os_tenant_name, os_username, os_password = \
        CredentialHelper.get_overcloud_creds()

    try:
        keystone_client = clients.get_keystone_client(os_username, os_password,
                                                      os_tenant_name,
                                                      os_auth_url)
    except Exception:
        return None

    create_flavors()
    create_volume_types()

    # horizon_service = keystone_client.services.find(**{'name': 'horizon'})
    # horizon_endpoint = keystone_client.endpoints.find(
    #     **{'service_id': horizon_service.id})
    # return horizon_endpoint.publicurl
    return None
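# create_volume_types() is called by finalize_overcloud() but is not part of
# this listing. A minimal sketch, assuming the legacy cinderclient v2 API and
# a single placeholder volume type name (both are assumptions):
def create_volume_types():
    from cinderclient.v2 import client as cinder_client

    os_auth_url, os_tenant_name, os_username, os_password = \
        CredentialHelper.get_overcloud_creds()

    c_client = cinder_client.Client(os_username, os_password,
                                    os_tenant_name, os_auth_url)

    # "standard" is a hypothetical type name used here for illustration only.
    existing_types = [vt.name for vt in c_client.volume_types.list()]
    if 'standard' not in existing_types:
        c_client.volume_types.create('standard')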
Example #7
def delete_compute_nodes_resources(first_controller_node_ip):
    LOG.info("Delete the compute node resources within pacemaker.")

    ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs resource delete neutron-openvswitch-agent-compute \
            --force")

    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs stonith delete fence-nova --force")

    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs resource delete libvirtd-compute --force")

    # Then the nova-compute resource:

    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()

    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs resource delete nova-compute-checkevacuate --force")

    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs resource delete nova-compute --force")
Example #8
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("-l",
                        "--logging-level",
                        default="INFO",
                        type=logging_level,
                        help="""logging level defined by the logging module;
                                choices include CRITICAL, ERROR, WARNING,
                                INFO, and DEBUG""",
                        metavar="LEVEL")
    args = parser.parse_args()

    home_dir = os.path.expanduser('~')
    undercloudrc_name = os.path.join(home_dir, 'stackrc')
    oc_stack_name = CredentialHelper.get_overcloud_name()
    ssh_config = os.path.join(home_dir, '.ssh/config')
    undercloud_config = os.path.join(home_dir, 'undercloud_nodes.txt')

    # Run update_ssh_config.py
    cmd = os.path.join(os.getcwd(), 'update_ssh_config.py')
    os.system(cmd)

    # Run identify_nodes.py > ~/undercloud_nodes.txt
    cmd = os.path.join(os.getcwd(),
                       'identify_nodes.py > ~/undercloud_nodes.txt')
    os.system(cmd)

    # Get first_controller_node
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/cntl0/ {print $2}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    first_controller_node = p2.communicate()[0].rstrip()

    # Get first_controller_node_ip
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl0"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    first_controller_node_ip = p3.communicate()[0].rstrip()

    # Get CONTROLLER_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    controller_nodes_ip = p3.communicate()[0].split()

    # Get first_compute_node
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(
        shlex.split('awk \'/nova0/ || /compute0/ {print $2}\''),
        stdin=p1.stdout,
        stdout=subprocess.PIPE)
    first_compute_node = p2.communicate()[0].rstrip()

    # Get first_compute_node_ip
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova0|compute0"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    first_compute_node_ip = p3.communicate()[0].rstrip()

    # Get COMPUTE_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova|compute"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    compute_nodes_ip = p3.communicate()[0].split()

    # Get COMPUTE_NOVA_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/compute/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    compute_nova_names = p2.communicate()[0].split()
    first_compute_nova_name = compute_nova_names[0]

    # Get CONTROLLER_NOVA_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/controller/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    controller_nova_names = p2.communicate()[0].split()
    first_controller_nova_name = controller_nova_names[0]

    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()

    LOG.setLevel(args.logging_level)

    # Install RA instanceHA Configuration
    LOG.info(
        "***  Removing Instance HA for stack {}  ***".format(oc_stack_name))

    LOG.debug("home_dir: {}".format(home_dir))
    LOG.debug("oc_stack_name: {}".format(oc_stack_name))
    LOG.debug("oc_auth_url: {}".format(oc_auth_url))
    LOG.debug("oc_username: {}".format(oc_username))
    LOG.debug("oc_password: {}".format(oc_password))
    LOG.debug("oc_tenant_name: {}".format(oc_tenant_name))
    LOG.debug("first_controller_node: {}".format(first_controller_node))
    LOG.debug("first_controller_node_ip: {}".format(first_controller_node_ip))
    LOG.debug("controller_nodes_ip: {}".format(controller_nodes_ip))
    LOG.debug("first_compute_node: {}".format(first_compute_node))
    LOG.debug("first_compute_node_ip: {}".format(first_compute_node_ip))
    LOG.debug("compute_nodes_ip: {}".format(compute_nodes_ip))
    LOG.debug("compute_nova_names: {}".format(compute_nova_names))
    LOG.debug("first_compute_nova_name: {}".format(first_compute_nova_name))
    LOG.debug("controller_nova_names: {}".format(controller_nova_names))
    LOG.debug(
        "first_controller_nova_name: {}".format(first_controller_nova_name))

    cmd = "source {} ".format(undercloudrc_name)
    os.system(cmd)

    out = ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs property show stonith-enabled \
                  | awk '/stonith/ {print $2}'")
    result = out[0].rstrip()
    LOG.debug("result: {}".format(result))

    if result == 'true':
        ssh_cmd(first_controller_node_ip, "heat-admin",
                "sudo pcs property set stonith-enabled=false")
        ssh_cmd(first_controller_node_ip, "heat-admin",
                "sudo pcs property set maintenance-mode=true")

    disable_control_plane_services(first_controller_node_ip)
    delete_compute_nodeName_resource(compute_nodes_ip,
                                     first_controller_node_ip)
    delete_compute_nodes_resources(first_controller_node_ip)
    delete_compute_nodes_stonith_devices(compute_nodes_ip,
                                         first_controller_node_ip)
    delete_nova_evacuate_resource(first_controller_node_ip)
    disable_remote_pacemaker(compute_nodes_ip)
    enable_openstack_services(compute_nodes_ip)

    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs property set maintenance-mode=false")
    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs property set stonith-enabled=true")
Example #9
def main():

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-compute",
                       "--compute",
                       dest="compute_node_ip",
                       action="store",
                       default='')
    group.add_argument("-controller",
                       "--controller",
                       dest="controller_node_ip",
                       action="store",
                       default='')
    parser.add_argument('-f',
                        '--file',
                        help='name of json file containing the node being set',
                        default=Constants.INSTACKENV_FILENAME)
    parser.add_argument("-l",
                        "--logging-level",
                        default="INFO",
                        type=logging_level,
                        help="""logging level defined by the logging module;
                                choices include CRITICAL, ERROR, WARNING,
                                INFO, and DEBUG""",
                        metavar="LEVEL")
    args = parser.parse_args()

    home_dir = os.path.expanduser('~')
    undercloudrc_name = os.path.join(home_dir, 'stackrc')
    oc_stack_name = CredentialHelper.get_overcloud_name()
    ssh_config = os.path.join(home_dir, '.ssh/config')
    undercloud_config = os.path.join(home_dir, 'undercloud_nodes.txt')
    instack_file = os.path.expanduser(args.file)

    # Run ~/pilot/identify_nodes.py > ~/undercloud_nodes.txt
    cmd = os.path.join(home_dir,
                       'pilot/identify_nodes.py > ~/undercloud_nodes.txt')
    os.system(cmd)

    # Get CONTROLLER_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    controller_nodes_ip = p3.communicate()[0].split()

    # Get CONTROLLER_NODE_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/controller/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    controller_node_names = p2.communicate()[0].split()

    # Get COMPUTE_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova|compute"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    compute_nodes_ip = p3.communicate()[0].split()

    # Get COMPUTE_NOVA_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/compute/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    compute_nova_names = p2.communicate()[0].split()

    # Get first_controller_node_ip
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl0"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    first_controller_node_ip = p3.communicate()[0].rstrip()

    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()

    LOG.setLevel(args.logging_level)

    LOG.debug("home_dir: {}".format(home_dir))
    LOG.debug("oc_stack_name: {}".format(oc_stack_name))
    LOG.debug("oc_auth_url: {}".format(oc_auth_url))
    LOG.debug("oc_username: {}".format(oc_username))
    LOG.debug("oc_password: {}".format(oc_password))
    LOG.debug("oc_tenant_name: {}".format(oc_tenant_name))
    LOG.debug("controller_nodes_ip: {}".format(controller_nodes_ip))
    LOG.debug("controller_nodes_names: {}".format(controller_nodes_ip))
    LOG.debug("compute_nodes_ip: {}".format(compute_nodes_ip))
    LOG.debug("compute_nova_names: {}".format(compute_nodes_ip))

    # Execute Compute node deletion
    if args.compute_node_ip != '':
        compute_node_ip = args.compute_node_ip.rstrip()
        if check_ip_validity(compute_node_ip):
            LOG.info("***  Removing a compute node {} to InstanceHA"
                     " configuration.".format(compute_node_ip))
            delete_compute_node_resources(compute_node_ip,
                                          first_controller_node_ip)
        else:
            LOG.critical("!!! - Fatal Error: Invalid IP address: {}".format(
                compute_node_ip))
            sys.exit(-1)

    # Execute Controller node deletion
    if args.controller_node_ip != '':
        controller_node_ip = args.controller_node_ip.rstrip()
        if check_ip_validity(controller_node_ip):
            LOG.info("***  Removing a controller node {} to InstanceHA"
                     " configuration.".format(controller_node_ip))
            LOG.debug("controller_node_ip: {}".format(controller_node_ip))
            delete_controller_node_resources(controller_node_ip,
                                             first_controller_node_ip)
        else:
            LOG.critical("!!! - Fatal Error: Invalid IP address: {}".format(
                controller_node_ip))
            sys.exit(-1)
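# check_ip_validity() is called above but not shown in this listing. A
# minimal sketch that only accepts dotted-quad IPv4 addresses (an assumption
# about what the original helper checks):
import socket


def check_ip_validity(ip_address):
    # inet_aton() also accepts short forms such as "10.1", so require the
    # full dotted-quad format as well.
    try:
        socket.inet_aton(ip_address)
        return ip_address.count('.') == 3
    except socket.error:
        return False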
def main():

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-compute",
                       "--compute",
                       dest="compute_node_ip",
                       action="store",
                       default='')
    group.add_argument("-controller",
                       "--controller",
                       dest="controller_node_ip",
                       action="store",
                       default='')
    parser.add_argument('-f',
                        '--file',
                        help='name of json file containing the node being set',
                        default=Constants.INSTACKENV_FILENAME)
    parser.add_argument("-l",
                        "--logging-level",
                        default="INFO",
                        type=logging_level,
                        help="""logging level defined by the logging module;
                                choices include CRITICAL, ERROR, WARNING,
                                INFO, and DEBUG""",
                        metavar="LEVEL")
    args = parser.parse_args()

    home_dir = os.path.expanduser('~')
    undercloudrc_name = os.path.join(home_dir, 'stackrc')
    oc_stack_name = CredentialHelper.get_overcloud_name()
    ssh_config = os.path.join(home_dir, '.ssh/config')
    undercloud_config = os.path.join(home_dir, 'undercloud_nodes.txt')
    instack_file = os.path.join(home_dir, args.file)

    # Run update_ssh_config.py
    cmd = os.path.join(os.getcwd(), 'update_ssh_config.py')
    os.system(cmd)

    # Run identify_nodes.py > ~/undercloud_nodes.txt
    cmd = os.path.join(os.getcwd(),
                       'identify_nodes.py > ~/undercloud_nodes.txt')
    os.system(cmd)

    # Get first_controller_node
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/cntl0/ {print $2}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    first_controller_node = p2.communicate()[0].rstrip()

    # Get first_controller_node_ip
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl0"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    first_controller_node_ip = p3.communicate()[0].rstrip()

    # Get CONTROLLER_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    controller_nodes_ip = p3.communicate()[0].split()

    # Get first_compute_node
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(
        shlex.split('awk \'/nova0/ || /compute0/ {print $2}\''),
        stdin=p1.stdout,
        stdout=subprocess.PIPE)
    first_compute_node = p2.communicate()[0].rstrip()

    # Get first_compute_node_ip
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova0|compute0"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    first_compute_node_ip = p3.communicate()[0].rstrip()

    # Get COMPUTE_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova|compute"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    compute_nodes_ip = p3.communicate()[0].split()

    # Get COMPUTE_NOVA_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/compute/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    compute_nova_names = p2.communicate()[0].split()

    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()

    domainname = get_domainname(first_compute_node_ip)

    LOG.setLevel(args.logging_level)

    # Install RA instanceHA Configuration
    if args.compute_node_ip == '' and args.controller_node_ip == '':
        LOG.info("***  Configuring Instance HA for stack {}  ***".format(
            oc_stack_name))

        LOG.debug("home_dir: {}".format(home_dir))
        LOG.debug("oc_stack_name: {}".format(oc_stack_name))
        LOG.debug("oc_auth_url: {}".format(oc_auth_url))
        LOG.debug("oc_username: {}".format(oc_username))
        LOG.debug("oc_password: {}".format(oc_password))
        LOG.debug("oc_tenant_name: {}".format(oc_tenant_name))
        LOG.debug("first_controller_node: {}".format(first_controller_node))
        LOG.debug(
            "first_controller_node_ip: {}".format(first_controller_node_ip))
        LOG.debug("controller_nodes_ip: {}".format(controller_nodes_ip))
        LOG.debug("first_compute_node: {}".format(first_compute_node))
        LOG.debug("first_compute_node_ip: {}".format(first_compute_node_ip))
        LOG.debug("compute_nodes_ip: {}".format(compute_nodes_ip))
        LOG.debug("compute_nova_names: {}".format(compute_nova_names))
        LOG.debug("domainname: {}".format(domainname))

        if verify_fencing(first_controller_node_ip) != "false":
            LOG.debug("Stonith is enabled.")
        else:
            LOG.critical("!!! - Error: Fencing must be enabled.")
            LOG.info("Use agent_fencing.sh script to enable fencing.")
            sys.exit(-1)

        stop_disable_openstack_services(compute_nodes_ip)
        create_authkey(first_compute_node_ip)
        distribute_all_authkey(compute_nodes_ip, controller_nodes_ip)
        enable_start_pacemaker(compute_nodes_ip)
        create_nova_evacuate_resource(first_controller_node_ip, domainname)
        confirm_nova_evacuate_resource(first_controller_node_ip)
        tag_controllers_with_osprole(first_controller_node_ip)
        tag_the_control_plane(first_controller_node_ip)
        populate_compute_nodes_resources(first_controller_node_ip, domainname)
        add_compute_nodes_stonith_devices(compute_nodes_ip, undercloud_config,
                                          first_controller_node_ip,
                                          instack_file)
        create_fence_nova_device(first_controller_node_ip, domainname)
        enable_compute_nodes_recovery(first_controller_node_ip)
        create_compute_nodes_resources(compute_nodes_ip,
                                       first_controller_node_ip)
        enable_control_plane_services(first_controller_node_ip)
        final_resource_cleanup(first_controller_node_ip)

    # Execute Compute node addition
    if args.compute_node_ip != '':
        compute_node_ip = args.compute_node_ip.rstrip()
        if check_ip_validity(compute_node_ip):
            LOG.info("***  Adding a compute node {} to InstanceHA"
                     " configuration.".format(compute_node_ip))

            LOG.debug("compute_nodes_ip: {}".format(compute_nodes_ip))
            LOG.debug("compute_node_ip: {}".format(compute_node_ip))
            LOG.debug("first_controller_node_ip: {}".format(
                first_controller_node_ip))
            LOG.debug("undercloud_config: {}".format(undercloud_config))
            LOG.debug("instack_file: {}".format(instack_file))

            stop_disable_openstack_services(compute_nodes_ip)
            distribute_node_authkey(compute_node_ip)
            enable_start_compute_pacemaker(compute_node_ip)
            add_compute_node_stonith_devices(compute_node_ip,
                                             undercloud_config,
                                             first_controller_node_ip,
                                             instack_file)
            create_compute_node_resources(compute_node_ip,
                                          first_controller_node_ip)
            enable_control_plane_services(first_controller_node_ip)
            final_resource_cleanup(first_controller_node_ip)

        else:
            LOG.critical("!!! - Fatal Error: Invalid IP address: {}".format(
                compute_node_ip))
            sys.exit(-1)

    # Execute Controller node addition
    if args.controller_node_ip != '':
        controller_node_ip = args.controller_node_ip.rstrip()
        if check_ip_validity(controller_node_ip):
            LOG.info("***  Adding a controller node {} to InstanceHA"
                     " configuration.".format(controller_node_ip))

            LOG.debug("controller_node_ip: {}".format(controller_node_ip))
            LOG.debug("first_controller_node_ip: {}".format(
                first_controller_node_ip))

            distribute_node_authkey(controller_node_ip)
            tag_controllers_with_osprole(first_controller_node_ip)
            final_resource_cleanup(first_controller_node_ip)

        else:
            LOG.critical("!!! - Fatal Error: Invalid IP address: {}".format(
                controller_node_ip))
            sys.exit(-1)
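# get_domainname() and verify_fencing() are used by main() above but are not
# part of this listing. Minimal sketches, assuming ssh_cmd() returns a
# (stdout, stderr) pair of strings as described earlier:
def get_domainname(node_ip):
    # Read the DNS domain name from the node itself.
    out = ssh_cmd(node_ip, "heat-admin", "hostname -d")
    return out[0].rstrip()


def verify_fencing(first_controller_node_ip):
    # Return the current value of the stonith-enabled cluster property.
    out = ssh_cmd(first_controller_node_ip, "heat-admin",
                  "sudo pcs property show stonith-enabled"
                  " | awk '/stonith/ {print $2}'")
    return out[0].rstrip()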