Example no. 1
def deploy_undercloud(setts, sah_node, tester, director_vm):
    director_ip = setts.director_node.public_api_ip
    Ssh.execute_command(director_ip, "root", setts.director_node.root_password,
                        "subscription-manager remove --all")
    Ssh.execute_command(director_ip, "root", setts.director_node.root_password,
                        "subscription-manager unregister")
    sah_node.delete_director_vm()

    logger.info("=== create the director vm")
    sah_node.create_director_vm()
    tester.director_vm_health_check()

    logger.info("Preparing the Director VM")
    director_vm.apply_internal_repos()

    logger.debug("===  Uploading & configuring undercloud.conf . "
                 "environment yaml ===")
    director_vm.upload_update_conf_files()

    logger.info("=== installing the director & undercloud ===")
    director_vm.inject_ssh_key()
    director_vm.upload_cloud_images()
    director_vm.install_director()
    _is_failed, _error = tester.verify_undercloud_installed()
    if _is_failed:
        raise _error
Example no. 2
def deploy_powerflex_gw(setts, sah_node, tester, powerflexgw_vm):
    powerflexgw_ip = setts.powerflexgw_vm.public_api_ip
    Ssh.execute_command(powerflexgw_ip, "root",
                        setts.powerflexgw_vm.root_password,
                        "subscription-manager remove --all")
    Ssh.execute_command(powerflexgw_ip, "root",
                        setts.powerflexgw_vm.root_password,
                        "subscription-manager unregister")
    logger.info("=== deleting any existing powerflex gateway vm")
    sah_node.delete_powerflexgw_vm()

    logger.info("=== Creating the powerflex gateway vm")
    sah_node.create_powerflexgw_vm()
    tester.powerflexgw_vm_health_check()
    logger.info("Installing the powerflex gateway UI")
    powerflexgw_vm.upload_rpm()
    powerflexgw_vm.install_gateway()
    logger.info("Configuring the powerflex gateway vm")
    powerflexgw_vm.configure_gateway()
    logger.info("Retrieving and injecting SSL certificates")
    powerflexgw_vm.get_ssl_certificates()
    powerflexgw_vm.inject_ssl_certificates()
    logger.info("Restarting the gateway and cinder-volume")
    powerflexgw_vm.restart_gateway()
    powerflexgw_vm.restart_cinder_volume()
Example no. 3
 def verify_computes_virtualization_enabled(self):
     logger.debug("*** Verify the Compute nodes have KVM enabled *** ")
     cmd = "source ~/stackrc;nova list | grep compute"
     ssh_opts = (
         "-o StrictHostKeyChecking=no "
         "-o UserKnownHostsFile=/dev/null "
         "-o KbdInteractiveDevices=no")
     setts = self.settings
     re = Ssh.execute_command_tty(self.director_ip,
                                  setts.director_install_account_user,
                                  setts.director_install_account_pwd,
                                  cmd)
     computes = re[0].split("\n")
     computes.pop()
     for each in computes:
         provisioning_ip = each.split("|")[6].split("=")[1]
         cmd = "ssh %s heat-admin@%s 'ls -al /dev/kvm'" % (
             ssh_opts, provisioning_ip)
         re = Ssh.execute_command_tty(
             self.director_ip,
             self.settings.director_install_account_user,
             self.settings.director_install_account_pwd, cmd)
         if "No such file" in re[0]:
             raise AssertionError(
                 "KVM Not running on Compute node '{}' -"
                 " make sure the node has been DTK'ed/Virtualization "
                 "enabled in the Bios".format(
                     provisioning_ip))
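A note on the check above: the compute-node test only looks for the /dev/kvm device node over SSH. A minimal standalone sketch of the same probe (not part of the original module) is:

import os

def kvm_device_present():
    # Same test the snippet runs remotely with "ls -al /dev/kvm": the device
    # node only exists when the kernel KVM modules are loaded, i.e. when
    # virtualization is enabled in the BIOS.
    return os.path.exists("/dev/kvm")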
Example no. 4
        def complete_bootstrap_process(self):
            logger.info("Wait for the bootstrap node services to be up")
            cmd = 'ssh -t root@localhost "sudo su - core -c \' ssh -o \\"StrictHostKeyChecking no \\" bootstrap journalctl | grep \'bootkube.service complete\'\'"'
            bBootStrapReady = False
            while bBootStrapReady is False:
                journal = Ssh.execute_command_tty("localhost",
                                                  "root",
                                                  self.settings.csah_root_pwd,
                                                  cmd)
                if 'bootkube.service complete' in str(journal):
                    bBootStrapReady = True
                    logger.info("Bootstrap node ready")
                else:
                    logger.debug("Waiting for bootstrap node to finish initializing services..")
                    time.sleep(30)

            logger.info("Complete the bootstrap process")
            cmd = 'ssh -t root@localhost "sudo su - core -c \' ./openshift-install --dir=openshift wait-for bootstrap-complete --log-level debug\'"'
            re = Ssh.execute_command_tty("localhost",
                                         "root",
                                         self.settings.csah_root_pwd,
                                         cmd)
            cmd = 'ssh -t root@localhost "sudo su - core -c \'oc get nodes\'"'
            re = Ssh.execute_command_tty("localhost",
                                         "root",
                                         self.settings.csah_root_pwd,
                                         cmd)
            logger.debug(str(re))
Example no. 5
 def cleanup_sah(self):
     logger.info("- Clean up any existing installation  ")
     cmds = [
         ' killall -u core', 'userdel -r core',
         'rm -rf /var/lib/tftpboot/uefi/*'
     ]
     for cmd in cmds:
         Ssh.execute_command("localhost", "root",
                             self.settings.csah_root_pwd, cmd)
Example no. 6
    def verify_undercloud_installed(self):
        logger.debug("Verify the undercloud installed properly")
        setts = self.settings
        cmd = "stat ~/stackrc"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        if "No such file or directory" in re[0]:
            _error = AssertionError("Director & Undercloud did not install "
                                    "properly, no ~/stackrc found, check "
                                    "/pilot/install-director.log "
                                    "for details")

            return True, _error

        cmd = ("grep \"The Undercloud has been successfully installed\" "
               + "~/pilot/install-director.log")

        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        if "The Undercloud has been successfully installed" not in re[0]:
            _error = AssertionError("Director & Undercloud did not install "
                                    "properly, log does not indicate a "
                                    "successful director installation, check "
                                    "/pilot/install-director.log for details")
            return True, _error

        cmd = "cat "\
              "~/pilot/install-director.log"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        if "There are no enabled repos" in re[0]:
            _error = AssertionError("Unable to attach to pool ID while "
                                    "updating the overcloud image")
            return True, _error

        cmd = "source ~/stackrc;glance image-list"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        if "overcloud-full" not in re[0]:
            _error = AssertionError("Unable to find the overcloud image "
                                    "in glance - check the "
                                    "install-director.log for possible "
                                    "package download errors")
            return True, _error

        logger.info("Undercloud installed Successfully!")

        return False, None
Example no. 7
    def verify_backends_connectivity(self):
        dellsc_be = self.settings.enable_dellsc_backend
        if dellsc_be:
            setts = self.settings
            cmd = "source ~/stackrc;nova list | grep compute"
            re = Ssh.execute_command_tty(self.director_ip,
                                         setts.director_install_account_user,
                                         setts.director_install_account_pwd,
                                         cmd)
            ls = re[0].split("\n")
            ls.pop()
            compute_node_ip = ls[0].split("|")[6].split("=")[1]

            cmd = "source ~/stackrc;nova list | grep controller"
            re = Ssh.execute_command_tty(self.director_ip,
                                         setts.director_install_account_user,
                                         setts.director_install_account_pwd,
                                         cmd)
            ls = re[0].split("\n")
            ls.pop()
            controller_node_ip = ls[0].split("|")[6].split("=")[1]

        if self.settings.enable_dellsc_backend:
            logger.debug("Verifying dellsc backend connectivity")

            logger.debug("Verify Controller nodes can ping the san ip")
            cmd = "ssh heat-admin@" + controller_node_ip +\
                  " sudo ping " + self.settings.dellsc_san_ip +\
                  " -c 1 -w 30 "
            re = Ssh.execute_command_tty(self.director_ip,
                                         setts.director_install_account_user,
                                         setts.director_install_account_pwd,
                                         cmd)
            if self.ping_success not in re[0]:
                raise AssertionError(controller_node_ip +
                                     " cannot ping the dellsc san ip " +
                                     self.settings.dellsc_san_ip)

            logger.debug("Verify Make sure ISCSI access work from Compute \
                         & Controller nodes")
            for each in compute_node_ip, controller_node_ip:
                cmd = "ssh heat-admin@" + each +\
                      " sudo iscsiadm -m discovery -t sendtargets -p " +\
                      self.settings.dellsc_iscsi_ip_address +\
                      ":" + self.settings.dellsc_iscsi_port
                re = Ssh.execute_command_tty(
                                   self.director_ip,
                                   setts.director_install_account_user,
                                   setts.director_install_account_pwd,
                                   cmd)
                if "com.compellent" not in re[0]:
                    raise AssertionError(
                                   each +
                                   " not able to validate ISCSI access to " +
                                   self.settings.dellsc_iscsi_ip_address +
                                   ":" + self.settings.dellsc_iscsi_port)
Example no. 8
def deploy():
    logger.debug("=================================")
    logger.info("=== Starting up ...")
    logger.debug("=================================")

    settings, args = get_settings()

    settings = load_settings()

    csah = CSah()
    # CSah healthChecks
    csah.power_off_cluster_nodes()
    csah.cleanup_sah()
    csah.delete_bootstrap_vm()

    csah.run_playbooks()
    csah.create_bootstrap_vm()
    csah.wait_for_bootstrap_ready()

    csah.pxe_boot_controllers()
    csah.wait_for_controllers_ready()

    csah.complete_bootstrap_process()
    csah.pxe_boot_computes()
    csah.wait_for_operators_ready()

    logger.info("- Done")
Example no. 9
    def verify_undercloud_installed(self):
        logger.debug("Verify the undercloud installed properly")
        cmd = "stat ~/stackrc"
        setts = self.settings
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd, cmd)
        if "No such file or directory" in re[0]:
            raise AssertionError(
                "Director & Undercloud did not install properly, "
                "check /pilot/install-director.log for details")
        cmd = " grep \"Undercloud install complete\" " \
              "~/pilot/install-director.log"
        setts = self.settings
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd, cmd)
        if "Undercloud install complete." not in re[0]:
            raise AssertionError(
                "Director & Undercloud did not install properly,"
                " check /pilot/install-director.log for details")

        cmd = "cat "\
              "~/pilot/install-director.log"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd, cmd)
        if "There are no enabled repos" in re[0]:
            raise AssertionError(
                "Unable to attach to pool ID while updating "
                "the overcloud image")

        cmd = "source ~/stackrc;glance image-list"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd, cmd)
        if "overcloud-full" not in re[0]:
            raise AssertionError(
                "Unable to find the overcloud image in glance - "
                "check the install-director.log for possible "
                "package download errors")

        cmd = "cat "\
              " /etc/rhosp-release"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd, cmd)
        logger.debug("Undercloud version : " + re[0])
Example no. 10
 def verify_introspection_sucessfull_edge(self, node_type):
     node_type_count = len(self.settings.node_types_map[node_type])
     cmd = ("source ~/stackrc;openstack baremetal node list "
            "--fields uuid properties provision_state -f json")
     setts = self.settings
     stdout, stderr, exit_status = Ssh.execute_command_tty(
         self.director_ip, setts.director_install_account_user,
         setts.director_install_account_pwd, cmd)
     nodes = json.loads(stdout)
     introspected_node_type_count = 0
     for node in nodes:
         props = node["Properties"]
         uuid = node["UUID"]
         state = node["Provisioning State"]
         if ("node_type" in props and props["node_type"] == node_type
                 and state == "available"):
             introspected_node_type_count += 1
     if node_type_count != introspected_node_type_count:
         raise AssertionError(
             "Expected number of nodes introspected for node type: "
             "{}, does not match, expected: {}, "
             "introspected: {}".format(node_type, str(node_type_count),
                                       str(introspected_node_type_count)))
     logger.info(
         "Validated node type: %s, "
         "has the correct number of nodes introspected: %s", node_type,
         introspected_node_type_count)
Example no. 11
 def verify_nodes_registered_in_ironic_edge(self, node_type):
     expected_node_type_count = len(self.settings.node_types_map[node_type])
     cmd = ("source ~/stackrc;openstack baremetal node list "
            "--fields properties -f json")
     setts = self.settings
     stdout, stderr, exit_status = Ssh.execute_command_tty(
         self.director_ip,
         setts.director_install_account_user,
         setts.director_install_account_pwd,
         cmd)
     nodes = json.loads(stdout)
     registered_node_type_count = 0
     for node in nodes:
         props = node["Properties"]
         if "node_type" in props and props["node_type"] == node_type:
             registered_node_type_count += 1
     if expected_node_type_count != registered_node_type_count:
         raise AssertionError(
             "Expected number of nodes registered in Ironic for node type: "
             "{}, does not match, expected: {}, "
             "imported: {}".format(node_type,
                                   str(expected_node_type_count),
                                   str(registered_node_type_count)))
     logger.info("Validated node type: %s, "
                 "has the correct number of nodes imported "
                 "into Ironic: %s", node_type, registered_node_type_count)
Example no. 12
 def run_ssh_edit(self, remotefile, find, replace):
     return Ssh.ssh_edit_file(self.ip,
                              self.user,
                              self.pwd,
                              remotefile,
                              find,
                              replace)
Example no. 13
 def run_playbooks(self):
     logger.info("- Run ansible playbook to generate ignition files etc")
     logfile = logger.handlers[0].baseFilename
     cmd = 'export ANSIBLE_LOG_PATH=' + logfile + '; ansible-playbook -i generated_inventory haocp.yaml'
     subprocess.call(cmd,
                     shell=True,
                     cwd='/home/ansible/JetPack/src/pilot/ansible')
     logger.info("Updating the dns settings")
     cmds = [
         'nmcli connection modify br0 ipv4.dns ' +
         self.settings.csah_node.os_ip,
         'sed -i "s/nameserver.*/nameserver ' +
         self.settings.csah_node.os_ip + '/" /etc/resolv.conf'
     ]
     for cmd in cmds:
         Ssh.execute_command("localhost", "root",
                             self.settings.csah_root_pwd, cmd)
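run_playbooks above passes ANSIBLE_LOG_PATH to ansible-playbook by prefixing the shell command with an export. A sketch of the same call made through the child-process environment instead (the playbook, inventory and path are copied from the snippet, not re-verified here):

import os
import subprocess

def run_playbook(logfile,
                 playbook="haocp.yaml",
                 inventory="generated_inventory",
                 cwd="/home/ansible/JetPack/src/pilot/ansible"):
    # Hand ANSIBLE_LOG_PATH to ansible-playbook via env= rather than an
    # "export ...;" prefix inside a shell string.
    env = dict(os.environ, ANSIBLE_LOG_PATH=logfile)
    return subprocess.call(["ansible-playbook", "-i", inventory, playbook],
                           cwd=cwd, env=env)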
Example no. 14
 def configure_ntp(self):
     logger.info("Customizing NTP configuration on all OpenShift nodes")
     sets = self.settings
     FileHelper.replace_expression(self.chrony_file, 'CSAH_IP',
                                   sets.csah_node.os_ip)
     with open(self.chrony_file, "r") as f:
         data = f.read()
     encoded = base64.b64encode(data.encode('utf-8'))
     FileHelper.replace_expression(self.worker_ntp_manifest,
                                   'CHRONY_BASE64', encoded.decode())
     FileHelper.replace_expression(self.master_ntp_manifest,
                                   'CHRONY_BASE64', encoded.decode())
     cmd = 'su - core -c \' oc apply -f ' + self.master_ntp_manifest + '\''
     re = Ssh.execute_command_tty("localhost", "root",
                                  self.settings.csah_root_pwd, cmd)
     cmd = 'su - core -c \' oc apply -f ' + self.worker_ntp_manifest + '\''
     re = Ssh.execute_command_tty("localhost", "root",
                                  self.settings.csah_root_pwd, cmd)
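configure_ntp base64-encodes the rendered chrony configuration before substituting it for the CHRONY_BASE64 marker in the master and worker NTP manifests. A self-contained sketch of that encoding step (the file name is a placeholder):

import base64

def encode_chrony(path="chrony.conf"):
    # Read the rendered chrony.conf and return the base64 payload that
    # replaces the CHRONY_BASE64 marker in the NTP manifests above.
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("ascii")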
Example no. 15
 def create_bootstrap_vm(self):
     logger.info("- Create the bootstrap VM")
     bootstrap_mac = self.get_inventory(
     )['all']['vars']['bootstrap_node'][0]['mac']
     cmd = 'virt-install --name bootstrapkvm --ram 20480 --vcpu 8 --disk path=/home/bootstrapvm-disk.qcow2,format=qcow2,size=20 --os-variant generic --network=bridge=br0,model=virtio,mac=' + bootstrap_mac + ' --pxe --boot uefi,hd,network --noautoconsole --autostart &'
     re = Ssh.execute_command("localhost", "root",
                              self.settings.csah_root_pwd, cmd)
     time.sleep(320)
Example no. 16
    def verify_subscription_status(public_api_ip, user, password, retries):
        i = 0

        subscription_status = Ssh.execute_command(
            public_api_ip, user, password, "subscription-manager status")[0]

        while "Current" not in subscription_status and i < retries:
            if "Unknown" in subscription_status:
                return subscription_status
            time.sleep(60)
            subscription_status = \
                Ssh.execute_command(public_api_ip,
                                    user,
                                    password,
                                    "subscription-manager status")[0]
            i += 1
        return subscription_status
Example no. 17
def deploy_powerflex_mgmt(setts, sah_node, tester, powerflexmgmt_vm):
    powerflexmgmt_ip = setts.powerflexmgmt_vm.public_api_ip
    Ssh.execute_command(powerflexmgmt_ip, "root",
                        setts.powerflexmgmt_vm.root_password,
                        "subscription-manager remove --all")
    Ssh.execute_command(powerflexmgmt_ip, "root",
                        setts.powerflexmgmt_vm.root_password,
                        "subscription-manager unregister")
    logger.info("=== deleting any existing powerflex presentation server vm")
    sah_node.delete_powerflexmgmt_vm()

    logger.info("=== Creating the powerflex presentation server vm")
    sah_node.create_powerflexmgmt_vm()
    tester.powerflexmgmt_vm_health_check()
    logger.info("Installing the powerflex presentation server UI")
    powerflexmgmt_vm.upload_rpm()
    powerflexmgmt_vm.install_presentation_server()
Example no. 18
    def sah_health_check(self):

        logger.info("SAH node health check")
        if self.verify_rhsm_status:
            logger.debug("*** Verify the SAH node registered properly ***")
            for _ in range(60):
                subscription_status = self.verify_subscription_status(
                    self.sah_ip, "root", self.settings.sah_node.root_password,
                    self.settings.subscription_check_retries)
                if "Current" in subscription_status:
                    break
                time.sleep(2)
            else:
                raise AssertionError("SAH did not register properly : " +
                                     subscription_status)

        logger.debug("*** Verify the SAH can ping its public gateway")
        gateway = self.settings.public_api_gateway
        test = self.ping_host(self.sah_ip, "root",
                              self.settings.sah_node.root_password, gateway)
        if self.ping_success not in test:
            raise AssertionError("SAH cannot ping its public gateway : " +
                                 test)

        logger.debug("*** Verify the SAH can ping the outside world (ip)")
        test = self.ping_host(self.sah_ip, "root",
                              self.settings.sah_node.root_password, "8.8.8.8")
        if self.ping_success not in test:
            raise AssertionError("SAH cannot ping the outside world (ip) : " +
                                 test)

        logger.debug("*** Verify the SAH can ping the outside world (dns)")
        test = self.ping_host(self.sah_ip, "root",
                              self.settings.sah_node.root_password,
                              "google.com")
        if self.ping_success not in test:
            raise AssertionError("SAH cannot ping the outside world (dns) : " +
                                 test)

        logger.debug("*** Verify the SAH can ping the idrac network")
        test = self.ping_host(self.sah_ip, "root",
                              self.settings.sah_node.root_password,
                              self.settings.sah_node.idrac_ip)
        if self.ping_success not in test:
            raise AssertionError("SAH cannot ping idrac networkn (ip) : " +
                                 test)

        logger.debug("*** Verify the SAH has KVM enabled *** ")
        cmd = 'ls -al /dev/kvm'
        if "No such file" in \
                Ssh.execute_command(self.sah_ip,
                                    "root",
                                    self.settings.sah_node.root_password,
                                    cmd)[1]:
            raise AssertionError(
                "KVM Not running on the SAH node - make sure "
                "the node has been DTK'ed/Virtualization enabled "
                "in the Bios")
Example no. 19
    def wait_for_vm_to_come_up(self, target_ip, user, password):
        while True:
            status = Ssh.execute_command(target_ip, user, password, "ps")[0]

            if status != "host not up":
                break

            logger.debug("vm is not up.  Sleeping...")
            time.sleep(10)
Example no. 20
 def ping_host(self, external_ip, user, passwd, target_host):
     for i in range(1, 30):
         ping_status = Ssh.execute_command(
             external_ip, user, passwd,
             "ping " + target_host + " -c 1 -w 30 ")[0]
         if self.ping_success in ping_status:
             logger.debug("Ping {} successful on attempt #{}".format(
                 target_host, i))
             break
     # noinspection PyUnboundLocalVariable
     return ping_status
Example no. 21
    def wait_for_vm_to_go_down(self, target_ip, user, password):
        while True:
            status = Ssh.execute_command(
                target_ip,
                user,
                password,
                "ps")[0]

            if status == "host not up":
                break
            time.sleep(5)
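wait_for_vm_to_come_up and wait_for_vm_to_go_down poll indefinitely. A minimal sketch of the same pattern with an upper bound (the helper name and defaults are illustrative, not part of the original code):

import time

def wait_until(check, timeout=900, interval=10, what="condition"):
    # Poll check() until it returns True, giving up after `timeout` seconds
    # instead of looping forever.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check():
            return
        time.sleep(interval)
    raise TimeoutError("timed out waiting for " + what)

# e.g. wait_until(lambda: Ssh.execute_command(ip, user, pwd, "ps")[0] != "host not up",
#                 what="VM to come up")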
Example no. 22
 def delete_bootstrap_vm(self):
     logger.info(" Destroy any existing bootstrap Vm")
     cmd = 'virsh list --all'
     bBoostrapDestroyed = False
     while bBoostrapDestroyed is False:
         re = Ssh.execute_command("localhost",
                                  "root",
                                  self.settings.csah_root_pwd,
                                  cmd)
         if 'bootstrap' in str(re):
             cmds = [
                 'virsh undefine --nvram "bootstrapkvm"',
                 'virsh destroy bootstrapkvm']
             for cm in cmds:
                 Ssh.execute_command("localhost",
                                     "root",
                                     self.settings.csah_root_pwd,
                                     cm)
         else:
             bBoostrapDestroyed = True
Example no. 23
 def wait_for_operators_ready(self):
     logger.info(" - Wait for all operators to be available")
     bOperatorsReady = False
     while bOperatorsReady is False:
         cmd = 'su - core -c \'oc get csr -o name | xargs oc adm certificate approve\''
         Ssh.execute_command_tty("localhost", "root",
                                 self.settings.csah_root_pwd, cmd)
         cmd = 'su - core -c \'oc get clusteroperators\''
         re = Ssh.execute_command_tty("localhost", "root",
                                      self.settings.csah_root_pwd, cmd)
         notReady = []
         # str(re) is the repr of the ssh output tuple, so newlines appear
         # as literal "\r\n" escape sequences in the text being split
         ls = str(re).split('\\r\\n')
         for each in ls:
             fields = each.split()
             if len(fields) > 2 and "False" in fields[2]:
                 notReady.append(fields[0].strip())
         if len(notReady) > 0:
             logger.debug(" Operators still not ready : " + str(notReady))
             time.sleep(120)
         else:
             logger.info(" All operators are up & running ")
             bOperatorsReady = True
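wait_for_operators_ready approves pending CSRs and then scrapes the text of `oc get clusteroperators` out of the repr of the ssh output. A sketch of the same readiness test against the JSON output, run wherever `oc` and a kubeconfig are available (an alternative formulation, not the project's own helper):

import json
import subprocess

def operators_not_ready():
    # Names of ClusterOperators whose Available condition is not "True",
    # read from structured output instead of split table text.
    out = subprocess.run(["oc", "get", "clusteroperators", "-o", "json"],
                         check=True, capture_output=True, text=True).stdout
    not_ready = []
    for op in json.loads(out)["items"]:
        conditions = {c["type"]: c["status"]
                      for c in op.get("status", {}).get("conditions", [])}
        if conditions.get("Available") != "True":
            not_ready.append(op["metadata"]["name"])
    return not_ready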
Example no. 24
    def verify_overcloud_deployed(self):
        logger.debug("Verify the overcloud installed properly")
        setts = self.settings
        overcloud_name = setts.overcloud_name

        # Verify the overcloud RC file was created
        cmd = "test -f ~/" + overcloud_name + "rc; echo $?;"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        # Have to strip non-printing chars as re[0] contains newline '\n'
        _resp = re[0].strip()
        # If director is turned off or not deployed
        # re[0] == "host not up", handle this case by checking len(re[0])
        is_conf = not bool(int(_resp)) if len(_resp) == 1 else False
        if is_conf is False:
            msg = ("Overcloud RC file missing, either the overcloud has not "
                   "been deployed yet or there was an issue during "
                   "the deployment, such as Director VM being down or a heat "
                   "stack deployment failure")
            logger.warning(msg)
            return True, AssertionError(msg)

        # Check log for successful deployment
        success = "Overcloud Deployed"
        cmd = "grep \"" + success + "\" " + "~/pilot/overcloud_deploy_out.log"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        if success not in re[0]:
            msg = ("Overcloud did not install successfully, "
                   "check ~/pilot/overcloud_deploy_out.log")
            logger.warning(msg)
            return True, AssertionError(msg)

        else:
            logger.info("Overcloud install successful")
            return False, None
Example no. 25
 def verify_pools_attached(ip_addr, user, password, logfile):
     # check the xxxxx-posts.log for pool id's/repo's related errors.
     log_out = \
         Ssh.execute_command(ip_addr, user, password, "cat " + logfile)[0]
     error1 = 'No subscriptions are available from the pool with'
     error2 = 'Removed temporarly as this error will show when ' \
              'not pulling from the cdn but internal repos'
     error3 = 'Could not find an OpenStack pool to attach to'
     if error1 in log_out or error2 in log_out or error3 in log_out:
         logger.info("*** post install log ***")
         logger.info(log_out)
         return False
     return True
Example no. 26
 def wait_for_bootstrap_ready(self):
     bBootstrap_ready = False
     while bBootstrap_ready is False:
         cmd = 'sudo su - core -c \'ssh -o "StrictHostKeyChecking no " bootstrap sudo ss -tulpn | grep -E "6443|22623|2379"\''
         openedPorts = Ssh.execute_command_tty("localhost", "root",
                                               self.settings.csah_root_pwd,
                                               cmd)
         if ("22623" in str(openedPorts)) and (
                 "2379" in str(openedPorts)) and ("6443"
                                                  in str(openedPorts)):
             logger.info(" ,, boostrap UP! ")
             bBootstrap_ready = True
         re = Ssh.execute_command("localhost", "root",
                                  self.settings.csah_root_pwd,
                                  "virsh list --all | grep bootstrapkvm")[0]
         if "shut off" in re:
             logger.info("- Powering on the bootstrap VM")
             Ssh.execute_command("localhost", "root",
                                 self.settings.csah_root_pwd,
                                 "virsh start bootstrapkvm")
         time.sleep(60)
     logger.info("- Bootstrap VM is ready")
Example no. 27
 def verify_introspection_sucessfull(self):
     logger.debug("Verify the introspection did not encounter any errors")
     cmd = "source ~/stackrc;openstack baremetal node list | grep None"
     setts = self.settings
     re = Ssh.execute_command_tty(self.director_ip,
                                  setts.director_install_account_user,
                                  setts.director_install_account_pwd, cmd)
     # TODO :: if a node failed introspection, set it to PXE and reboot
     ls_nodes = re[0].split("\n")
     ls_nodes.pop()
     for node in ls_nodes:
         state = node.split("|")[5]
         if "available" not in state:
             raise AssertionError(
                 "Node state not available post bulk introspection" +
                 "\n " + re[0])
Example no. 28
 def provisioning_subnet_exists(self, subnet):
     logger.debug("Check if edge subnet {} already "
                  "exists or not".format(subnet))
     setts = self.settings
     user = setts.director_install_account_user
     ip = setts.director_node.public_api_ip
     pwd = setts.director_install_account_pwd
     is_subnet = False
     subnet_cmd = ("{} openstack subnet "
                   "show {} -c name "
                   "-f value".format(self.source_stackrc, subnet))
     sn_out = Ssh.execute_command(ip, user, pwd, subnet_cmd)[0]
     if sn_out.strip() == subnet:
         is_subnet = True
         logger.info("Subnet {} already exists".format(subnet))
     return is_subnet
Example no. 29
 def verify_nodes_registered_in_ironic(self):
     logger.debug("Verify the expected amount of nodes imported in ironic")
     cmd = "source ~/stackrc;openstack baremetal node list | grep None"
     setts = self.settings
     re = Ssh.execute_command_tty(self.director_ip,
                                  setts.director_install_account_user,
                                  setts.director_install_account_pwd, cmd)
     ls_nodes = re[0].split("\n")
     ls_nodes.pop()
     expected_nodes = len(self.settings.controller_nodes) + len(
         self.settings.compute_nodes) + len(self.settings.ceph_nodes)
     if len(ls_nodes) != expected_nodes:
         raise AssertionError(
             "Expected amount of nodes registered in Ironic "
             "does not add up " + str(len(ls_nodes)) + "/" +
             str(expected_nodes))
Example no. 30
 def wait_for_controllers_ready(self):
     logger.info("Wait for the control nodes to be ready")
     time.sleep(180)
     for node in self.settings.controller_nodes:
         bNodeReady = False
         while bNodeReady is False:
             cmd = 'ssh -t root@localhost "sudo su - core -c \' ssh -o \\"StrictHostKeyChecking no \\" ' + node.name + ' ls -lart /etc/kubernetes/manifests\'"'
             ls = Ssh.execute_command_tty("localhost",
                                          "root",
                                          self.settings.csah_root_pwd,
                                          cmd)
             manifests = ["kube-scheduler-pod.yaml",
                          "kube-controller-manager-pod.yaml",
                          "kube-apiserver-pod.yaml",
                          "etcd-pod.yaml"]
             if all(m in str(ls) for m in manifests):
                 bNodeReady = True
                 logger.debug(node.name + " is ready")
             else:
                 logger.debug("Waiting for " + node.name + " to be ready...")
                 time.sleep(30)