Example #1
 def verify_computes_virtualization_enabled(self):
     logger.debug("*** Verify the Compute nodes have KVM enabled *** ")
     cmd = "source ~/stackrc;nova list | grep compute"
     ssh_opts = (
         "-o StrictHostKeyChecking=no "
         "-o UserKnownHostsFile=/dev/null "
         "-o KbdInteractiveDevices=no")
     setts = self.settings
     re = Ssh.execute_command_tty(self.director_ip,
                                  setts.director_install_account_user,
                                  setts.director_install_account_pwd,
                                  cmd)
     computes = re[0].split("\n")
     computes.pop()
     for each in computes:
         provisioning_ip = each.split("|")[6].split("=")[1]
         cmd = "ssh %s heat-admin@%s 'ls -al /dev/kvm'" % (
             ssh_opts, provisioning_ip)
         re = Ssh.execute_command_tty(
             self.director_ip,
             self.settings.director_install_account_user,
             self.settings.director_install_account_pwd, cmd)
         if "No such file" in re[0]:
             raise AssertionError(
                 "KVM Not running on Compute node '{}' -"
                 " make sure the node has been DTK'ed/Virtualization "
                 "enabled in the Bios".format(
                     provisioning_ip))
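
The loop above extracts the provisioning IP by chaining split("|") and split("=") over raw `nova list` table output, which breaks silently if the column layout shifts. A minimal sketch of the same extraction as a hypothetical helper that fails loudly instead (the column positions are assumptions taken from the loop above):

def provisioning_ip_from_row(row):
    # Assumes the Networks column is field 6 of a 'nova list' table row
    # and holds 'ctlplane=<ip>', as in the loop above.
    fields = row.split("|")
    if len(fields) < 7 or "=" not in fields[6]:
        raise ValueError("unexpected nova list row: %r" % row)
    return fields[6].split("=")[1].strip()

The loop body could then call provisioning_ip_from_row(each) in place of the inline chain.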
Example #2
        def complete_bootstrap_process(self):
            logger.info("Wait for the bootstrap node services to be up")
            cmd = 'ssh -t root@localhost "sudo su - core -c \' ssh -o \\"StrictHostKeyChecking no \\" bootstrap journalctl | grep \'bootkube.service complete\'\'"'
            bBootStrapReady = False
            while bBootStrapReady is False:
                journal =  Ssh.execute_command_tty("localhost",
                                                   "root",
                                                   self.settings.csah_root_pwd,
                                                   cmd)
                if 'bootkube.service complete' in str(journal):
                    bBootStrapReady = True
                    logger.info("Bootstrap node ready")
                else:
                    logger.debug("Waiting for bootstrap node to finish initializing services..")
                    time.sleep(30)

            logger.info("Complete the bootstrap process")
            cmd = 'ssh -t root@localhost "sudo su - core -c \' ./openshift-install --dir=openshift wait-for bootstrap-complete --log-level debug\'"'
            re =  Ssh.execute_command_tty("localhost",
                                          "root",
                                          self.settings.csah_root_pwd,
                                          cmd)
            cmd = 'ssh -t root@localhost "sudo su - core -c \'oc get nodes\'"'
            re =  Ssh.execute_command_tty("localhost",
                                          "root",
                                          self.settings.csah_root_pwd,
                                          cmd)
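
The loop above polls a command until its output matches, sleeping between attempts; the same pattern recurs throughout these examples. A generic sketch of that shape with an explicit timeout (names are illustrative, not from the source):

import time

def wait_until(check, interval=30, timeout=3600):
    # Call check() until it returns True or until the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check():
            return
        time.sleep(interval)
    raise AssertionError("condition not met within %s seconds" % timeout)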
Example #3
    def verify_undercloud_installed(self):
        logger.debug("Verify the undercloud installed properly")
        setts = self.settings
        cmd = "stat ~/stackrc"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        if "No such file or directory" in re[0]:
            _error = AssertionError("Director & Undercloud did not install "
                                    "properly, no ~/stackrc found, check "
                                    "/pilot/install-director.log "
                                    "for details")

            return True, _error

        cmd = ("grep \"The Undercloud has been successfully installed\" "
               + "~/pilot/install-director.log")

        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        if "The Undercloud has been successfully installed" not in re[0]:
            _error = AssertionError("Director & Undercloud did not install "
                                    "properly, log does not indicate a "
                                    "successful director installation, check "
                                    "/pilot/install-director.log for details")
            return True, _error

        cmd = "cat "\
              "~/pilot/install-director.log"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        if "There are no enabled repos" in re[0]:
            _error = AssertionError("Unable to attach to pool ID while "
                                    "updating the overcloud image")
            return True, _error

        cmd = "source ~/stackrc;glance image-list"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        if "overcloud-full" not in re[0]:
            _error = AssertionError("Unable to find the overcloud image "
                                    "in glance - check the "
                                    "install-director.log for possible "
                                    "package download errors")
            return True, _error

        logger.info("Undercloud installed Successfully!")

        return False, None
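
Unlike the raising variant later in this listing, this checkpoint reports problems as a (failed, error) tuple, so a caller can run every check and summarize at the end. A hypothetical driver for that contract:

def run_checkpoints(checks):
    # Each check returns (failed, error); collect errors instead of stopping.
    errors = []
    for check in checks:
        failed, error = check()
        if failed:
            errors.append(error)
    return errors

# e.g. run_checkpoints([tester.verify_undercloud_installed,
#                       tester.verify_overcloud_deployed])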
Example #4
    def verify_backends_connectivity(self):
        if self.settings.enable_dellsc_backend:
            logger.debug("Verifying dellsc backend connectivity")
            setts = self.settings
            cmd = "source ~/stackrc;nova list | grep compute"
            re = Ssh.execute_command_tty(self.director_ip,
                                         setts.director_install_account_user,
                                         setts.director_install_account_pwd,
                                         cmd)
            ls = re[0].split("\n")
            ls.pop()
            compute_node_ip = ls[0].split("|")[6].split("=")[1]

            cmd = "source ~/stackrc;nova list | grep controller"
            re = Ssh.execute_command_tty(self.director_ip,
                                         setts.director_install_account_user,
                                         setts.director_install_account_pwd,
                                         cmd)
            ls = re[0].split("\n")
            ls.pop()
            controller_node_ip = ls[0].split("|")[6].split("=")[1]

            logger.debug("Verify Controller nodes can ping the san ip")
            cmd = "ssh heat-admin@" + controller_node_ip +\
                  " sudo ping " + self.settings.dellsc_san_ip +\
                  " -c 1 -w 30 "
            re = Ssh.execute_command_tty(self.director_ip,
                                         setts.director_install_account_user,
                                         setts.director_install_account_pwd,
                                         cmd)
            if self.ping_success not in re[0]:
                raise AssertionError(controller_node_ip +
                                     " cannot ping the dellsc san ip " +
                                     self.settings.dellsc_san_ip)

            logger.debug("Verify Make sure ISCSI access work from Compute \
                         & Controller nodes")
            for each in compute_node_ip, controller_node_ip:
                cmd = "ssh heat-admin@" + each +\
                      " sudo iscsiadm -m discovery -t sendtargets -p " +\
                      self.settings.dellsc_iscsi_ip_address +\
                      ":" + self.settings.dellsc_iscsi_port
                re = Ssh.execute_command_tty(
                                   self.director_ip,
                                   setts.director_install_account_user,
                                   setts.director_install_account_pwd,
                                   cmd)
                if "com.compellent" not in re[0]:
                    raise AssertionError(
                                   each +
                                   " not able to validate ISCSI access to " +
                                   self.settings.dellsc_iscsi_ip_address +
                                   ":" + self.settings.dellsc_iscsi_port)
Example #5
def deploy():
    logger.debug("=================================")
    logger.info("=== Starting up ...")
    logger.debug("=================================")

    settings, args = get_settings()

    csah = CSah()
    # CSah health checks
    csah.power_off_cluster_nodes()
    csah.cleanup_sah()
    csah.delete_bootstrap_vm()

    csah.run_playbooks()
    csah.create_bootstrap_vm()
    csah.wait_for_bootstrap_ready()

    csah.pxe_boot_controllers()
    csah.wait_for_controllers_ready()

    csah.complete_bootstrap_process()
    csah.pxe_boot_computes()
    csah.wait_for_operators_ready()

    logger.info("- Done")
Example #6
    def verify_undercloud_installed(self):
        logger.debug("Verify the undercloud installed properly")
        cmd = "stat ~/stackrc"
        setts = self.settings
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd, cmd)
        if "No such file or directory" in re[0]:
            raise AssertionError(
                "Director & Undercloud did not install properly, "
                "check /pilot/install-director.log for details")
        cmd = " grep \"Undercloud install complete\" " \
              "~/pilot/install-director.log"
        setts = self.settings
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd, cmd)
        if "Undercloud install complete." not in re[0]:
            raise AssertionError(
                "Director & Undercloud did not install properly,"
                " check /pilot/install-director.log for details")

        cmd = "cat "\
              "~/pilot/install-director.log"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd, cmd)
        if "There are no enabled repos" in re[0]:
            raise AssertionError(
                "Unable to attach to pool ID while updating "
                "the overcloud image")

        cmd = "source ~/stackrc;glance image-list"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd, cmd)
        if "overcloud-full" not in re[0]:
            raise AssertionError(
                "Unable to find the overcloud image in glance - "
                "check the install-director.log for possible package "
                "download errors")

        cmd = "cat "\
              " /etc/rhosp-release"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd, cmd)
        logger.debug("Undercloud version : " + re[0])
Example #7
 def verify_nodes_registered_in_ironic_edge(self, node_type):
     expected_node_type_count = len(self.settings.node_types_map[node_type])
     cmd = ("source ~/stackrc;openstack baremetal node list "
            "--fields properties -f json")
     setts = self.settings
     stdout, stderr, exit_status = Ssh.execute_command_tty(
         self.director_ip,
         setts.director_install_account_user,
         setts.director_install_account_pwd,
         cmd)
     nodes = json.loads(stdout)
     registered_node_type_count = 0
     for node in nodes:
         props = node["Properties"]
         if "node_type" in props and props["node_type"] == node_type:
             registered_node_type_count += 1
     if expected_node_type_count != registered_node_type_count:
         raise AssertionError(
             "Expected number of nodes registered in Ironic for node type: "
             "{}, does not match, expected: {}, "
             "imported: {}".format(node_type,
                                   str(expected_node_type_count),
                                   str(registered_node_type_count)))
     logger.info("Validated node type: %s, "
                 "has the correct number of nodes imported "
                 "into Ironic: %s", node_type, registered_node_type_count)
Example #8
 def verify_introspection_sucessfull_edge(self, node_type):
     node_type_count = len(self.settings.node_types_map[node_type])
     cmd = ("source ~/stackrc;openstack baremetal node list "
            "--fields uuid properties provision_state -f json")
     setts = self.settings
     stdout, stderr, exit_status = Ssh.execute_command_tty(
         self.director_ip, setts.director_install_account_user,
         setts.director_install_account_pwd, cmd)
     nodes = json.loads(stdout)
     introspected_node_type_count = 0
     for node in nodes:
         props = node["Properties"]
         uuid = node["UUID"]
         state = node["Provisioning State"]
         if ("node_type" in props and props["node_type"] == node_type
                 and state == "available"):
             introspected_node_type_count += 1
     if node_type_count != introspected_node_type_count:
         raise AssertionError(
             "Expected number of nodes introspected for node type: "
             "{}, does not match, expected: {}, "
             "introspected: {}".format(node_type, str(node_type_count),
                                       str(introspected_node_type_count)))
     logger.info(
         "Validated node type: %s, "
         "has the correct number of nodes introspected: %s", node_type,
         introspected_node_type_count)
Example #9
 def configure_ntp(self):
     logger.info("Customizing NTP configuration on all OpenShift nodes")
     sets = self.settings
     FileHelper.replace_expression(self.chrony_file, 'CSAH_IP',
                                   sets.csah_node.os_ip)
     with open(self.chrony_file, "r") as chrony:
         data = chrony.read()
     encoded = base64.b64encode(data.encode('utf-8'))
     FileHelper.replace_expression(self.worker_ntp_manifest,
                                   'CHRONY_BASE64', encoded.decode())
     FileHelper.replace_expression(self.master_ntp_manifest,
                                   'CHRONY_BASE64', encoded.decode())
     cmd = 'su - core -c \' oc apply -f ' + self.master_ntp_manifest + '\''
     re = Ssh.execute_command_tty("localhost", "root",
                                  self.settings.csah_root_pwd, cmd)
     cmd = 'su - core -c \' oc apply -f ' + self.worker_ntp_manifest + '\''
     re = Ssh.execute_command_tty("localhost", "root",
                                  self.settings.csah_root_pwd, cmd)
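
For reference, the substitution above inlines the chrony config into the NTP manifests as base64. A minimal sketch of just the encode step plus a round-trip sanity check (the local path is hypothetical):

import base64

with open("chrony.conf") as conf:   # stands in for self.chrony_file
    data = conf.read()
encoded = base64.b64encode(data.encode("utf-8")).decode()
assert base64.b64decode(encoded).decode("utf-8") == data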
Example #10
 def wait_for_operators_ready(self):
     logger.info(" - Wait for all operators to be available")
     bOperatorsReady = False
     while bOperatorsReady is False:
         cmd = 'su - core -c \'oc get csr -o name | xargs oc adm certificate approve\''
         Ssh.execute_command_tty("localhost", "root",
                                 self.settings.csah_root_pwd, cmd)
         cmd = 'su - core -c \'oc get clusteroperators\''
         re = Ssh.execute_command_tty("localhost", "root",
                                      self.settings.csah_root_pwd, cmd)
         notReady = []
         ls = str(re).split('\\r\\n')
         for each in ls:
             cols = each.split()
             if len(cols) > 2 and "False" in cols[2]:
                 notReady.append(cols[0])
         if len(notReady) > 0:
             logger.debug(" Operators still not ready : " + str(notReady))
             time.sleep(120)
         else:
             logger.info(" All operators are up & running ")
             bOperatorsReady = True
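
The loop above works on the repr of the SSH result tuple; given the raw stdout string instead, the same test is simpler. A sketch, assuming NAME in column 0 and AVAILABLE in column 2 as `oc get clusteroperators` prints them (the helper name is illustrative):

def unavailable_operators(stdout):
    # Operator names whose AVAILABLE column reads False.
    not_ready = []
    for line in stdout.splitlines():
        cols = line.split()
        if len(cols) > 2 and cols[2] == "False":
            not_ready.append(cols[0])
    return not_ready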
Example #11
    def verify_overcloud_deployed(self):
        logger.debug("Verify the overcloud installed properly")
        setts = self.settings
        overcloud_name = setts.overcloud_name

        # Verify the overcloud RC file was created
        cmd = "test -f ~/" + overcloud_name + "rc; echo $?;"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        # Have to strip non-printing chars as re[0] contains newline '\n'
        _resp = re[0].strip()
        # If director is turned off or not deployed
        # re[0] == "host not up", handle this case by checking len(re[0])
        is_conf = not bool(int(_resp)) if len(_resp) == 1 else False
        if is_conf is False:
            msg = ("Overcloud RC file missing, either the overcloud has not "
                   "been deployed yet or there was an issue during "
                   "the deployment, such as Director VM being down or a heat "
                   "stack deployment failure")
            logger.warning(msg)
            return True, AssertionError(msg)

        # Check log for successful deployment
        success = "Overcloud Deployed"
        cmd = "grep \"" + success + "\" " + "~/pilot/overcloud_deploy_out.log"
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd,
                                     cmd)
        if success not in re[0]:
            msg = ("Overcloud did not install successfully, "
                   "check ~/pilot/overcloud_deploy_out.log")
            logger.warning(msg)
            return True, AssertionError(msg)

        logger.info("Overcloud install successful")
        return False, None
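
Note the "echo $?" round-trip above: the shell's exit status is printed to stdout and parsed back out. Where execute_command_tty already returns the exit status (the three-value unpacking in the Ironic checkpoints suggests it does), the check can be direct; a sketch under that assumption:

stdout, stderr, exit_status = Ssh.execute_command_tty(
    self.director_ip,
    setts.director_install_account_user,
    setts.director_install_account_pwd,
    "test -f ~/" + overcloud_name + "rc")
rc_file_present = (exit_status == 0)  # no stdout parsing needed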
Example #12
 def verify_introspection_sucessfull(self):
     logger.debug("Verify the introspection did not encounter any errors")
     cmd = "source ~/stackrc;openstack baremetal node list | grep None"
     setts = self.settings
     re = Ssh.execute_command_tty(self.director_ip,
                                  setts.director_install_account_user,
                                  setts.director_install_account_pwd, cmd)
     # TODO :: if node failed introspection - set it to PXE and reboot
     ls_nodes = re[0].split("\n")
     ls_nodes.pop()
     for node in ls_nodes:
         state = node.split("|")[5]
         if "available" not in state:
             raise AssertionError(
                 "Node state not available post bulk introspection" +
                 "\n " + re[0])
Example #13
 def verify_nodes_registered_in_ironic(self):
     logger.debug("Verify the expected amount of nodes imported in ironic")
     cmd = "source ~/stackrc;openstack baremetal node list | grep None"
     setts = self.settings
     re = Ssh.execute_command_tty(self.director_ip,
                                  setts.director_install_account_user,
                                  setts.director_install_account_pwd, cmd)
     ls_nodes = re[0].split("\n")
     ls_nodes.pop()
     expected_nodes = len(self.settings.controller_nodes) + len(
         self.settings.compute_nodes) + len(self.settings.ceph_nodes)
     if len(ls_nodes) != expected_nodes:
         raise AssertionError(
             "Expected amount of nodes registered in Ironic "
             "does not add up " + str(len(ls_nodes)) + "/" +
             str(expected_nodes))
Example #14
 def wait_for_controllers_ready(self):
     logger.info("Wait for the control nodes to be ready")
     time.sleep(180)
     for node in self.settings.controller_nodes:
         bNodeReady = False
         while bNodeReady is False:
             cmd = 'ssh -t root@localhost "sudo su - core -c \' ssh -o \\"StrictHostKeyChecking no \\" ' + node.name + ' ls -lart /etc/kubernetes/manifests\'"'
             ls  = Ssh.execute_command_tty("localhost",
                                           "root",
                                           self.settings.csah_root_pwd,
                                           cmd)
             if "kube-scheduler-pod.yaml" and "kube-controller-manager-pod.yaml" and "kube-apiserver-pod.yaml" and "etcd-pod.yaml" in str(ls):
                 bNodeReady = True
                 logger.debug(node.name  + " is ready")
             else:
                 logger.debug("Waiting for" + node.name + " to be readdy...")
                 time.sleep(30)
Example #15
 def wait_for_bootstrap_ready(self):
     bBootstrap_ready = False
     while bBootstrap_ready is False:
         cmd = 'sudo su - core -c \'ssh -o "StrictHostKeyChecking no " bootstrap sudo ss -tulpn | grep -E "6443|22623|2379"\''
         openedPorts = Ssh.execute_command_tty("localhost", "root",
                                               self.settings.csah_root_pwd,
                                               cmd)
         if ("22623" in str(openedPorts)) and (
                 "2379" in str(openedPorts)) and ("6443"
                                                  in str(openedPorts)):
             logger.info(" ,, boostrap UP! ")
             bBootstrap_ready = True
         re = Ssh.execute_command("localhost", "root",
                                  self.settings.csah_root_pwd,
                                  "virsh list --all | grep bootstrapkvm")[0]
         if "shut off" in re:
             bPXe_complete = True
             logger.info("- Powering on the bootstrap VM")
             Ssh.execute_command("localhost", "root",
                                 self.settings.csah_root_pwd,
                                 "virsh start bootstrapkvm")
         time.sleep(60)
     logger.info("- Bootstrap VM is ready")
Example #16
    def verify_nodes_registered_in_ironic(self, node_type=None):
        logger.debug("Verify the expected nodes imported in ironic")

        cmd = ("source ~/stackrc; openstack baremetal node list "
               "--long -c UUID -c 'Driver Info' -f json")
        setts = self.settings
        re = Ssh.execute_command_tty(self.director_ip,
                                     setts.director_install_account_user,
                                     setts.director_install_account_pwd, cmd)
        ls_nodes = json.loads(re[0])
        drac_addresses = list(
            map(lambda _n: _n["Driver Info"]["drac_address"], ls_nodes))

        missing = list(
            filter(lambda x: x.idrac_ip not in drac_addresses,
                   setts.all_overcloud_nodes))

        if missing:
            raise AssertionError(
                "The following {:d} overcloud nodes defined in the "
                ".properties were not found in the ironic "
                "database {}".format(len(missing), str(missing)))
        if node_type:
            self.verify_nodes_registered_in_ironic_edge(node_type)
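
The map/filter pair above amounts to a membership test over iDRAC addresses; the same check reads naturally as a set lookup. A behavior-equivalent sketch (assuming idrac_ip values match drac_address strings exactly):

registered = {n["Driver Info"]["drac_address"] for n in ls_nodes}
missing = [node for node in setts.all_overcloud_nodes
           if node.idrac_ip not in registered]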
Example #17
def deploy():
    ret_code = 0
    # noinspection PyBroadException
    try:

        logger.debug("=================================")
        logger.info("=== Starting up ...")
        logger.debug("=================================")

        parser = argparse.ArgumentParser(
            description='JetPack 10.x deployer')
        parser.add_argument('-s', '--settings',
                            help='ini settings file, e.g settings/acme.ini',
                            required=True)
        parser.add_argument('-undercloud_only', '--undercloud_only',
                            help='Only reinstall the undercloud',
                            action='store_true', required=False)
        parser.add_argument('-overcloud_only', '--overcloud_only',
                            help='Only reinstall the overcloud',
                            action='store_true', required=False)
        parser.add_argument('-skip_dashboard_vm', '--skip_dashboard_vm',
                            help='Do not reinstall the Dashboard VM',
                            action='store_true',
                            required=False)
        parser.add_argument('-validate_only', '--validate_only',
                            help='No deployment - just validate config values',
                            action='store_true',
                            required=False)
        args, others = parser.parse_known_args()
        if len(others) > 0:
            parser.print_help()
            msg = "Invalid argument(s) :"
            for each in others:
                msg += " " + each + ";"
            raise AssertionError(msg)
        if args.validate_only is True:
            logger.info("Only validating ini/properties config values")
        else:
            if args.overcloud_only is True:
                logger.info("Only redeploying the overcloud")
            if args.skip_dashboard_vm is True:
                logger.info("Skipping Dashboard VM install")

        logger.debug("loading settings files " + args.settings)
        settings = Settings(args.settings)
        logger.info("Settings .ini: " + settings.settings_file)
        logger.info("Settings .properties " + settings.network_conf)
        settings.get_version_info()
        logger.info("source version # : " + settings.source_version)
        tester = Checkpoints()
        tester.verify_deployer_settings()
        if args.validate_only is True:
            logger.info("Settings validated")
            os._exit(0)

        if settings.retreive_switches_config is True:
            tester.retreive_switches_config()

        non_sah_nodes = (settings.controller_nodes +
                         settings.compute_nodes +
                         settings.ceph_nodes)

        sah_node = Sah()

        tester.sah_health_check()
        logger.info("Uploading configs/iso/scripts..")
        sah_node.clear_known_hosts()
        sah_node.handle_lock_files()
        sah_node.upload_iso()
        sah_node.upload_director_scripts()

        director_ip = settings.director_node.public_api_ip
        if args.overcloud_only is False:
            Ssh.execute_command(director_ip,
                                "root",
                                settings.director_node.root_password,
                                "subscription-manager remove --all")
            Ssh.execute_command(director_ip,
                                "root",
                                settings.director_node.root_password,
                                "subscription-manager unregister")
            sah_node.delete_director_vm()

            logger.info("=== create the director vm")
            sah_node.create_director_vm()
            tester.director_vm_health_check()

            logger.info("Preparing the Director VM")
            director_vm = Director()
            director_vm.apply_internal_repos()

            logger.debug(
                "===  Uploading & configuring undercloud.conf . "
                "environment yaml ===")
            director_vm.upload_update_conf_files()

            logger.info("=== installing the director & undercloud ===")
            director_vm.inject_ssh_key()
            director_vm.upload_cloud_images()
            director_vm.install_director()
            tester.verify_undercloud_installed()
            if args.undercloud_only:
                return
        else:
            logger.info("=== Skipped Director VM/Undercloud install")
            director_vm = Director()
            logger.debug("Deleting overcloud stack")
            director_vm.delete_overcloud()

        if args.skip_dashboard_vm is False:
            logger.debug("Delete the Dashboard VM")
            dashboard_ip = settings.dashboard_node.public_api_ip
            logger.debug(
                Ssh.execute_command(dashboard_ip,
                                    "root",
                                    settings.dashboard_node.root_password,
                                    "subscription-manager remove --all"))
            Ssh.execute_command(dashboard_ip,
                                "root",
                                settings.dashboard_node.root_password,
                                "subscription-manager unregister")

            sah_node.delete_dashboard_vm()

            logger.info("=== creating Dashboard VM")
            sah_node.create_dashboard_vm()

            tester.dashboard_vm_health_check()

        else:
            logger.info("Skipped the Dashboard VM install")

        logger.info("=== Preparing the overcloud ===")

        # The network-environment.yaml must be setup for use during DHCP
        # server configuration
        director_vm.setup_net_envt()
        director_vm.configure_dhcp_server()
        director_vm.node_discovery()
        director_vm.configure_idracs()
        director_vm.import_nodes()
        director_vm.node_introspection()
        director_vm.update_sshd_conf()
        director_vm.assign_node_roles()
        director_vm.revert_sshd_conf()

        director_vm.setup_templates()
        logger.info("=== Installing the overcloud ")
        logger.debug("installing the overcloud ... this might take a while")
        director_vm.deploy_overcloud()
        cmd = "source ~/stackrc; openstack stack list | grep " \
              + settings.overcloud_name + " | awk '{print $6}'"
        overcloud_status = \
            Ssh.execute_command_tty(director_ip,
                                    settings.director_install_account_user,
                                    settings.director_install_account_pwd,
                                    cmd)[0]
        logger.debug("=== Overcloud stack state : " + overcloud_status)
        if settings.hpg_enable:
            logger.info(
                " HugePages has been successfully configured with size: " +
                settings.hpg_size)
        if settings.numa_enable:
            logger.info(
                " NUMA has been successfully configured"
                " with hostos_cpus count: " +
                settings.hostos_cpu_count)

        logger.info("====================================")
        logger.info(" OverCloud deployment status: " + overcloud_status)
        logger.info(" log : /auto_results/ ")
        logger.info("====================================")
        if "CREATE_COMPLETE" not in overcloud_status:
            raise AssertionError(
                "OverCloud did not install properly : " + overcloud_status)

        director_vm.summarize_deployment()
        tester.verify_computes_virtualization_enabled()
        tester.verify_backends_connectivity()
        if args.skip_dashboard_vm is False:
            director_vm.configure_dashboard()
        director_vm.enable_fencing()
        director_vm.enable_instance_ha()
        director_vm.configure_tempest()
        director_vm.run_sanity_test()
        run_tempest()
        logger.info("Deployment summary info; useful ip's etc.. " +
                    "/auto_results/deployment_summary.log")

    except:
        logger.error(traceback.format_exc())
        e = sys.exc_info()[0]
        logger.error(e)
        print(e)
        print(traceback.format_exc())
        ret_code = 1
    logger.info("log : /auto_results/ ")
    sys.exit(ret_code)
Example #18
 def run_tty(self, command):
     return Ssh.execute_command_tty(self.ip,
                                    self.user,
                                    self.pwd,
                                    command)
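
A hypothetical call site for this thin wrapper, assuming a host object that carries ip/user/pwd (e.g. the director node) and the (stdout, stderr, exit_status) return shape seen in the Ironic checkpoints above:

import json

# 'director' is a hypothetical object exposing run_tty() as defined above
stdout, stderr, exit_status = director.run_tty(
    "source ~/stackrc; openstack baremetal node list -f json")
nodes = json.loads(stdout)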
Example #19
 def complete_cluster_setup(self):
     logger.info("- Complete the cluster setup")
     cmd = 'su - core -c \'./openshift-install --dir=openshift wait-for install-complete --log-level debug\''
     Ssh.execute_command_tty("localhost", "root",
                             self.settings.csah_root_pwd, cmd)
Example #20
def deploy():
    ret_code = 0
    # noinspection PyBroadException

    logger.debug("=================================")
    logger.info("=== Starting up ...")
    logger.debug("=================================")
    try:
        settings, args = get_settings()
        if args.validate_only is True:
            logger.info("Only validating ini/properties config values")
        else:
            if args.overcloud_only is True:
                logger.info("Only redeploying the overcloud")
            if args.skip_dashboard_vm is True:
                logger.info("Skipping Dashboard VM install")

        logger.info("Settings .ini: " + settings.settings_file)
        logger.info("Settings .properties " + settings.network_conf)
        settings.get_version_info()
        logger.info("source version # : " + settings.source_version)
        tester = Checkpoints()
        tester.verify_deployer_settings()
        if args.validate_only is True:
            logger.info("Settings validated")
            os._exit(0)
        tester.retreive_switches_config()

        non_sah_nodes = (settings.controller_nodes + settings.compute_nodes +
                         settings.ceph_nodes)

        sah_node = Sah()

        tester.sah_health_check()
        # mutually exclusive command, configure tempest and quit.
        if args.tempest_config_only:
            logger.info("Only (re-)generating tempest.conf")
            director_vm = Director()
            director_vm.configure_tempest()
            os._exit(0)

        # mutually exclusive command, run tempest and quit.
        if args.run_tempest_only:
            logger.info("Only running tempest, will configure " +
                        "tempest.conf if needed.")
            director_vm = Director()
            director_vm.run_tempest()
            os._exit(0)

        logger.info("Uploading configs/iso/scripts.")
        sah_node.clear_known_hosts()
        sah_node.handle_lock_files()
        sah_node.upload_iso()
        sah_node.upload_director_scripts()

        director_ip = settings.director_node.public_api_ip
        if args.overcloud_only is False:
            Ssh.execute_command(director_ip, "root",
                                settings.director_node.root_password,
                                "subscription-manager remove --all")
            Ssh.execute_command(director_ip, "root",
                                settings.director_node.root_password,
                                "subscription-manager unregister")
            sah_node.delete_director_vm()

            logger.info("=== create the director vm")
            sah_node.create_director_vm()
            tester.director_vm_health_check()

            logger.info("Preparing the Director VM")
            director_vm = Director()
            director_vm.apply_internal_repos()

            logger.debug("===  Uploading & configuring undercloud.conf . "
                         "environment yaml ===")
            director_vm.upload_update_conf_files()

            logger.info("=== installing the director & undercloud ===")
            director_vm.inject_ssh_key()
            director_vm.upload_cloud_images()
            director_vm.install_director()
            tester.verify_undercloud_installed()
            if args.undercloud_only:
                return
        else:
            logger.info("=== Skipped Director VM/Undercloud install")
            director_vm = Director()
            logger.debug("Deleting overcloud stack")
            director_vm.delete_overcloud()

        if args.skip_dashboard_vm is False:
            logger.debug("Delete the Dashboard VM")
            dashboard_ip = settings.dashboard_node.public_api_ip
            logger.debug(
                Ssh.execute_command(dashboard_ip, "root",
                                    settings.dashboard_node.root_password,
                                    "subscription-manager remove --all"))
            Ssh.execute_command(dashboard_ip, "root",
                                settings.dashboard_node.root_password,
                                "subscription-manager unregister")

            sah_node.delete_dashboard_vm()

            logger.info("=== creating Dashboard VM")
            sah_node.create_dashboard_vm()

            tester.dashboard_vm_health_check()

        else:
            logger.info("Skipped the Dashboard VM install")

        logger.info("=== Preparing the overcloud ===")

        # The network-environment.yaml must be setup for use during DHCP
        # server configuration
        director_vm.setup_net_envt()
        director_vm.configure_dhcp_server()
        director_vm.node_discovery()
        director_vm.configure_idracs()
        director_vm.import_nodes()
        director_vm.node_introspection()
        director_vm.update_sshd_conf()
        director_vm.assign_node_roles()
        director_vm.revert_sshd_conf()

        director_vm.setup_templates()
        logger.info("=== Installing the overcloud ")
        logger.debug("installing the overcloud ... this might take a while")
        director_vm.deploy_overcloud()
        cmd = "source ~/stackrc; openstack stack list | grep " \
              + settings.overcloud_name + " | awk '{print $8}'"
        overcloud_status = \
            Ssh.execute_command_tty(director_ip,
                                    settings.director_install_account_user,
                                    settings.director_install_account_pwd,
                                    cmd)[0]
        logger.debug("=== Overcloud stack state : " + overcloud_status)
        if settings.hpg_enable:
            logger.info(
                " HugePages has been successfully configured with size: " +
                settings.hpg_size)
        if settings.numa_enable:
            logger.info(" NUMA has been successfully configured"
                        " with hostos_cpus count: " +
                        settings.hostos_cpu_count)

        logger.info("====================================")
        logger.info(" OverCloud deployment status: " + overcloud_status)
        logger.info(" log : /auto_results/ ")
        logger.info("====================================")
        if "CREATE_COMPLETE" not in overcloud_status:
            raise AssertionError("OverCloud did not install properly : " +
                                 overcloud_status)

        director_vm.summarize_deployment()
        tester.verify_computes_virtualization_enabled()
        tester.verify_backends_connectivity()
        if args.skip_dashboard_vm is False:
            director_vm.configure_dashboard()
        director_vm.enable_fencing()
        director_vm.run_sanity_test()

        external_sub_guid = director_vm.get_sanity_subnet()
        if external_sub_guid:
            director_vm.configure_tempest()

        run_tempest()

        logger.info("Deployment summary info; useful ip's etc.. " +
                    "/auto_results/deployment_summary.log")

    except:  # noqa: E722
        logger.error(traceback.format_exc())
        e = sys.exc_info()[0]
        logger.error(e)
        print(e)
        print(traceback.format_exc())
        ret_code = 1
    logger.info("log : /auto_results/ ")
    sys.exit(ret_code)