Example #1
File: bosi.py Project: xinwu/bosi-1
def main():
    # Parse configuration
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config-file", required=True,
                        help="BCF YAML configuration file")
    parser.add_argument("-m", "--deploy-mode", required=False,
                        choices=['pfabric', 'pvfabric'])
    parser.add_argument('-f', "--fuel-cluster-id", required=False,
                        help=("Fuel cluster ID. Fuel settings may override "
                              "YAML configuration. "
                              "Please refer to config.yaml"))
    parser.add_argument('-t', "--tag", required=False,
                        help="Deploy to tagged nodes only.")
    parser.add_argument('--cleanup', action='store_true', default=False,
                        help="Clean up existing routers, "
                             "networks and projects.")
    parser.add_argument('--skip-ivs-version-check', action='store_true',
                        default=False, help="Skip ivs version check.")
    parser.add_argument('--verify', action='store_true', default=False,
                        help="Verify service status for compute nodes "
                             "after deployment.")
    parser.add_argument('--verifyonly', action='store_true', default=False,
                        help=("Verify service status for compute nodes "
                              "after deployment. Does not deploy BCF "
                              "specific changes."))
    parser.add_argument('--certificate-dir', required=False,
                        help=("The directory that has the certificates for "
                              "virtual switches. This option requires certificates "
                              "to be ready in the directory. This option will deploy "
                              "certificate to the corresponding node based on the mac "
                              "address. Virtual switch will talk TLS afterward."))
    parser.add_argument('--certificate-only', action='store_true', default=False,
                        help=("By turning on this flag, bosi will only deploy certificate "
                              "to each node. It requires --certificate-dir to be specified."))
    parser.add_argument('--generate-csr', action='store_true', default=False,
                        help=("By turning on this flag, bosi will generate csr on behalf of "
                              "virtual switches. User needs to certify these csr and use "
                              "--certificate-dir to specify the certificate directory."))
    parser.add_argument('--support', action='store_true', default=False,
                        help=("Collect openstack logs."))
    parser.add_argument('--upgrade-dir', required=False,
                        help=("The directory that has the packages for upgrade."))


    args = parser.parse_args()
    if args.certificate_only and (not args.certificate_dir):
        safe_print("--certificate-only requires the existence of --certificate-dir.\n")
        return

    with open(args.config_file, 'r') as config_file:
        config = yaml.load(config_file)
    # bosi is not used for redhat any more since 3.6
    rhosp = False
    deploy_bcf(config, args.deploy_mode, args.fuel_cluster_id, rhosp,
               args.tag, args.cleanup,
               args.verify, args.verifyonly,
               args.skip_ivs_version_check,
               args.certificate_dir, args.certificate_only,
               args.generate_csr, args.support,
               args.upgrade_dir)
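A note on the yaml.load call above: newer PyYAML releases deprecate calling yaml.load without an explicit Loader. A minimal sketch of the safer equivalent (assuming the configuration file contains only plain data with no custom tags; the path is illustrative):

import yaml

with open("config.yaml", "r") as config_file:
    # safe_load builds only basic Python types and avoids the missing-Loader warning
    config = yaml.safe_load(config_file)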
Example #2
File: bosi.py Project: xinwu/bosi-1
def worker_upgrade_node(q):
    while True:
        node = q.get()
        # copy ivs pkg to node
        Helper.copy_pkg_scripts_to_remote(node)

        # deploy node
        safe_print("Start to deploy %(fqdn)s\n" %
                   {'fqdn': node.fqdn})

        start_time = datetime.datetime.now()
        Helper.run_command_on_remote(node,
            (r'''sudo bash %(dst_dir)s/%(hostname)s_upgrade.sh''' %
            {'dst_dir': node.dst_dir,
             'hostname': node.hostname,
             'log': node.log}))
        end_time = datetime.datetime.now()

        # parse setup log
        diff = Helper.timedelta_total_seconds(end_time - start_time)
        node.set_time_diff(diff)
        node = Helper.update_last_log(node)
        node_dict[node.fqdn] = node
        time_dict[node.fqdn] = diff

        safe_print("Finish upgrading %(fqdn)s, cost time: %(diff).2f\n" %
                   {'fqdn': node.fqdn, 'diff': node.time_diff})
        q.task_done()
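Helper.timedelta_total_seconds is not shown in these examples. A plausible sketch, purely an assumption, is a Python 2.6-compatible stand-in for timedelta.total_seconds():

def timedelta_total_seconds(delta):
    # equivalent to delta.total_seconds(), which only exists on Python >= 2.7
    return (delta.microseconds +
            (delta.seconds + delta.days * 24 * 3600) * 10**6) / float(10**6)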
Example #3
File: bosi.py Project: xinwu/bosi-1
def certify_node_setup(q):
    while True:
        node = q.get()
        if node.certificate_dir:
            if not os.path.isfile("%s/ca.cert" % node.certificate_dir):
                safe_print("Missing ca.cert in %s\n" % node.certificate_dir)
                break
        Helper.certify_node(node)
        q.task_done()
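One caveat about certify_node_setup: when ca.cert is missing, the break leaves the loop without calling q.task_done(), so the certify_node_q.join() in the caller would wait on that item forever. A hedged variant (same Helper and safe_print names, not the project's actual code) that always acknowledges the item:

def certify_node_setup(q):
    while True:
        node = q.get()
        try:
            if node.certificate_dir and not os.path.isfile(
                    "%s/ca.cert" % node.certificate_dir):
                safe_print("Missing ca.cert in %s\n" % node.certificate_dir)
                continue  # skip this node but keep the worker alive
            Helper.certify_node(node)
        finally:
            q.task_done()  # always acknowledge, so join() can return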
Example #4
def certify_node_setup(q):
    while True:
        node = q.get()
        if node.certificate_dir:
            if not os.path.isfile("%s/ca.cert" % node.certificate_dir):
                safe_print("Missing ca.cert in %s\n" % node.certificate_dir)
                break
        Helper.certify_node(node)
        q.task_done()
Example #5
def worker_setup_node(q):
    while True:
        node = q.get()
        # copy ivs pkg to node
        Helper.copy_pkg_scripts_to_remote(node)

        # deploy node
        safe_print("Start to deploy %(fqdn)s\n" % {'fqdn': node.fqdn})
        if node.cleanup and node.role == const.ROLE_NEUTRON_SERVER:
            Helper.run_command_on_remote(
                node, (r'''sudo bash %(dst_dir)s/%(hostname)s_ospurge.sh''' % {
                    'dst_dir': node.dst_dir,
                    'hostname': node.hostname,
                    'log': node.log
                }))

        # a random delay to smooth apt-get/yum
        delay = random.random() * 10.0
        time.sleep(delay)

        start_time = datetime.datetime.now()
        Helper.run_command_on_remote(
            node, (r'''sudo bash %(dst_dir)s/%(hostname)s.sh''' % {
                'dst_dir': node.dst_dir,
                'hostname': node.hostname,
                'log': node.log
            }))
        end_time = datetime.datetime.now()

        # parse setup log
        diff = Helper.timedelta_total_seconds(end_time - start_time)
        node.set_time_diff(diff)
        node = Helper.update_last_log(node)
        node_dict[node.fqdn] = node
        time_dict[node.fqdn] = diff

        # when deploying T5 on UBUNTU, reboot compute nodes
        Helper.reboot_if_necessary(node)

        safe_print("Finish deploying %(fqdn)s, cost time: %(diff).2f\n" % {
            'fqdn': node.fqdn,
            'diff': node.time_diff
        })
        q.task_done()
Example #6
File: bosi.py Project: zoirboy/bosi
def worker_upgrade_or_sriov_node(q):
    while True:
        node = q.get()
        # copy ivs pkg to node
        Helper.copy_pkg_scripts_to_remote(node)

        # deploy node
        safe_print("Start to deploy %(fqdn)s\n" %
                   {'fqdn': node.fqdn})

        start_time = datetime.datetime.now()
        if node.role == const.ROLE_SRIOV:
            Helper.run_command_on_remote(node,
                (r'''sudo bash %(dst_dir)s/%(hostname)s_sriov.sh''' %
                {'dst_dir': node.dst_dir,
                 'hostname': node.hostname,
                 'log': node.log}))
        elif node.role in const.DPDK_ROLES:
            Helper.run_command_on_remote(node,
                (r'''sudo bash %(dst_dir)s/%(hostname)s_dpdk.sh''' %
                {'dst_dir': node.dst_dir,
                 'hostname': node.hostname,
                 'log': node.log}))
        else:
            Helper.run_command_on_remote(node,
                (r'''sudo bash %(dst_dir)s/%(hostname)s_upgrade.sh''' %
                {'dst_dir': node.dst_dir,
                 'hostname': node.hostname,
                 'log': node.log}))
        end_time = datetime.datetime.now()

        # parse setup log
        diff = Helper.timedelta_total_seconds(end_time - start_time)
        node.set_time_diff(diff)
        node = Helper.update_last_log(node)
        node_dict[node.fqdn] = node
        time_dict[node.fqdn] = diff

        safe_print("Finish executing script for node %(fqdn)s, "
                   "cost time: %(diff).2f\n" %
                   {'fqdn': node.fqdn, 'diff': node.time_diff})
        q.task_done()
Example #7
File: bosi.py Project: xinwu/bosi-1
def worker_setup_node(q):
    while True:
        node = q.get()
        # copy ivs pkg to node
        Helper.copy_pkg_scripts_to_remote(node)

        # deploy node
        safe_print("Start to deploy %(fqdn)s\n" %
                   {'fqdn': node.fqdn})
        if node.cleanup and node.role == const.ROLE_NEUTRON_SERVER:
            Helper.run_command_on_remote(node,
                (r'''sudo bash %(dst_dir)s/%(hostname)s_ospurge.sh''' %
                {'dst_dir': node.dst_dir,
                 'hostname': node.hostname,
                 'log': node.log}))

        # a random delay to smooth apt-get/yum
        delay = random.random() * 10.0
        time.sleep(delay)

        start_time = datetime.datetime.now()
        Helper.run_command_on_remote(node,
            (r'''sudo bash %(dst_dir)s/%(hostname)s.sh''' %
            {'dst_dir': node.dst_dir,
             'hostname': node.hostname,
             'log': node.log}))
        end_time = datetime.datetime.now()

        # parse setup log
        diff = Helper.timedelta_total_seconds(end_time - start_time)
        node.set_time_diff(diff)
        node = Helper.update_last_log(node)
        node_dict[node.fqdn] = node
        time_dict[node.fqdn] = diff

        # when deploying T5 on UBUNTU, reboot compute nodes
        Helper.reboot_if_necessary(node)

        safe_print("Finish deploying %(fqdn)s, cost time: %(diff).2f\n" %
                   {'fqdn': node.fqdn, 'diff': node.time_diff})
        q.task_done()
Example #8
def upgrade_bcf(node_dic):
    for hostname, node in node_dic.iteritems():
        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n" % {
                'fqdn': node.fqdn,
                'error': node.error
            })
            continue
        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                       {'fqdn': node.fqdn})
            continue

        if node.os == const.CENTOS:
            Helper.generate_upgrade_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_upgrade_scripts_for_ubuntu(node)
        elif node.os == const.REDHAT:
            Helper.generate_upgrade_scripts_for_redhat(node)

        node_q.put(node)

    with open(const.LOG_FILE, "a") as log_file:
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    # Use multiple threads to setup compute nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_upgrade_or_sriov_node,
                             args=(node_q, ))
        t.daemon = True
        t.start()
    node_q.join()

    sorted_time_dict = OrderedDict(
        sorted(time_dict.items(), key=lambda x: x[1]))
    for fqdn, h_time in sorted_time_dict.items():
        safe_print("node: %(fqdn)s, time: %(time).2f\n" % {
            'fqdn': fqdn,
            'time': h_time
        })

    safe_print("Big Cloud Fabric deployment finished! "
               "Check %(log)s on each node for details.\n" %
               {'log': const.LOG_FILE})
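The threading pattern in upgrade_bcf (and in deploy_bcf further down) is the standard library's queue/worker idiom. A self-contained sketch of just that mechanism, with a dummy worker standing in for worker_upgrade_or_sriov_node:

import threading
import Queue  # the module is named queue on Python 3

MAX_WORKERS = 4
node_q = Queue.Queue()

def worker(q):
    while True:
        item = q.get()      # blocks until an item is available
        # ... per-node work would happen here ...
        q.task_done()       # mark this item as processed

for name in ["node-1", "node-2", "node-3"]:
    node_q.put(name)        # producer side: enqueue the work items

for _ in range(MAX_WORKERS):
    t = threading.Thread(target=worker, args=(node_q,))
    t.daemon = True         # daemon threads exit together with the main thread
    t.start()

node_q.join()               # returns once every put() has a matching task_done()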
Example #9
File: bosi.py Project: xinwu/bosi-1
def upgrade_bcf(node_dic):
    for hostname, node in node_dic.iteritems():
        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n" %
                      {'fqdn': node.fqdn, 'error': node.error})
            continue
        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                      {'fqdn': node.fqdn})
            continue

        if node.os == const.CENTOS:
            Helper.generate_upgrade_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_upgrade_scripts_for_ubuntu(node)
        elif node.os == const.REDHAT:
            Helper.generate_upgrade_scripts_for_redhat(node)

        node_q.put(node)

    with open(const.LOG_FILE, "a") as log_file:
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    # Use multiple threads to setup compute nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_upgrade_node, args=(node_q,))
        t.daemon = True
        t.start()
    node_q.join()

    sorted_time_dict = OrderedDict(sorted(time_dict.items(),
                                          key=lambda x: x[1]))
    for fqdn, h_time in sorted_time_dict.items():
        safe_print("node: %(fqdn)s, time: %(time).2f\n" %
                   {'fqdn': fqdn, 'time': h_time})

    safe_print("Big Cloud Fabric deployment finished! "
               "Check %(log)s on each node for details.\n" %
              {'log': const.LOG_FILE})
Example #10
File: bosi.py Project: xinwu/bosi-1
def deploy_bcf(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
               verify, verify_only, skip_ivs_version_check,
               certificate_dir, certificate_only, generate_csr,
               support, upgrade_dir):
    # Deploy setup node
    safe_print("Start to prepare setup node\n")
    env = Environment(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
                      skip_ivs_version_check, certificate_dir, upgrade_dir)
    Helper.common_setup_node_preparation(env)
    controller_nodes = []

    # Generate detailed node information
    safe_print("Start to setup Big Cloud Fabric\n")
    nodes_yaml_config = config['nodes'] if 'nodes' in config else None
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    if upgrade_dir:
        return upgrade_bcf(node_dic)

    if generate_csr:
        safe_print("Start to generate csr for virtual switches.\n")
        # create ~/csr and ~/key directory
        Helper.run_command_on_local("mkdir -p %s" % const.CSR_DIR)
        Helper.run_command_on_local("mkdir -p %s" % const.KEY_DIR)
        for hostname, node in node_dic.iteritems():
            if node.skip:
                safe_print("skip node %(fqdn)s due to %(error)s\n" %
                           {'fqdn': node.fqdn, 'error': node.error})
                continue

            if node.tag != node.env_tag:
                safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                           {'fqdn': node.fqdn})
                continue
            if node.deploy_mode == const.T6 and node.role == const.ROLE_COMPUTE:
                Helper.generate_csr(node)
        safe_print("Finish generating csr for virtual switches.\n")
        return

    # copy neutron config from neutron server to setup node
    for hostname, node in node_dic.iteritems():
        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_nodes.append(node)
    Helper.copy_neutron_config_from_controllers(controller_nodes)

    # check if vlan is the tenant network type for fuel environment
    if not Helper.check_if_vlan_is_used(controller_nodes):
        safe_print("tenant network type is not vlan. Stop deploying.\n")
        return

    # prepare keystone client from /etc/neutron/api-paste.ini
    #Helper.prepare_keystone_client(controller_nodes)

    # Generate scripts for each node
    for hostname, node in node_dic.iteritems():
        if support:
            support_node_q.put(node)

        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n" %
                       {'fqdn': node.fqdn, 'error': node.error})
            continue

        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                       {'fqdn': node.fqdn})
            continue

        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        elif node.os == const.REDHAT:
            Helper.generate_scripts_for_redhat(node)

        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_node_q.put(node)
        else:
            # python doesn't have deep copy for Queue, hence add to all
            node_q.put(node)
            verify_node_q.put(node)
            if node.deploy_mode == const.T6 and node.role == const.ROLE_COMPUTE:
                certify_node_q.put(node)

        if node.rhosp:
            Helper.chmod_node(node)

    with open(const.LOG_FILE, "a") as log_file:
        version = Helper.run_command_on_local("pip show bosi")
        log_file.write(str(version))
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    if support:
        safe_print("Start to collect logs.\n")
        # copy installer logs to ~/support
        Helper.run_command_on_local("mkdir -p %s" % const.SUPPORT_DIR)
        Helper.run_command_on_local("cp -r %(src)s %(dst)s" %
                                   {"src": const.LOG_FILE,
                                    "dst": const.SUPPORT_DIR})
        Helper.run_command_on_local("cp -r %(setup_node_dir)s/%(generated_script_dir)s %(dst)s" %
                                   {"setup_node_dir": env.setup_node_dir,
                                    "generated_script_dir": const.GENERATED_SCRIPT_DIR,
                                    "dst": const.SUPPORT_DIR})

        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=support_node_setup,
                                 args=(support_node_q,))
            t.daemon = True
            t.start()
        support_node_q.join()
        # compress ~/support
        Helper.run_command_on_local("cd /tmp; tar -czf support.tar.gz support")
        safe_print("Finish collecting logs. logs are at /tmp/support.tar.gz.\n")
        return

    # in case of verify_only or certificate_only, do not deploy
    if (not verify_only) and (not certificate_only):
        # Use single thread to setup controller nodes
        t = threading.Thread(target=worker_setup_node,
                             args=(controller_node_q,))
        t.daemon = True
        t.start()
        controller_node_q.join()

        # Use multiple threads to setup compute nodes
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=worker_setup_node, args=(node_q,))
            t.daemon = True
            t.start()
        node_q.join()

        sorted_time_dict = OrderedDict(sorted(time_dict.items(),
                                              key=lambda x: x[1]))
        for fqdn, h_time in sorted_time_dict.items():
            safe_print("node: %(fqdn)s, time: %(time).2f\n" %
                       {'fqdn': fqdn, 'time': h_time})

        safe_print("Big Cloud Fabric deployment finished! "
                   "Check %(log)s on each node for details.\n" %
                   {'log': const.LOG_FILE})

    if certificate_dir or certificate_only:
        # certify each node
        safe_print("Start to certify virtual switches.\n")
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=certify_node_setup,
                                 args=(certify_node_q,))
            t.daemon = True
            t.start()
        certify_node_q.join()
        safe_print('Certifying virtual switches done.\n')

    if verify or verify_only:
        # verify each node and post results
        safe_print("Verifying deployment for all compute nodes.\n")
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=verify_node_setup,
                                 args=(verify_node_q,))
            t.daemon = True
            t.start()
        verify_node_q.join()
        # print status
        # success nodes
        safe_print('Deployed successfully to: \n')
        for node_element in node_pass:
            safe_print(node_element + '\n')
        # failed nodes
        safe_print('Deployment to following failed: \n')
        for node_element in node_fail:
            safe_print(str(node_element) + ' : '
                       + str(node_fail[node_element]) + '\n')
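safe_print is called from several threads at once; a plausible sketch, again an assumption rather than the project's actual helper, is a lock-guarded write to stdout:

import sys
import threading

_print_lock = threading.Lock()

def safe_print(message):
    # serialize writes so output from concurrent workers does not interleave
    with _print_lock:
        sys.stdout.write(message)
        sys.stdout.flush()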
Example #11
def main():
    # Parse configuration
    parser = argparse.ArgumentParser()
    parser.add_argument("-c",
                        "--config-file",
                        required=True,
                        help="BCF YAML configuration file")
    parser.add_argument("-m",
                        "--deploy-mode",
                        required=False,
                        choices=['pfabric', 'pvfabric'])
    parser.add_argument('-f',
                        "--fuel-cluster-id",
                        required=False,
                        help=("Fuel cluster ID. Fuel settings may override "
                              "YAML configuration. "
                              "Please refer to config.yaml"))
    parser.add_argument('-r',
                        "--rhosp",
                        action='store_true',
                        default=False,
                        help="red hat openstack director is the installer "
                        "(upgrade only supported).")
    parser.add_argument('-t',
                        "--tag",
                        required=False,
                        help="Deploy to tagged nodes only.")
    parser.add_argument('--cleanup',
                        action='store_true',
                        default=False,
                        help="Clean up existing routers, "
                        "networks and projects.")
    parser.add_argument('--skip-ivs-version-check',
                        action='store_true',
                        default=False,
                        help="Skip ivs version check.")
    parser.add_argument('--verify',
                        action='store_true',
                        default=False,
                        help="Verify service status for compute nodes "
                        "after deployment.")
    parser.add_argument('--verifyonly',
                        action='store_true',
                        default=False,
                        help=("Verify service status for compute nodes "
                              "after deployment. Does not deploy BCF "
                              "specific changes."))
    parser.add_argument(
        '--certificate-dir',
        required=False,
        help=("The directory that has the certificates for "
              "virtual switches. This option requires certificates "
              "to be ready in the directory. This option will deploy "
              "certificate to the corresponding node based on the mac "
              "address. Virtual switch will talk TLS afterward."))
    parser.add_argument(
        '--certificate-only',
        action='store_true',
        default=False,
        help=("By turning on this flag, bosi will only deploy certificate "
              "to each node. It requires --certificate-dir to be specified."))
    parser.add_argument(
        '--generate-csr',
        action='store_true',
        default=False,
        help=("By turning on this flag, bosi will generate csr on behalf of "
              "virtual switches. User needs to certify these csr and use "
              "--certificate-dir to specify the certificate directory."))
    parser.add_argument('--support',
                        action='store_true',
                        default=False,
                        help=("Collect openstack logs."))
    parser.add_argument(
        '--upgrade-dir',
        required=False,
        help=("The directory that has the packages for upgrade."))
    parser.add_argument(
        '--offline-dir',
        required=False,
        help=("The directory that has the packages for offline installation."))
    parser.add_argument('--sriov',
                        action='store_true',
                        default=False,
                        help=("Deploy changes necessary for SRIOV mode to "
                              "nodes specified in config.yaml. Only works "
                              "with RHOSP."))

    args = parser.parse_args()
    if args.fuel_cluster_id and args.rhosp:
        safe_print("Cannot have both fuel and rhosp as openstack installer.\n")
        return
    if args.rhosp and not (args.upgrade_dir or args.sriov):
        safe_print("BOSI for RHOSP only supports upgrading packages or "
                   "SRIOV deployment.\n"
                   "Please specify --upgrade-dir or --sriov.\n")
        return
    if args.offline_dir and args.upgrade_dir:
        safe_print(
            "Cannot have both --offline-dir and --upgrade-dir. Please specify one."
        )
        return
    if args.certificate_only and (not args.certificate_dir):
        safe_print(
            "--certificate-only requires the existence of --certificate-dir.\n"
        )
        return
    if args.sriov and not args.rhosp:
        safe_print("SRIOV is only supported for RHOSP.\n"
                   "Please specify --rhosp.\n")
        # assumption: abort here, consistent with the validation checks above
        return

    with open(args.config_file, 'r') as config_file:
        config = yaml.load(config_file)
    deploy_bcf(config, args.deploy_mode, args.fuel_cluster_id, args.rhosp,
               args.tag, args.cleanup, args.verify, args.verifyonly,
               args.skip_ivs_version_check, args.certificate_dir,
               args.certificate_only, args.generate_csr, args.support,
               args.upgrade_dir, args.offline_dir, args.sriov)
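The manual checks after parse_args (for example, rejecting --offline-dir together with --upgrade-dir) could also be expressed with argparse itself. A short sketch of the standard-library alternative, not how bosi actually does it:

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('--offline-dir', required=False)
group.add_argument('--upgrade-dir', required=False)
# argparse now rejects "--offline-dir X --upgrade-dir Y" with a usage error
args = parser.parse_args()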
Example #12
def deploy_bcf(config, mode, fuel_cluster_id, rhosp, tag, cleanup, verify,
               verify_only, skip_ivs_version_check, certificate_dir,
               certificate_only, generate_csr, support, upgrade_dir,
               offline_dir, sriov):
    # Deploy setup node
    safe_print("Start to prepare setup node\n")
    env = Environment(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
                      skip_ivs_version_check, certificate_dir, upgrade_dir,
                      offline_dir, sriov)
    Helper.common_setup_node_preparation(env)
    controller_nodes = []

    # Generate detailed node information
    safe_print("Start to setup Big Cloud Fabric\n")
    nodes_yaml_config = config['nodes'] if 'nodes' in config else None
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    if upgrade_dir:
        return upgrade_bcf(node_dic)

    if sriov:
        return setup_sriov(node_dic)

    if generate_csr:
        safe_print("Start to generate csr for virtual switches.\n")
        # create ~/csr and ~/key directory
        Helper.run_command_on_local("mkdir -p %s" % const.CSR_DIR)
        Helper.run_command_on_local("mkdir -p %s" % const.KEY_DIR)
        for hostname, node in node_dic.iteritems():
            if node.skip:
                safe_print("skip node %(fqdn)s due to %(error)s\n" % {
                    'fqdn': node.fqdn,
                    'error': node.error
                })
                continue

            if node.tag != node.env_tag:
                safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                           {'fqdn': node.fqdn})
                continue
            if node.deploy_mode == const.T6 and node.role == const.ROLE_COMPUTE:
                Helper.generate_csr(node)
        safe_print("Finish generating csr for virtual switches.\n")
        return

    # copy neutron config from neutron server to setup node
    for hostname, node in node_dic.iteritems():
        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_nodes.append(node)
    Helper.copy_neutron_config_from_controllers(controller_nodes)

    # check if vlan is the tenant network type for fuel environment
    if not Helper.check_if_vlan_is_used(controller_nodes):
        safe_print("tenant network type is not vlan. Stop deploying.\n")
        return

    # prepare keystone client from /etc/neutron/api-paste.ini
    #Helper.prepare_keystone_client(controller_nodes)

    # Generate scripts for each node
    for hostname, node in node_dic.iteritems():
        if support:
            support_node_q.put(node)

        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n" % {
                'fqdn': node.fqdn,
                'error': node.error
            })
            continue

        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                       {'fqdn': node.fqdn})
            continue

        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        elif node.os == const.REDHAT:
            Helper.generate_scripts_for_redhat(node)

        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_node_q.put(node)
        else:
            # python doesn't have deep copy for Queue, hence add to all
            node_q.put(node)
            verify_node_q.put(node)
            if node.deploy_mode == const.T6 and node.role == const.ROLE_COMPUTE:
                certify_node_q.put(node)

        if node.rhosp:
            Helper.chmod_node(node)

    with open(const.LOG_FILE, "a") as log_file:
        version = Helper.run_command_on_local("pip show bosi")
        log_file.write(str(version))
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    if support:
        safe_print("Start to collect logs.\n")
        # copy installer logs to ~/support
        Helper.run_command_on_local("mkdir -p %s" % const.SUPPORT_DIR)
        Helper.run_command_on_local("cp -r %(src)s %(dst)s" % {
            "src": const.LOG_FILE,
            "dst": const.SUPPORT_DIR
        })
        Helper.run_command_on_local(
            "cp -r %(setup_node_dir)s/%(generated_script_dir)s %(dst)s" % {
                "setup_node_dir": env.setup_node_dir,
                "generated_script_dir": const.GENERATED_SCRIPT_DIR,
                "dst": const.SUPPORT_DIR
            })

        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=support_node_setup,
                                 args=(support_node_q, ))
            t.daemon = True
            t.start()
        support_node_q.join()
        # compress ~/support
        Helper.run_command_on_local("cd /tmp; tar -czf support.tar.gz support")
        safe_print(
            "Finish collecting logs. logs are at /tmp/support.tar.gz.\n")
        return

    # in case of verify_only or certificate_only, do not deploy
    if (not verify_only) and (not certificate_only):
        # Use single thread to setup controller nodes
        t = threading.Thread(target=worker_setup_node,
                             args=(controller_node_q, ))
        t.daemon = True
        t.start()
        controller_node_q.join()

        # Use multiple threads to setup compute nodes
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=worker_setup_node, args=(node_q, ))
            t.daemon = True
            t.start()
        node_q.join()

        sorted_time_dict = OrderedDict(
            sorted(time_dict.items(), key=lambda x: x[1]))
        for fqdn, h_time in sorted_time_dict.items():
            safe_print("node: %(fqdn)s, time: %(time).2f\n" % {
                'fqdn': fqdn,
                'time': h_time
            })

        safe_print("Big Cloud Fabric deployment finished! "
                   "Check %(log)s on each node for details.\n" %
                   {'log': const.LOG_FILE})

    if certificate_dir or certificate_only:
        # certify each node
        safe_print("Start to certify virtual switches.\n")
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=certify_node_setup,
                                 args=(certify_node_q, ))
            t.daemon = True
            t.start()
        certify_node_q.join()
        safe_print('Certifying virtual switches done.\n')

    if verify or verify_only:
        # verify each node and post results
        safe_print("Verifying deployment for all compute nodes.\n")
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=verify_node_setup,
                                 args=(verify_node_q, ))
            t.daemon = True
            t.start()
        verify_node_q.join()
        # print status
        # success nodes
        safe_print('Deployed successfully to: \n')
        for node_element in node_pass:
            safe_print(node_element + '\n')
        # failed nodes
        safe_print('Deployment to following failed: \n')
        for node_element in node_fail:
            safe_print(
                str(node_element) + ' : ' + str(node_fail[node_element]) +
                '\n')
Example #13
def setup_sriov(node_dic):
    for hostname, node in node_dic.iteritems():
        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n" % {
                'fqdn': node.fqdn,
                'error': node.error
            })
            continue
        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                       {'fqdn': node.fqdn})
            continue
        if node.role != const.ROLE_SRIOV:
            safe_print("Skipping node %(hostname)s because deployment mode is "
                       "SRIOV and role set for node is not SRIOV. It is "
                       "%(role)s\n" % {
                           'hostname': hostname,
                           'role': node.role
                       })
            continue
        if node.os != const.REDHAT:
            safe_print("Skipping node %(hostname)s because deployment mode is "
                       "SRIOV and non REDHAT OS is not supported. OS set for "
                       "node is %(os)s\n" % {
                           'hostname': hostname,
                           'os': node.os
                       })
            continue

        # all okay, generate scripts for node
        Helper.generate_sriov_scripts_for_redhat(node)
        node_q.put(node)

    with open(const.LOG_FILE, "a") as log_file:
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    # Use multiple threads to setup nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_upgrade_or_sriov_node,
                             args=(node_q, ))
        t.daemon = True
        t.start()
    node_q.join()

    sorted_time_dict = OrderedDict(
        sorted(time_dict.items(), key=lambda x: x[1]))
    for fqdn, h_time in sorted_time_dict.items():
        safe_print("node: %(fqdn)s, time: %(time).2f\n" % {
            'fqdn': fqdn,
            'time': h_time
        })

    safe_print("Big Cloud Fabric deployment finished! "
               "Check %(log)s on each node for details.\n" %
               {'log': const.LOG_FILE})