def deploy_bcf(config, fuel_cluster_id):
    """Deploy Big Cloud Fabric support to every node described in config.

    :param config: parsed yaml configuration; may contain a 'nodes' section.
    :param fuel_cluster_id: fuel environment id (None for non-fuel setups).

    Side effects: writes per-node details to const.LOG_FILE, spawns daemon
    worker threads that drain the module-level node_q and dhcp_node_q
    queues, and copies neutron agent configs from the controller node to
    the setup node.
    """
    # Deploy setup node
    Helper.safe_print("Start to prepare setup node\n")
    env = Environment(config, fuel_cluster_id)
    Helper.common_setup_node_preparation(env)
    controller_node = None

    # Generate detailed node information
    Helper.safe_print("Start to setup Big Cloud Fabric\n")
    # BUG FIX: the default was assigned to 'nodes_config', leaving
    # 'nodes_yaml_config' undefined (NameError) whenever the config had
    # no 'nodes' section. Initialize the name that is actually used.
    nodes_yaml_config = None
    if 'nodes' in config:
        nodes_yaml_config = config['nodes']
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    # Generate scripts for each node and queue the deployable ones
    for hostname, node in node_dic.iteritems():
        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        with open(const.LOG_FILE, "a") as log_file:
            log_file.write(str(node))
        if node.skip:
            Helper.safe_print("skip node %(hostname)s due to %(error)s\n"
                              % {'hostname': hostname, 'error': node.error})
            continue
        node_q.put(node)
        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_node = node
        elif node.deploy_dhcp_agent:
            dhcp_node_q.put(node)

    # Use multiple threads to setup nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_setup_node)
        t.daemon = True
        t.start()
    node_q.join()

    # Use multiple threads to set up dhcp agent and metadata agent. Both
    # agents need ini files copied from the openstack controller first, so
    # this whole phase is skipped when no controller node was found.
    if controller_node:
        Helper.safe_print("Copy dhcp_agent.ini from openstack controller "
                          "%(controller_node)s\n"
                          % {'controller_node': controller_node.hostname})
        Helper.copy_file_from_remote(
            controller_node, '/etc/neutron', 'dhcp_agent.ini',
            controller_node.setup_node_dir)
        Helper.safe_print("Copy metadata_agent.ini from openstack controller "
                          "%(controller_node)s\n"
                          % {'controller_node': controller_node.hostname})
        Helper.copy_file_from_remote(
            controller_node, '/etc/neutron', 'metadata_agent.ini',
            controller_node.setup_node_dir)
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=worker_setup_dhcp_agent)
            t.daemon = True
            t.start()
        dhcp_node_q.join()

    Helper.safe_print("Big Cloud Fabric deployment finished! "
                      "Check %(log)s on each node for details.\n"
                      % {'log': const.LOG_FILE})
def deploy_bcf(config, fuel_cluster_id):
    """Deploy Big Cloud Fabric support to every node described in config.

    :param config: parsed yaml configuration; may contain a 'nodes' section.
    :param fuel_cluster_id: fuel environment id (None for non-fuel setups).

    Side effects: writes per-node details to const.LOG_FILE and spawns
    daemon worker threads that drain the module-level node_q queue.
    """
    # Deploy setup node
    Helper.safe_print("Start to prepare setup node\n")
    env = Environment(config, fuel_cluster_id)
    Helper.common_setup_node_preparation(env)

    # Generate detailed node information
    Helper.safe_print("Start to setup Big Cloud Fabric\n")
    # BUG FIX: the default was assigned to 'nodes_config', so
    # 'nodes_yaml_config' raised NameError when config had no 'nodes'
    # section. Initialize the name that load_nodes actually receives.
    nodes_yaml_config = None
    if 'nodes' in config:
        nodes_yaml_config = config['nodes']
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    # Generate scripts for each node
    for hostname, node in node_dic.iteritems():
        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        with open(const.LOG_FILE, "a") as log_file:
            log_file.write(str(node))
        if node.skip:
            Helper.safe_print("skip node %(hostname)s due to %(error)s\n" % {
                'hostname': hostname,
                'error': node.error
            })
            continue
        node_q.put(node)

    # Use multiple threads to setup nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_setup_node)
        t.daemon = True
        t.start()
    node_q.join()

    Helper.safe_print(
        "Big Cloud Fabric deployment finished! Check %(log)s on each node for details.\n"
        % {'log': const.LOG_FILE})
def deploy_bcf(config, fuel_cluster_id):
    """Deploy Big Cloud Fabric support to the configured nodes.

    :param config: parsed yaml configuration; may contain a 'nodes' section.
    :param fuel_cluster_id: fuel environment id (None for non-fuel setups).

    Side effects: appends per-node details to const.LOG_FILE and starts
    daemon worker threads that consume the module-level node_q queue.
    """
    # Deploy setup node
    Helper.safe_print("Start to prepare setup node\n")
    env = Environment(config, fuel_cluster_id)
    Helper.common_setup_node_preparation(env)

    # Generate detailed node information
    Helper.safe_print("Start to setup Big Cloud Fabric\n")
    # BUG FIX: previously the None default went into 'nodes_config' while
    # 'nodes_yaml_config' (the variable actually passed to load_nodes) was
    # only bound inside the if — a NameError without a 'nodes' section.
    nodes_yaml_config = None
    if 'nodes' in config:
        nodes_yaml_config = config['nodes']
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    # Generate scripts for each node
    for hostname, node in node_dic.iteritems():
        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        with open(const.LOG_FILE, "a") as log_file:
            log_file.write(str(node))
        if node.skip:
            Helper.safe_print("skip node %(hostname)s due to %(error)s\n"
                              % {'hostname': hostname, 'error': node.error})
            continue
        node_q.put(node)

    # Use multiple threads to setup nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_setup_node)
        t.daemon = True
        t.start()
    node_q.join()

    Helper.safe_print("Big Cloud Fabric deployment finished! "
                      "Check %(log)s on each node for details.\n"
                      % {'log': const.LOG_FILE})
def deploy_bcf(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
               verify, verify_only, skip_ivs_version_check,
               certificate_dir, certificate_only, generate_csr,
               support, upgrade_dir):
    """Orchestrate a Big Cloud Fabric deployment across configured nodes.

    Depending on the flag arguments this function either upgrades
    (upgrade_dir), generates CSRs (generate_csr), collects support logs
    (support), deploys, certifies (certificate_dir/certificate_only),
    and/or verifies (verify/verify_only). It drains the module-level work
    queues and reports timings/results from the module-level time_dict,
    node_pass and node_fail collections.
    """
    # Prepare the setup node itself.
    safe_print("Start to prepare setup node\n")
    env = Environment(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
                      skip_ivs_version_check, certificate_dir, upgrade_dir)
    Helper.common_setup_node_preparation(env)
    controller_nodes = []

    # Build detailed per-node information.
    safe_print("Start to setup Big Cloud Fabric\n")
    if 'nodes' in config:
        nodes_yaml_config = config['nodes']
    else:
        nodes_yaml_config = None
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    # Upgrade mode short-circuits everything else.
    if upgrade_dir:
        return upgrade_bcf(node_dic)

    # CSR-generation mode: emit a csr per T6 compute node, then stop.
    if generate_csr:
        safe_print("Start to generate csr for virtual switches.\n")
        # create ~/csr and ~/key directory
        Helper.run_command_on_local("mkdir -p %s" % const.CSR_DIR)
        Helper.run_command_on_local("mkdir -p %s" % const.KEY_DIR)
        for hostname, node in node_dic.iteritems():
            if node.skip:
                safe_print("skip node %(fqdn)s due to %(error)s\n"
                           % {'fqdn': node.fqdn, 'error': node.error})
                continue
            if node.tag != node.env_tag:
                safe_print("skip node %(fqdn)s due to mismatched tag\n"
                           % {'fqdn': node.fqdn})
                continue
            if (node.deploy_mode == const.T6
                    and node.role == const.ROLE_COMPUTE):
                Helper.generate_csr(node)
        safe_print("Finish generating csr for virtual switches.\n")
        return

    # copy neutron config from neutron server to setup node
    for hostname, node in node_dic.iteritems():
        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_nodes.append(node)
    Helper.copy_neutron_config_from_controllers(controller_nodes)

    # check if vlan is the tenant network type for fuel environment
    if not Helper.check_if_vlan_is_used(controller_nodes):
        safe_print("tenant network type is not vlan. Stop deploying.\n")
        return

    # prepare keystone client from /etc/neutron/api-paste.ini
    # Helper.prepare_keystone_client(controller_nodes)

    # Generate scripts for each node and sort nodes onto the work queues.
    for hostname, node in node_dic.iteritems():
        if support:
            support_node_q.put(node)
        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n"
                       % {'fqdn': node.fqdn, 'error': node.error})
            continue
        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n"
                       % {'fqdn': node.fqdn})
            continue
        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        elif node.os == const.REDHAT:
            Helper.generate_scripts_for_redhat(node)
        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_node_q.put(node)
        else:
            # python doesn't have deep copy for Queue, hence add to all
            node_q.put(node)
            verify_node_q.put(node)
        if (node.deploy_mode == const.T6
                and node.role == const.ROLE_COMPUTE):
            certify_node_q.put(node)
        if node.rhosp:
            Helper.chmod_node(node)

    # Record installer version plus every node's details in the log.
    with open(const.LOG_FILE, "a") as log_file:
        version = Helper.run_command_on_local("pip show bosi")
        log_file.write(str(version))
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    # Support mode: gather logs from the setup node and all nodes, then stop.
    if support:
        safe_print("Start to collect logs.\n")
        # copy installer logs to ~/support
        Helper.run_command_on_local("mkdir -p %s" % const.SUPPORT_DIR)
        Helper.run_command_on_local(
            "cp -r %(src)s %(dst)s"
            % {"src": const.LOG_FILE, "dst": const.SUPPORT_DIR})
        Helper.run_command_on_local(
            "cp -r %(setup_node_dir)s/%(generated_script_dir)s %(dst)s"
            % {"setup_node_dir": env.setup_node_dir,
               "generated_script_dir": const.GENERATED_SCRIPT_DIR,
               "dst": const.SUPPORT_DIR})
        for _ in range(const.MAX_WORKERS):
            worker = threading.Thread(target=support_node_setup,
                                      args=(support_node_q,))
            worker.daemon = True
            worker.start()
        support_node_q.join()
        # compress ~/support
        Helper.run_command_on_local("cd /tmp; tar -czf support.tar.gz support")
        safe_print("Finish collecting logs. "
                   "logs are at /tmp/support.tar.gz.\n")
        return

    # in case of verify_only or certificate_only, do not deploy
    if not (verify_only or certificate_only):
        # Controllers are set up serially by one worker thread...
        worker = threading.Thread(target=worker_setup_node,
                                  args=(controller_node_q,))
        worker.daemon = True
        worker.start()
        controller_node_q.join()

        # ...then the remaining nodes in parallel.
        for _ in range(const.MAX_WORKERS):
            worker = threading.Thread(target=worker_setup_node,
                                      args=(node_q,))
            worker.daemon = True
            worker.start()
        node_q.join()

        # Report per-node setup times, fastest first.
        sorted_time_dict = OrderedDict(
            sorted(time_dict.items(), key=lambda item: item[1]))
        for fqdn, h_time in sorted_time_dict.items():
            safe_print("node: %(fqdn)s, time: %(time).2f\n"
                       % {'fqdn': fqdn, 'time': h_time})
        safe_print("Big Cloud Fabric deployment finished! "
                   "Check %(log)s on each node for details.\n"
                   % {'log': const.LOG_FILE})

    if certificate_dir or certificate_only:
        # certify each node
        safe_print("Start to certify virtual switches.\n")
        for _ in range(const.MAX_WORKERS):
            worker = threading.Thread(target=certify_node_setup,
                                      args=(certify_node_q,))
            worker.daemon = True
            worker.start()
        certify_node_q.join()
        safe_print('Certifying virtual switches done.\n')

    if verify or verify_only:
        # verify each node and post results
        safe_print("Verifying deployment for all compute nodes.\n")
        for _ in range(const.MAX_WORKERS):
            worker = threading.Thread(target=verify_node_setup,
                                      args=(verify_node_q,))
            worker.daemon = True
            worker.start()
        verify_node_q.join()
        # success nodes
        safe_print('Deployed successfully to: \n')
        for passed in node_pass:
            safe_print(passed + '\n')
        # failed nodes
        safe_print('Deployment to following failed: \n')
        for failed in node_fail:
            safe_print(str(failed) + ' : ' + str(node_fail[failed]) + '\n')
def deploy_bcf(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
               verify, verify_only, skip_ivs_version_check,
               certificate_dir, certificate_only, generate_csr,
               support, upgrade_dir, offline_dir, sriov):
    """Drive a Big Cloud Fabric deployment over all configured nodes.

    Flag arguments select the mode of operation: upgrade (upgrade_dir),
    sriov setup (sriov), CSR generation (generate_csr), support-log
    collection (support), deployment, certification
    (certificate_dir/certificate_only) and/or verification
    (verify/verify_only). Work is distributed through the module-level
    queues; results come from the module-level time_dict, node_pass and
    node_fail collections.
    """
    # Get the setup node ready first.
    safe_print("Start to prepare setup node\n")
    env = Environment(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
                      skip_ivs_version_check, certificate_dir, upgrade_dir,
                      offline_dir, sriov)
    Helper.common_setup_node_preparation(env)
    controller_nodes = []

    # Load detailed node information from the config.
    safe_print("Start to setup Big Cloud Fabric\n")
    nodes_yaml_config = config['nodes'] if 'nodes' in config else None
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    # Upgrade and sriov modes are exclusive of the rest.
    if upgrade_dir:
        return upgrade_bcf(node_dic)
    if sriov:
        return setup_sriov(node_dic)

    # CSR-generation mode: one csr per eligible T6 compute node, then stop.
    if generate_csr:
        safe_print("Start to generate csr for virtual switches.\n")
        # create ~/csr and ~/key directory
        Helper.run_command_on_local("mkdir -p %s" % const.CSR_DIR)
        Helper.run_command_on_local("mkdir -p %s" % const.KEY_DIR)
        for hostname, node in node_dic.iteritems():
            if node.skip:
                safe_print("skip node %(fqdn)s due to %(error)s\n"
                           % {'fqdn': node.fqdn, 'error': node.error})
                continue
            if node.tag != node.env_tag:
                safe_print("skip node %(fqdn)s due to mismatched tag\n"
                           % {'fqdn': node.fqdn})
                continue
            if (node.deploy_mode == const.T6
                    and node.role == const.ROLE_COMPUTE):
                Helper.generate_csr(node)
        safe_print("Finish generating csr for virtual switches.\n")
        return

    # copy neutron config from neutron server to setup node
    for hostname, node in node_dic.iteritems():
        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_nodes.append(node)
    Helper.copy_neutron_config_from_controllers(controller_nodes)

    # check if vlan is the tenant network type for fuel environment
    if not Helper.check_if_vlan_is_used(controller_nodes):
        safe_print("tenant network type is not vlan. Stop deploying.\n")
        return

    # prepare keystone client from /etc/neutron/api-paste.ini
    # Helper.prepare_keystone_client(controller_nodes)

    # Generate scripts for each node and distribute nodes to work queues.
    for hostname, node in node_dic.iteritems():
        if support:
            support_node_q.put(node)
        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n"
                       % {'fqdn': node.fqdn, 'error': node.error})
            continue
        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n"
                       % {'fqdn': node.fqdn})
            continue
        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        elif node.os == const.REDHAT:
            Helper.generate_scripts_for_redhat(node)
        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_node_q.put(node)
        else:
            # python doesn't have deep copy for Queue, hence add to all
            node_q.put(node)
            verify_node_q.put(node)
        if (node.deploy_mode == const.T6
                and node.role == const.ROLE_COMPUTE):
            certify_node_q.put(node)
        if node.rhosp:
            Helper.chmod_node(node)

    # Log the installer version followed by every node's details.
    with open(const.LOG_FILE, "a") as log_file:
        version = Helper.run_command_on_local("pip show bosi")
        log_file.write(str(version))
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    # Support mode: collect logs from all nodes into a tarball, then stop.
    if support:
        safe_print("Start to collect logs.\n")
        # copy installer logs to ~/support
        Helper.run_command_on_local("mkdir -p %s" % const.SUPPORT_DIR)
        Helper.run_command_on_local(
            "cp -r %(src)s %(dst)s"
            % {"src": const.LOG_FILE, "dst": const.SUPPORT_DIR})
        Helper.run_command_on_local(
            "cp -r %(setup_node_dir)s/%(generated_script_dir)s %(dst)s"
            % {"setup_node_dir": env.setup_node_dir,
               "generated_script_dir": const.GENERATED_SCRIPT_DIR,
               "dst": const.SUPPORT_DIR})
        for _ in range(const.MAX_WORKERS):
            collector = threading.Thread(target=support_node_setup,
                                         args=(support_node_q,))
            collector.daemon = True
            collector.start()
        support_node_q.join()
        # compress ~/support
        Helper.run_command_on_local("cd /tmp; tar -czf support.tar.gz support")
        safe_print("Finish collecting logs. "
                   "logs are at /tmp/support.tar.gz.\n")
        return

    # in case of verify_only or certificate_only, do not deploy
    if not (verify_only or certificate_only):
        # A single worker handles the controller queue serially...
        setup_worker = threading.Thread(target=worker_setup_node,
                                        args=(controller_node_q,))
        setup_worker.daemon = True
        setup_worker.start()
        controller_node_q.join()

        # ...then a pool handles the remaining nodes in parallel.
        for _ in range(const.MAX_WORKERS):
            setup_worker = threading.Thread(target=worker_setup_node,
                                            args=(node_q,))
            setup_worker.daemon = True
            setup_worker.start()
        node_q.join()

        # Print per-node setup durations in ascending order.
        sorted_time_dict = OrderedDict(
            sorted(time_dict.items(), key=lambda item: item[1]))
        for fqdn, h_time in sorted_time_dict.items():
            safe_print("node: %(fqdn)s, time: %(time).2f\n"
                       % {'fqdn': fqdn, 'time': h_time})
        safe_print("Big Cloud Fabric deployment finished! "
                   "Check %(log)s on each node for details.\n"
                   % {'log': const.LOG_FILE})

    if certificate_dir or certificate_only:
        # certify each node
        safe_print("Start to certify virtual switches.\n")
        for _ in range(const.MAX_WORKERS):
            cert_worker = threading.Thread(target=certify_node_setup,
                                           args=(certify_node_q,))
            cert_worker.daemon = True
            cert_worker.start()
        certify_node_q.join()
        safe_print('Certifying virtual switches done.\n')

    if verify or verify_only:
        # verify each node and post results
        safe_print("Verifying deployment for all compute nodes.\n")
        for _ in range(const.MAX_WORKERS):
            verify_worker = threading.Thread(target=verify_node_setup,
                                             args=(verify_node_q,))
            verify_worker.daemon = True
            verify_worker.start()
        verify_node_q.join()
        # success nodes
        safe_print('Deployed successfully to: \n')
        for passed_fqdn in node_pass:
            safe_print(passed_fqdn + '\n')
        # failed nodes
        safe_print('Deployment to following failed: \n')
        for failed_fqdn in node_fail:
            safe_print(str(failed_fqdn) + ' : '
                       + str(node_fail[failed_fqdn]) + '\n')