def install_node(orig_id, seed_id, node_ids, isolated=False, networks=None):
    if orig_id == seed_id:
        raise Exception("Original and seed environments have the same ID: %s"
                        % orig_id)
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    check_networks(orig_env, seed_env, networks)
    nodes = [node_obj.Node(node_id) for node_id in node_ids]
    orig_node = env_util.get_one_controller(orig_env)
    seed_env.assign(nodes, orig_node.data['roles'])
    # Copy disk and NIC layouts from the original controller to each node.
    for node in nodes:
        disk_info_fixture = orig_node.get_attribute('disks')
        nic_info_fixture = orig_node.get_attribute('interfaces')
        update_node_settings(node, disk_info_fixture, nic_info_fixture)
    if networks:
        env_util.clone_ips(orig_id, networks)
    LOG.info("Nodes reboot in progress. Please wait...")
    node_util.reboot_nodes(nodes, timeout=180 * 60)
    node_util.wait_for_mcollective_start(nodes)
    env_util.provision_nodes(seed_env, nodes)
    env_util.update_deployment_info(seed_env, isolated)
    if isolated and len(nodes) > 1:
        isolate(nodes, seed_env)
    env_util.deploy_changes(seed_env, nodes)
    for node in nodes:
        controller_upgrade.ControllerUpgrade(
            node, seed_env, isolated=isolated).postdeploy()

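# Hypothetical invocation, for illustration only (the environment and node
# IDs are assumptions, and the network names must exist in the original
# environment):
#
#   install_node(orig_id=1, seed_id=2, node_ids=[4, 5],
#                isolated=True, networks=['management', 'public'])
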
def install_node(orig_id, seed_id, node_ids, isolated=False):
    env = environment_obj.Environment
    nodes = [node_obj.Node(node_id) for node_id in node_ids]
    if orig_id == seed_id:
        raise Exception("Original and seed environments have the same ID: %s"
                        % orig_id)
    orig_env = env(orig_id)
    orig_node = env_util.get_one_controller(orig_env)
    seed_env = env(seed_id)
    seed_env.assign(nodes, orig_node.data['roles'])
    # Copy disk and NIC layouts from the original controller to each node.
    for node in nodes:
        disk_info_fixture = orig_node.get_attribute('disks')
        nic_info_fixture = orig_node.get_attribute('interfaces')
        update_node_settings(node, disk_info_fixture, nic_info_fixture)
    env_util.provision_nodes(seed_env, nodes)
    for node in nodes:
        # FIXME: properly call all handlers all over the place
        controller_upgrade.ControllerUpgrade(
            node, seed_env, isolated=isolated).predeploy()
    if len(nodes) > 1:
        isolate(nodes, seed_env)
    env_util.deploy_changes(seed_env, nodes)
    for node in nodes:
        controller_upgrade.ControllerUpgrade(
            node, seed_env, isolated=isolated).postdeploy()

def upgrade_node(env_id, node_ids, isolated=False):
    # From check_deployment_status
    env = environment_obj.Environment(env_id)
    if env.data['status'] != 'new':
        raise Exception("Environment must be in 'new' status")
    nodes = [node_obj.Node(node_id) for node_id in node_ids]

    # Sanity check: every node must come from one and the same original
    # cluster, and none of them may already belong to the target one.
    one_orig_id = None
    for node in nodes:
        orig_id = node.data['cluster']
        if orig_id == env_id:
            raise Exception(
                "Cannot upgrade node with ID %s: it's already in cluster with "
                "ID %s" % (node.data['id'], env_id))
        if orig_id:
            if one_orig_id and orig_id != one_orig_id:
                raise Exception(
                    "Not upgrading nodes from different clusters: %s and %s"
                    % (orig_id, one_orig_id))
            one_orig_id = orig_id
    call_handlers = upgrade_handlers.get_nodes_handlers(nodes, env, isolated)
    call_handlers('preupgrade')
    call_handlers('prepare')
    env_util.move_nodes(env, nodes)
    env_util.provision_nodes(env, nodes)
    call_handlers('predeploy')
    env_util.deploy_nodes(env, nodes)
    call_handlers('postdeploy')

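# Hedged sketch: the same sanity check factored into a standalone helper so
# it can be exercised without Fuel objects. `find_single_origin` and
# `clusters` (the node.data['cluster'] values) are illustrative names
# introduced here; they do not appear in the source.
def find_single_origin(clusters, env_id):
    one_orig_id = None
    for orig_id in clusters:
        if orig_id == env_id:
            raise Exception(
                "Cannot upgrade node: it's already in cluster with ID %s"
                % env_id)
        if orig_id:
            if one_orig_id and orig_id != one_orig_id:
                raise Exception(
                    "Not upgrading nodes from different clusters: %s and %s"
                    % (orig_id, one_orig_id))
            one_orig_id = orig_id
    return one_orig_id

# Example: find_single_origin([3, 3, None], env_id=7) returns 3, while
# mixing clusters ([3, 4]) or passing env_id itself raises an Exception.
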
def upgrade_node(env_id, node_ids, isolated=False, network_template=None):
    # From check_deployment_status
    env = environment_obj.Environment(env_id)
    nodes = [node_obj.Node(node_id) for node_id in node_ids]

    # Sanity check: every node must come from one and the same original
    # cluster, and none of them may already belong to the target one.
    one_orig_id = None
    for node in nodes:
        orig_id = node.data['cluster']
        if orig_id == env_id:
            raise Exception(
                "Cannot upgrade node with ID %s: it's already in cluster with "
                "ID %s" % (node.data['id'], env_id))
        if orig_id:
            if one_orig_id and orig_id != one_orig_id:
                raise Exception(
                    "Not upgrading nodes from different clusters: %s and %s"
                    % (orig_id, one_orig_id))
            one_orig_id = orig_id
    call_handlers = upgrade_handlers.get_nodes_handlers(nodes, env, isolated)
    copy_patches_folder_to_nailgun()
    disk.update_partition_generator()
    call_handlers('preupgrade')
    call_handlers('prepare')
    env_util.move_nodes(env, nodes)
    call_handlers('predeploy')
    if network_template:
        env_util.set_network_template(env, network_template)
    env_util.deploy_nodes(env, nodes)
    call_handlers('postdeploy')

def zabbix_snmptrapd_settings(astute, attrs):
    node = node_obj.Node(astute['uid'])
    # Read the node's snmptrapd config and extract the SNMP community
    # string from its authCommunity directive.
    with ssh.sftp(node).open('/etc/snmp/snmptrapd.conf') as f:
        data = f.read()
    template = re.compile(r"authCommunity\s[a-z-,]+\s([a-z-]+)")
    match = template.search(data)
    attrs['community']['value'] = match.group(1)
    attrs['metadata']['enabled'] = True

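# Illustrative, self-contained check of the snmptrapd.conf regex used above.
# The sample line follows standard snmptrapd "authCommunity TYPES COMMUNITY"
# syntax; it is an assumption, not data read from a real node.
import re


def _demo_snmptrapd_community():
    sample = "authCommunity log,execute,net public\n"
    template = re.compile(r"authCommunity\s[a-z-,]+\s([a-z-]+)")
    match = template.search(sample)
    # The first group captures the community string, here "public".
    assert match is not None and match.group(1) == "public"
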
def upgrade_node(env_id, node_ids, isolated=False, network_template=None):
    # From check_deployment_status
    env = environment_obj.Environment(env_id)
    nodes = [node_obj.Node(node_id) for node_id in node_ids]

    # Sanity check: every node must come from one and the same original
    # cluster, and none of them may already belong to the target one.
    one_orig_id = None
    for node in nodes:
        orig_id = node.data['cluster']
        if orig_id == env_id:
            raise Exception(
                "Cannot upgrade node with ID %s: it's already in cluster with "
                "ID %s" % (node.data['id'], env_id))
        if orig_id:
            if one_orig_id and orig_id != one_orig_id:
                raise Exception(
                    "Not upgrading nodes from different clusters: %s and %s"
                    % (orig_id, one_orig_id))
            one_orig_id = orig_id
    patch_partition_generator(one_orig_id)
    call_handlers = upgrade_handlers.get_nodes_handlers(nodes, env, isolated)
    call_handlers('preupgrade')
    call_handlers('prepare')
    env_util.move_nodes(env, nodes)

    # NOTE(aroma): VIPs must be copied after node reassignment; otherwise,
    # according to [1], the operation will not take any effect.
    # [1]: https://bugs.launchpad.net/fuel/+bug/1549254
    env_util.copy_vips(env)

    call_handlers('predeploy')
    if network_template:
        env_util.set_network_template(env, network_template)
    if isolated or len(nodes) == 1:
        env_util.deploy_nodes(env, nodes)
    else:
        env_util.deploy_changes(env, nodes)
    call_handlers('postdeploy')
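
# A minimal sketch of the deploy-strategy decision at the end of the variant
# above; `full_deploy` is a hypothetical helper introduced here purely for
# illustration and is not part of the source.
def full_deploy(isolated, node_count):
    # Isolated upgrades and single-node upgrades run a full deployment
    # (deploy_nodes); otherwise only pending changes are deployed
    # (deploy_changes).
    return isolated or node_count == 1


assert full_deploy(True, 3) is True
assert full_deploy(False, 1) is True
assert full_deploy(False, 3) is False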