def upgrade_db(orig_id, seed_id, db_role_name):
    """Dump OpenStack databases from the original env and load them into
    the seed env.

    Only databases that exist in the original env AND are listed in
    magic_consts.OS_SERVICES are transferred.  A copy of the dump, tagged
    with the original cluster id, is kept in the Fuel cache.
    """
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    env_util.delete_fuel_resources(seed_env)
    # Wait for Neutron to reconfigure networks
    time.sleep(7)  # FIXME: Use more deterministic way
    # Freeze both sides before taking the dump.
    maintenance.disable_apis(orig_env)
    maintenance.stop_corosync_services(seed_env)
    maintenance.stop_upstart_services(seed_env)
    wanted = set(magic_consts.OS_SERVICES)
    present = set(db.get_databases(orig_env))
    to_dump = present & wanted
    if len(to_dump) < len(magic_consts.OS_SERVICES):
        LOG.info('Skipping nonexistent tables: %s',
                 ', '.join(wanted - present))
    LOG.info('Will dump tables: %s', ', '.join(to_dump))
    dump_path = os.path.join(magic_consts.FUEL_CACHE, 'dbs.original.sql.gz')
    db.mysqldump_from_env(orig_env, db_role_name, to_dump, dump_path)
    cluster_copy = os.path.join(
        magic_consts.FUEL_CACHE,
        'dbs.original.cluster_%s.sql.gz' % (orig_env.data['id'], ),
    )
    shutil.copy(dump_path, cluster_copy)
    db.mysqldump_restore_to_env(seed_env, db_role_name, dump_path)
    db.db_sync(seed_env)
def upgrade_control_plane(orig_id, seed_id):
    """Hand the control plane over from the original env to the seed env."""
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    seed_controllers = list(env_util.get_controllers(seed_env))
    clustered = len(seed_controllers) > 1
    update_neutron_config(orig_env, seed_env)
    # enable all services on seed env; with several controllers the whole
    # cluster is stopped here and only restarted at the very end
    if clustered:
        maintenance.stop_cluster(seed_env)
    else:
        maintenance.start_corosync_services(seed_env)
        maintenance.start_upstart_services(seed_env)
    # disable cluster services on orig env
    maintenance.stop_cluster(orig_env)
    # switch networks to seed env
    roles = ['primary-controller', 'controller']
    # disable physical connectivity for orig env
    for node, info in env_util.iter_deployment_info(orig_env, roles):
        network.delete_patch_ports(node, info)
    # enable physical connectivity for seed env
    for node, info in env_util.iter_deployment_info(seed_env, roles):
        network.delete_overlay_networks(node, info)
        network.create_patch_ports(node, info)
    # enable all services on seed env
    if clustered:
        maintenance.start_cluster(seed_env)
        maintenance.start_corosync_services(seed_env)
        maintenance.start_upstart_services(seed_env)
def transfer_plugins_settings(orig_env_id, seed_env_id, plugins):
    """Copy settings of the given plugins from the original env to the seed.

    Validation happens up-front: UnknownPlugin is raised for plugins with
    no registered handler, PluginNotConfigured for plugins missing from
    the seed env settings, and in either case nothing is applied.
    """
    orig_env = environment.Environment(orig_env_id)
    seed_env = environment.Environment(seed_env_id)
    astute = env_util.get_astute_yaml(orig_env)
    attrs = seed_env.get_settings_data()
    editable_attrs = attrs['editable']
    handlers = {}
    plugin_settings = {}
    for plugin in plugins:
        try:
            handlers[plugin] = PLUGINS[plugin]
        except KeyError:
            raise UnknownPlugin(plugin)
        try:
            plugin_settings[plugin] = editable_attrs[plugin]
        except KeyError:
            raise PluginNotConfigured(plugin, seed_env_id)
    for plugin in plugins:
        LOG.info("Fetching settings for plugin '%s'", plugin)
        handlers[plugin](astute, plugin_settings[plugin])
    seed_env.set_settings_data(attrs)
def install_node(orig_id, seed_id, node_ids, isolated=False, networks=None):
    """Move nodes into the seed env, provision and deploy them there.

    :param orig_id: ID of the original environment the nodes come from
    :param seed_id: ID of the seed environment to install into
    :param node_ids: IDs of the nodes to move
    :param isolated: passed through to deployment info and upgrade handlers
    :param networks: optional networks whose IPs are cloned from orig env
    """
    if orig_id == seed_id:
        # Fixed: Exception() does not interpolate %-style args, so the
        # original message was never formatted.
        raise Exception(
            "Original and seed environments have the same ID: %s" % orig_id)
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    check_networks(orig_env, seed_env, networks)
    nodes = [node_obj.Node(node_id) for node_id in node_ids]
    orig_node = env_util.get_one_controller(orig_env)
    seed_env.assign(nodes, orig_node.data['roles'])
    # The reference fixtures come from the single original controller and
    # do not depend on the target node, so fetch them once instead of once
    # per node (each get_attribute call is a remote API request).
    disk_info_fixture = orig_node.get_attribute('disks')
    nic_info_fixture = orig_node.get_attribute('interfaces')
    for node in nodes:
        update_node_settings(node, disk_info_fixture, nic_info_fixture)
    if networks:
        env_util.clone_ips(orig_id, networks)
    LOG.info("Nodes reboot in progress. Please wait...")
    node_util.reboot_nodes(nodes, timeout=180 * 60)
    node_util.wait_for_mcollective_start(nodes)
    env_util.provision_nodes(seed_env, nodes)
    env_util.update_deployment_info(seed_env, isolated)
    if isolated and len(nodes) > 1:
        isolate(nodes, seed_env)
    env_util.deploy_changes(seed_env, nodes)
    for node in nodes:
        controller_upgrade.ControllerUpgrade(
            node, seed_env, isolated=isolated).postdeploy()
def install_node(orig_id, seed_id, node_ids, isolated=False, networks=None):
    """Assign nodes to the seed env, provision them and run the deploy
    with the controller-upgrade handlers around it.
    """
    if orig_id == seed_id:
        raise Exception("Original and seed environments have the same ID: %s",
                        orig_id)
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    check_networks(orig_env, seed_env, networks)
    nodes = [node_obj.Node(node_id) for node_id in node_ids]
    orig_node = env_util.get_one_controller(orig_env)
    seed_env.assign(nodes, orig_node.data['roles'])
    for node in nodes:
        disks_fixture = orig_node.get_attribute('disks')
        ifaces_fixture = orig_node.get_attribute('interfaces')
        update_node_settings(node, disks_fixture, ifaces_fixture)
    if networks:
        env_util.clone_ips(orig_id, networks)
    env_util.provision_nodes(seed_env, nodes)
    for node in nodes:
        # FIXME: properly call all handlers all over the place
        controller_upgrade.ControllerUpgrade(
            node, seed_env, isolated=isolated).predeploy()
    # NOTE(review): isolation is applied for any multi-node install here,
    # regardless of the 'isolated' flag -- confirm this is intended.
    if len(nodes) > 1:
        isolate(nodes, seed_env)
    env_util.deploy_changes(seed_env, nodes)
    for node in nodes:
        controller_upgrade.ControllerUpgrade(
            node, seed_env, isolated=isolated).postdeploy()
def upgrade_ceph(orig_id, seed_id):
    """Carry the Ceph monitor configuration over to the seed env."""
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    archive_path = os.path.join(
        magic_consts.FUEL_CACHE,
        "env-{0}-ceph.conf.tar.gz".format(orig_id))
    conf_path, db_path = extract_mon_conf_files(orig_env, archive_path)
    ceph_set_new_mons(seed_env, archive_path, conf_path, db_path)
def upgrade_control_plane(orig_id, seed_id):
    """Start services on the seed env and re-point networking to it."""
    original = environment_obj.Environment(orig_id)
    seed = environment_obj.Environment(seed_id)
    start_corosync_services(seed)
    start_upstart_services(seed)
    disconnect_networks(original)
    connect_to_networks(seed)
    update_neutron_config(seed)
def transfer_plugins_settings(orig_env_id, seed_env_id, plugins):
    """Apply each plugin's transfer handler to the seed env settings.

    Handlers read the original env's astute.yaml and mutate the plugin's
    editable settings in place; the result is saved back to the seed env.
    """
    source = environment.Environment(orig_env_id)
    target = environment.Environment(seed_env_id)
    astute = env_util.get_astute_yaml(source)
    attrs = target.get_settings_data()
    editable = attrs['editable']
    for name in plugins:
        LOG.info("Fetching settings for plugin '%s'", name)
        handler = PLUGINS[name]
        handler(astute, editable[name])
    target.set_settings_data(attrs)
def upgrade_db(orig_id, seed_id):
    """Move the OpenStack databases from the original env to the seed env."""
    original = environment_obj.Environment(orig_id)
    seed = environment_obj.Environment(seed_id)
    env_util.delete_fuel_resources(seed)
    # Wait for Neutron to reconfigure networks
    time.sleep(7)  # FIXME: Use more deterministic way
    # Quiesce both environments before dumping.
    maintenance.disable_apis(original)
    maintenance.stop_corosync_services(seed)
    maintenance.stop_upstart_services(seed)
    dump_file = mysqldump_from_env(original)
    mysqldump_restore_to_env(seed, dump_file)
    db_sync(seed)
def prepare(orig_id, seed_id):
    """Copy the keystone database into the seed env and sync/restart it."""
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    controller = env_util.get_one_controller(seed_env)
    # Temporary local file holds the dump only for the transfer.
    with tempfile.NamedTemporaryFile() as dump:
        db.mysqldump_from_env(orig_env, ['keystone'], dump.name)
        db.mysqldump_restore_to_env(seed_env, dump.name)
    ssh.call(['keystone-manage', 'db_sync'],
             node=controller, parse_levels=True)
    # Stale tokens may be cached; restart memcached on every controller.
    for controller in env_util.get_controllers(seed_env):
        ssh.call(['service', 'memcached', 'restart'], node=controller)
def rollback_control_plane(seed_id, orig_id):
    """Give the control plane back to the original environment."""
    seed_env = environment_obj.Environment(seed_id)
    orig_env = environment_obj.Environment(orig_id)
    # switch physical networks connectivity to orig_env
    ctrl_roles = ['primary-controller', 'controller']
    for node, info in env_util.iter_deployment_info(seed_env, ctrl_roles):
        network.delete_patch_ports(node, info)
    for node, info in env_util.iter_deployment_info(orig_env, ctrl_roles):
        network.create_patch_ports(node, info)
    # enable cluster's services for orig_env
    maintenance.start_cluster(orig_env)
    maintenance.start_corosync_services(orig_env)
    maintenance.enable_apis(orig_env)
def upgrade_node(env_id, node_ids, isolated=False):
    """Upgrade nodes by moving them into environment *env_id*.

    All nodes must belong to one original cluster and the target
    environment must still be in the 'new' status.
    """
    # From check_deployment_status
    env = environment_obj.Environment(env_id)
    if env.data['status'] != 'new':
        raise Exception("Environment must be in 'new' status")
    nodes = [node_obj.Node(node_id) for node_id in node_ids]
    # Sanity check
    one_orig_id = None
    for node in nodes:
        orig_id = node.data['cluster']
        if orig_id == env_id:
            # Fixed: the original referenced 'node_id', a NameError in
            # Python 3 (the comprehension variable above does not leak);
            # also Exception() never interpolates %-style args, so format
            # the message explicitly.
            raise Exception(
                "Cannot upgrade node with ID %s: it's already in cluster "
                "with ID %s" % (node.data['id'], env_id))
        if orig_id:
            if one_orig_id and orig_id != one_orig_id:
                raise Exception(
                    "Not upgrading nodes from different clusters: %s and %s"
                    % (orig_id, one_orig_id))
            one_orig_id = orig_id
    call_handlers = upgrade_handlers.get_nodes_handlers(nodes, env, isolated)
    call_handlers('preupgrade')
    call_handlers('prepare')
    env_util.move_nodes(env, nodes)
    env_util.provision_nodes(env, nodes)
    call_handlers('predeploy')
    env_util.deploy_nodes(env, nodes)
    call_handlers('postdeploy')
def upgrade_node(env_id, node_ids, isolated=False, network_template=None):
    """Upgrade nodes by moving them into environment *env_id*.

    All nodes must belong to one original cluster.  Optionally applies a
    network template to the target env before deployment.
    """
    # From check_deployment_status
    env = environment_obj.Environment(env_id)
    nodes = [node_obj.Node(node_id) for node_id in node_ids]
    # Sanity check
    one_orig_id = None
    for node in nodes:
        orig_id = node.data['cluster']
        if orig_id == env_id:
            # Fixed: the original referenced 'node_id', a NameError in
            # Python 3 (the comprehension variable above does not leak);
            # also Exception() never interpolates %-style args, so format
            # the message explicitly.
            raise Exception(
                "Cannot upgrade node with ID %s: it's already in cluster "
                "with ID %s" % (node.data['id'], env_id))
        if orig_id:
            if one_orig_id and orig_id != one_orig_id:
                raise Exception(
                    "Not upgrading nodes from different clusters: %s and %s"
                    % (orig_id, one_orig_id))
            one_orig_id = orig_id
    call_handlers = upgrade_handlers.get_nodes_handlers(nodes, env, isolated)
    copy_patches_folder_to_nailgun()
    disk.update_partition_generator()
    call_handlers('preupgrade')
    call_handlers('prepare')
    env_util.move_nodes(env, nodes)
    call_handlers('predeploy')
    if network_template:
        env_util.set_network_template(env, network_template)
    env_util.deploy_nodes(env, nodes)
    call_handlers('postdeploy')
def patch_partition_generator(env_id):
    """Update partitions generator for releases earlier than 6.0"""
    env = environment_obj.Environment(env_id)
    release = version.StrictVersion(env.data["fuel_version"])
    # Releases >= 6.0 already ship a suitable generator - nothing to do.
    if release >= version.StrictVersion("6.0"):
        return
    copy_patches_folder_to_nailgun()
    disk.update_partition_generator()
def upgrade_env(env_id):
    """Clone the env onto the deployable Ubuntu release.

    Returns the ID of the newly created seed environment.
    """
    env = environment_obj.Environment(env_id)
    deployable_release = find_deployable_release("Ubuntu")
    seed_id = env_util.clone_env(env_id, deployable_release)
    env_util.cache_service_tenant_id(env)
    env_util.change_env_settings(
        seed_id, env_util.get_astute_yaml(env)['master_ip'])
    return seed_id
def change_env_settings(env_id, master_ip=''):
    # workaround for bugs related to DNS, NTP and TLS
    env = environment_obj.Environment(env_id)
    attrs = env.get_attributes()
    editable = attrs['editable']
    # Disable public TLS and point NTP/DNS at the master node.
    for section, option, value in (
            ('public_ssl', 'horizon', False),
            ('public_ssl', 'services', False),
            ('external_ntp', 'ntp_list', master_ip),
            ('external_dns', 'dns_list', master_ip)):
        editable[section][option]['value'] = value
    env.update_attributes(attrs)
def write_service_tenant_id(env_id):
    """Fetch the Keystone service tenant id on a controller and cache it
    in a file under magic_consts.FUEL_CACHE.
    """
    env = environment_obj.Environment(env_id)
    node = env_util.get_one_controller(env)
    # Fixed: 'bash -c' takes ONE script string; the original passed the
    # pipeline as several separate argv entries, so only ". /root/openrc;"
    # was executed while the rest became positional parameters $0, $1, ...
    # NOTE(review): the awk filter '\$2 ~ /id/' looks like it matches the
    # table header row rather than the 'services' tenant row -- verify the
    # extracted value against a live keystone tenant-list.
    cmd = (". /root/openrc; "
           "keystone tenant-list | "
           "awk -F\| '\$2 ~ /id/{print \$3}' | tr -d \ ")
    tenant_id, _ = ssh.call(["bash", "-c", cmd], stdout=ssh.PIPE, node=node)
    tenant_file = '%s/env-%s-service-tenant-id' % (magic_consts.FUEL_CACHE,
                                                   str(env_id))
    with open(tenant_file, 'w') as f:
        f.write(tenant_id)
def upgrade_node(env_id, node_ids, isolated=False, network_template=None):
    """Upgrade nodes by moving them into environment *env_id*.

    All nodes must belong to one original cluster.  VIPs are copied after
    the move, and deployment strategy depends on isolation/node count.
    """
    # From check_deployment_status
    env = environment_obj.Environment(env_id)
    nodes = [node_obj.Node(node_id) for node_id in node_ids]
    # Sanity check
    one_orig_id = None
    for node in nodes:
        orig_id = node.data['cluster']
        if orig_id == env_id:
            # Fixed: the original referenced 'node_id', a NameError in
            # Python 3 (the comprehension variable above does not leak);
            # also Exception() never interpolates %-style args, so format
            # the message explicitly.
            raise Exception(
                "Cannot upgrade node with ID %s: it's already in cluster "
                "with ID %s" % (node.data['id'], env_id))
        if orig_id:
            if one_orig_id and orig_id != one_orig_id:
                raise Exception(
                    "Not upgrading nodes from different clusters: %s and %s"
                    % (orig_id, one_orig_id))
            one_orig_id = orig_id
    patch_partition_generator(one_orig_id)
    call_handlers = upgrade_handlers.get_nodes_handlers(nodes, env, isolated)
    call_handlers('preupgrade')
    call_handlers('prepare')
    env_util.move_nodes(env, nodes)
    # NOTE(aroma): copying of VIPs must be done after node reassignment
    # as according to [1] otherwise the operation will not take any effect
    # [1]: https://bugs.launchpad.net/fuel/+bug/1549254
    env_util.copy_vips(env)
    call_handlers('predeploy')
    if network_template:
        env_util.set_network_template(env, network_template)
    if isolated or len(nodes) == 1:
        env_util.deploy_nodes(env, nodes)
    else:
        env_util.deploy_changes(env, nodes)
    call_handlers('postdeploy')
def download(self, params):
    """Download deployment graph to stdout

    fuel graph --env 1 --download
    fuel graph --env 1 --download --tasks A B C
    fuel graph --env 1 --download --skip X Y --end pre_deployment
    fuel graph --env 1 --download --skip X Y --start post_deployment

    Specify output:
    fuel graph --env 1 --download > output/dir/file.gv

    Get parents only for task A:
    fuel graph --env 1 --download --parents-for A
    """
    env = environment.Environment(params.env)
    parents_for = getattr(params, 'parents-for')
    # Echo the effective parameters as a comment header of the output.
    header_lines = ["# params:\n"]
    for name in ('start', 'end', 'skip', 'tasks', 'parents-for', 'remove'):
        header_lines.append(
            "# - {0}: {1}\n".format(name, getattr(params, name)))
    used_params = "".join(header_lines)
    tasks = params.tasks
    # Recompute the task list when none was given explicitly or when any
    # filtering option is present.
    if not tasks or (params.skip or params.end or params.start):
        tasks = env.get_tasks(skip=params.skip, end=params.end,
                              start=params.start, include=params.tasks)
    dotraph = env.get_deployment_tasks_graph(tasks,
                                             parents_for=parents_for,
                                             remove=params.remove)
    sys.stdout.write(six.text_type(used_params))
    sys.stdout.write(six.text_type(dotraph))
def sync_glance_images(source_env_id, seed_env_id, seed_swift_ep):
    """Sync glance images from original ENV to seed ENV

    Args:
        source_env_id (int): ID of original ENV.
        seed_env_id (int): ID of seed ENV.
        seed_swift_ep (str): endpoint's name where swift-proxy service is
            listening on.

    Examples:
        sync_glance_images(2, 3, 'br-mgmt')
    """
    # set glance username
    glance_user = "******"
    # set swift container value
    container = "glance"
    # choose tenant
    tenant = "services"
    # get clusters by id
    source_env = environment_obj.Environment(source_env_id)
    seed_env = environment_obj.Environment(seed_env_id)
    # gather cics admin IPs
    source_node = next(env_util.get_controllers(source_env))
    seed_node = next(env_util.get_controllers(seed_env))
    # get cics yaml files
    source_yaml = env_util.get_astute_yaml(source_env, source_node)
    seed_yaml = env_util.get_astute_yaml(seed_env, seed_node)
    # get glance passwords
    source_glance_pass = get_glance_password(source_yaml)
    seed_glance_pass = get_glance_password(seed_yaml)
    # get seed node swift ip
    seed_swift_ip = get_endpoint_ip(seed_swift_ep, seed_yaml)
    # get service tenant id & lists of objects for source env
    source_token = get_auth_token(source_node, tenant, glance_user,
                                  source_glance_pass)
    source_swift_list = set(
        get_swift_objects(source_node, tenant, glance_user,
                          source_glance_pass, source_token, container))
    # get service tenant id & lists of objects for seed env
    seed_token = get_auth_token(seed_node, tenant, glance_user,
                                seed_glance_pass)
    seed_swift_list = set(
        get_swift_objects(seed_node, tenant, glance_user, seed_glance_pass,
                          seed_token, container))
    # get service tenant for seed env
    seed_tenant = env_util.get_service_tenant_id(seed_env)
    # check consistency of matched images
    # NOTE(review): tokens are re-requested before each phase, presumably
    # to avoid token expiry during long-running transfers -- confirm.
    source_token = get_auth_token(source_node, tenant, glance_user,
                                  source_glance_pass)
    seed_token = get_auth_token(seed_node, tenant, glance_user,
                                seed_glance_pass)
    # Iterating the intersection builds a new set, so removing entries
    # from seed_swift_list inside the loop is safe.
    for image in source_swift_list & seed_swift_list:
        source_obj_etag = get_object_property(source_node, tenant,
                                              glance_user, source_glance_pass,
                                              source_token, container, image,
                                              'ETag')
        seed_obj_etag = get_object_property(seed_node, tenant, glance_user,
                                            seed_glance_pass, seed_token,
                                            container, image, 'ETag')
        if source_obj_etag != seed_obj_etag:
            # image should be resynced: delete the stale copy on the seed
            # side and drop it from the seed list so the migration loop
            # below re-transfers it
            delete_image(seed_node, tenant, glance_user, seed_glance_pass,
                         seed_token, container, image)
            LOG.info("Swift %s image should be resynced" % image)
            seed_swift_list.remove(image)
    # migrate new images
    for image in source_swift_list - seed_swift_list:
        # download image on source's node local drive
        source_token = get_auth_token(source_node, tenant, glance_user,
                                      source_glance_pass)
        download_image(source_node, tenant, glance_user, source_glance_pass,
                       source_token, container, image)
        # transfer image
        source_token = get_auth_token(source_node, tenant, glance_user,
                                      source_glance_pass)
        seed_token = get_auth_token(seed_node, tenant, glance_user,
                                    seed_glance_pass)
        transfer_image(source_node, tenant, glance_user, seed_glance_pass,
                       seed_token, container, image, seed_swift_ip,
                       seed_tenant)
        # remove transferred image from the source node's local drive
        ssh.sftp(source_node).remove(image)
    # delete outdated images
    for image in seed_swift_list - source_swift_list:
        token = get_auth_token(seed_node, tenant, glance_user,
                               seed_glance_pass)
        delete_image(seed_node, tenant, glance_user, seed_glance_pass, token,
                     container, image)
def set_cobbler_provision(env_id):
    """Force the environment's provisioning method to cobbler."""
    env = environment_obj.Environment(env_id)
    settings = env.get_settings_data()
    provision = settings["editable"]["provision"]
    provision["method"]["value"] = "cobbler"
    env.set_settings_data(settings)