def _fix_fuel_mirror_config(self, admin_ip):
    """Point the fuel-mirror Ubuntu config at the custom mirror.

    When the MIRROR_UBUNTU setting is non-empty, rewrite the default
    archive.ubuntu.com URL inside /usr/share/fuel-mirror/ubuntu.yaml
    on the master node; otherwise do nothing.

    :param admin_ip: IP address of the admin (master) node.
    """
    config_file = '/usr/share/fuel-mirror/ubuntu.yaml'
    if MIRROR_UBUNTU == '':
        # No custom mirror configured - leave the defaults in place.
        return
    # MIRROR_UBUNTU is expected to look like a sources.list entry;
    # the second whitespace-separated token is the mirror URL.
    mirror_url = MIRROR_UBUNTU.split()[1]
    sed_cmd = (
        "sed -i 's|http://archive.ubuntu.com/ubuntu|{0}|g' {1}"
        .format(mirror_url, config_file))
    logger.info(
        'Replacing ubuntu mirrors in the fuel-mirror config with cmd:'
        ' {0}'.format(sed_cmd))
    self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=sed_cmd)
def deploy_multiple_services_local_mirror(self):
    """Deploy cluster with multiple services using local mirror

    Scenario:
        1. Revert snapshot 'ready_with_5_slaves' with default set
           of mirrors
        2. Run 'fuel-mirror' to create mirror repositories
        3. Create cluster with many components to check as many
           packages in local mirrors have correct dependencies
        4. Run 'fuel-mirror' to replace cluster repositories with
           local mirrors
        5. Check that repositories are changed
        6. Deploy cluster
        7. Check running services with OSTF

    Duration 140m
    """
    self.show_step(1)
    self.env.revert_snapshot('ready_with_5_slaves')

    self.show_step(2)
    admin_ip = self.ssh_manager.admin_ip
    if MIRROR_UBUNTU != '':
        # Second token of the sources.list-style entry is the URL.
        mirror_url = MIRROR_UBUNTU.split()[1]
        sed_cmd = (
            "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'"
            " /usr/share/fuel-mirror/ubuntu.yaml".format(mirror_url))
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=sed_cmd)
    mirror_create = 'fuel-mirror create -P ubuntu -G mos ubuntu'
    self.env.admin_actions.ensure_cmd(mirror_create)

    self.show_step(3)
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        mode=DEPLOYMENT_MODE,
        settings={
            'net_provider': 'neutron',
            'net_segment_type': NEUTRON_SEGMENT['tun'],
            'sahara': True,
            'murano': True,
            'ceilometer': True,
            'volumes_lvm': True,
            'volumes_ceph': False,
            'images_ceph': True,
        })

    self.show_step(4)
    mirror_apply = (
        'fuel-mirror apply -P ubuntu -G mos ubuntu '
        '--env {0} --replace'.format(cluster_id))
    self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=mirror_apply)
    self.fuel_web.update_nodes(
        cluster_id,
        {
            'slave-01': ['controller', 'ceph-osd'],
            'slave-02': ['compute', 'ceph-osd'],
            'slave-03': ['cinder', 'ceph-osd'],
            'slave-04': ['mongo'],
            'slave-05': ['mongo'],
        })

    self.show_step(5)
    cluster_repos = self.fuel_web.get_cluster_repos(cluster_id)
    # A repo counts as replaced when its URI points at the master node,
    # either literally or via the {settings.MASTER_IP} placeholder.
    unreplaced = [
        {repo['name']: repo['uri']}
        for repo in cluster_repos['value']
        if self.fuel_web.admin_node_ip not in repo['uri'] and
        '{settings.MASTER_IP}' not in repo['uri']]
    assert_true(
        not unreplaced,
        "Some repositories weren't replaced with local mirrors: "
        "{0}".format(unreplaced))
    self.fuel_web.verify_network(cluster_id)

    self.show_step(6)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(7)
    self.fuel_web.run_ostf(
        cluster_id=cluster_id,
        test_sets=['ha', 'smoke', 'sanity'])
def deploy_multiple_services_local_mirror(self):
    """Deploy cluster with multiple services using local mirror

    Scenario:
        1. Revert snapshot 'ready_with_5_slaves' with default set
           of mirrors
        2. Run 'fuel-mirror' to create mirror repositories
        3. Create cluster with many components to check as many
           packages in local mirrors have correct dependencies
        4. Run 'fuel-mirror' to replace cluster repositories with
           local mirrors
        5. Check that repositories are changed
        6. Deploy cluster
        7. Check running services with OSTF

    Duration 140m
    """
    self.show_step(1)
    self.env.revert_snapshot('ready_with_5_slaves')

    self.show_step(2)
    admin_ip = self.ssh_manager.admin_ip
    if MIRROR_UBUNTU != '':
        # MIRROR_UBUNTU holds a sources.list-style entry; token #2
        # is the actual mirror URL.
        custom_mirror = MIRROR_UBUNTU.split()[1]
        fix_repo_cmd = (
            "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'"
            " /usr/share/fuel-mirror/ubuntu.yaml".format(custom_mirror))
        self.ssh_manager.check_call(ip=admin_ip, command=fix_repo_cmd)
    self.env.admin_actions.ensure_cmd(
        'fuel-mirror create -P ubuntu -G mos ubuntu')

    self.show_step(3)
    cluster_settings = {
        'net_provider': 'neutron',
        'net_segment_type': NEUTRON_SEGMENT['tun'],
        'sahara': True,
        'murano': True,
        'ceilometer': True,
        'volumes_lvm': True,
        'volumes_ceph': False,
        'images_ceph': True,
    }
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        mode=DEPLOYMENT_MODE,
        settings=cluster_settings)

    self.show_step(4)
    self.ssh_manager.check_call(
        ip=admin_ip,
        command='fuel-mirror apply -P ubuntu -G mos ubuntu '
                '--env {0} --replace'.format(cluster_id))
    node_roles = {
        'slave-01': ['controller', 'ceph-osd'],
        'slave-02': ['compute', 'ceph-osd'],
        'slave-03': ['cinder', 'ceph-osd'],
        'slave-04': ['mongo'],
        'slave-05': ['mongo'],
    }
    self.fuel_web.update_nodes(cluster_id, node_roles)

    self.show_step(5)
    repos = self.fuel_web.get_cluster_repos(cluster_id)
    leftovers = []
    for repo in repos['value']:
        uri = repo['uri']
        # Replaced repos point at the master node, either by IP or
        # via the {settings.MASTER_IP} template placeholder.
        if (self.fuel_web.admin_node_ip not in uri and
                '{settings.MASTER_IP}' not in uri):
            leftovers.append({repo['name']: uri})
    assert_true(
        not leftovers,
        "Some repositories weren't replaced with local mirrors: "
        "{0}".format(leftovers))
    self.fuel_web.verify_network(cluster_id)

    self.show_step(6)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(7)
    self.fuel_web.run_ostf(
        cluster_id=cluster_id,
        test_sets=['ha', 'smoke', 'sanity'])
def separate_haproxy(self):
    """Deploy HA environment with separate Haproxy.

    Scenario:
        1. Revert snapshot with ready master node
        2. Copy and install external-lb and detach-haproxy plugins
        3. Bootstrap 3 slaves from default nodegroup
        4. Create cluster with Neutron VXLAN and custom nodegroups
        5. Run 'fuel-mirror' to replace cluster repositories with
           local mirrors
        6. Bootstrap 2 slaves nodes from second nodegroup and one
           node from third node group
        7. Enable plugins for cluster
        8. Add 2 controllers from default nodegroup and 1 controller
           from second node group
        9. Add 1 compute+cinder from default node group and
           1 compute+cinder from second node group
        10. Add haproxy node from third node group
        11. Verify networks
        12. Deploy cluster

    Duration 120m
    Snapshot separate_haproxy
    """
    if not MULTIPLE_NETWORKS:
        raise exceptions.FuelQAVariableNotSet('MULTIPLE_NETWORKS', 'true')

    self.show_step(1)
    self.env.revert_snapshot('ready')

    self.show_step(2)
    master_ip = self.ssh_manager.admin_ip
    for plugin_path in (SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH,
                        SEPARATE_SERVICE_BALANCER_PLUGIN_PATH):
        utils.upload_tarball(
            ip=master_ip, tar_path=plugin_path, tar_target="/var")
        utils.install_plugin_check_code(
            ip=master_ip, plugin=os.path.basename(plugin_path))

    self.show_step(3)
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])

    self.show_step(4)
    admin_ip = self.ssh_manager.admin_ip
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        settings={
            'net_provider': NEUTRON,
            'net_segment_type': NEUTRON_SEGMENT['tun'],
            'tenant': 'separatehaproxy',
            'user': '******',
            'password': '******',
            'ntp_list': [admin_ip],
        })

    self.show_step(5)
    if MIRROR_UBUNTU != '':
        # Second token of the sources.list-style entry is the URL.
        mirror_url = MIRROR_UBUNTU.split()[1]
        sed_cmd = (
            "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'"
            " /usr/share/fuel-mirror/ubuntu.yaml".format(mirror_url))
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=sed_cmd)
    self.ssh_manager.execute_on_remote(
        ip=admin_ip, cmd='fuel-mirror create -P ubuntu -G mos ubuntu')
    self.ssh_manager.execute_on_remote(
        ip=admin_ip,
        cmd='fuel-mirror apply -P ubuntu -G mos ubuntu '
            '--env {0} --replace'.format(cluster_id))

    self.show_step(6)
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
    # slave-06 is intentionally skipped; the third nodegroup uses slave-07.
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[6:7])

    self.show_step(7)
    msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
    for plugin_name in ('detach_haproxy', 'external_loadbalancer'):
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        self.fuel_web.update_plugin_data(
            cluster_id, plugin_name, {'metadata/enabled': True})

    self.show_step(8)
    self.show_step(9)
    self.show_step(10)
    nodegroup1 = NODEGROUPS[0]['name']
    nodegroup2 = NODEGROUPS[1]['name']
    nodegroup3 = NODEGROUPS[2]['name']
    self.fuel_web.update_nodes(
        cluster_id,
        {
            'slave-01': [['controller'], nodegroup1],
            'slave-02': [['controller'], nodegroup1],
            'slave-04': [['compute', 'cinder'], nodegroup2],
            'slave-05': [['controller'], nodegroup2],
            'slave-03': [['compute', 'cinder'], nodegroup1],
            'slave-07': [['standalone-haproxy'], nodegroup3],
        })

    self.show_step(11)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(12)
    self.fuel_web.deploy_cluster_wait(
        cluster_id, timeout=180 * 60, check_services=False)
    self.env.make_snapshot('separate_haproxy')
def separate_haproxy(self):
    """Deploy HA environment with separate Haproxy.

    Scenario:
        1. Revert snapshot with ready master node
        2. Copy and install external-lb and detach-haproxy plugins
        3. Bootstrap 3 slaves from default nodegroup
        4. Create cluster with Neutron VXLAN and custom nodegroups
        5. Run 'fuel-mirror' to replace cluster repositories with
           local mirrors
        6. Bootstrap 2 slaves nodes from second nodegroup and one
           node from third node group
        7. Enable plugins for cluster
        8. Add 2 controllers from default nodegroup and 1 controller
           from second node group
        9. Add 1 compute+cinder from default node group and
           1 compute+cinder from second node group
        10. Add haproxy node from third node group
        11. Verify networks
        12. Deploy cluster

    Duration 120m
    Snapshot separate_haproxy
    """
    if not MULTIPLE_NETWORKS:
        raise exceptions.FuelQAVariableNotSet('MULTIPLE_NETWORKS', 'true')

    self.show_step(1)
    self.env.revert_snapshot('ready')

    self.show_step(2)
    haproxy_plugin = SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH
    balancer_plugin = SEPARATE_SERVICE_BALANCER_PLUGIN_PATH
    utils.upload_tarball(
        ip=self.ssh_manager.admin_ip,
        tar_path=haproxy_plugin,
        tar_target="/var")
    utils.upload_tarball(
        ip=self.ssh_manager.admin_ip,
        tar_path=balancer_plugin,
        tar_target="/var")
    utils.install_plugin_check_code(
        ip=self.ssh_manager.admin_ip,
        plugin=os.path.basename(haproxy_plugin))
    utils.install_plugin_check_code(
        ip=self.ssh_manager.admin_ip,
        plugin=os.path.basename(balancer_plugin))

    self.show_step(3)
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])

    self.show_step(4)
    admin_ip = self.ssh_manager.admin_ip
    env_settings = {
        'net_provider': NEUTRON,
        'net_segment_type': NEUTRON_SEGMENT['tun'],
        'tenant': 'separatehaproxy',
        'user': '******',
        'password': '******',
        'ntp_list': [admin_ip],
    }
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__, settings=env_settings)

    self.show_step(5)
    if MIRROR_UBUNTU != '':
        # MIRROR_UBUNTU holds a sources.list-style entry; token #2
        # is the actual mirror URL.
        custom_mirror = MIRROR_UBUNTU.split()[1]
        fix_repo_cmd = (
            "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'"
            " /usr/share/fuel-mirror/ubuntu.yaml".format(custom_mirror))
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=fix_repo_cmd)
    create_cmd = 'fuel-mirror create -P ubuntu -G mos ubuntu'
    self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=create_cmd)
    apply_cmd = ('fuel-mirror apply -P ubuntu -G mos ubuntu '
                 '--env {0} --replace'.format(cluster_id))
    self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_cmd)

    self.show_step(6)
    slaves = self.env.d_env.nodes().slaves
    self.env.bootstrap_nodes(slaves[3:5])
    # slave-06 is intentionally skipped; the third nodegroup uses slave-07.
    self.env.bootstrap_nodes(slaves[6:7])

    self.show_step(7)
    msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
    for name in ('detach_haproxy', 'external_loadbalancer'):
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, name), msg)
        self.fuel_web.update_plugin_data(
            cluster_id, name, {'metadata/enabled': True})

    self.show_step(8)
    self.show_step(9)
    self.show_step(10)
    group1, group2, group3 = (NODEGROUPS[0]['name'],
                              NODEGROUPS[1]['name'],
                              NODEGROUPS[2]['name'])
    self.fuel_web.update_nodes(
        cluster_id,
        {
            'slave-01': [['controller'], group1],
            'slave-02': [['controller'], group1],
            'slave-04': [['compute', 'cinder'], group2],
            'slave-05': [['controller'], group2],
            'slave-03': [['compute', 'cinder'], group1],
            'slave-07': [['standalone-haproxy'], group3],
        })

    self.show_step(11)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(12)
    self.fuel_web.deploy_cluster_wait(
        cluster_id, timeout=180 * 60, check_services=False)
    self.env.make_snapshot('separate_haproxy')