def cinder_ceph_for_images_ephemeral_rados(self):
    """Deployment with 3 controllers, NeutronVLAN, with cinder for volumes
    and ceph for images, ephemeral and Rados GW for objects

    Scenario:
        1. Create new environment
        2. Choose Neutron, VLAN
        3. Choose cinder for volumes and ceph for images, ceph for
           ephemeral and Rados GW for objects
        4. Add 3 controller
        5. Add 2 compute
        6. Add 3 ceph nodes
        7. Add 1 cinder node
        8. Change default public net mask from /24 to /25
        9. Change default partitioning for ceph and cinder nodes for vdc
        10. Change default dns server to any 2 public dns servers to the
            'Host OS DNS Servers' on Settings tab
        11. Change default ntp servers to any 2 public ntp servers to the
            'Host OS NTP Servers' on Settings tab
        12. Verify networks
        13. Deploy cluster
        14. Verify networks
        15. Run OSTF

    Duration 180m
    Snapshot cinder_ceph_for_images_ephemeral_rados
    """
    self.env.revert_snapshot("ready_with_9_slaves")

    # LVM-backed cinder volumes; ceph backs images, ephemeral drives and
    # objects (RadosGW). DNS/NTP come from the external test settings.
    cluster_settings = {
        'volumes_lvm': True,
        'images_ceph': True,
        'ephemeral_ceph': True,
        'objects_ceph': True,
        'tenant': 'cindercephforimagesephemeralrados',
        'user': '******',
        'password': '******',
        'ntp_list': settings.EXTERNAL_NTP,
        'dns_list': settings.EXTERNAL_DNS
    }

    self.show_step(1, initialize=True)
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        settings=cluster_settings)

    # Steps 2-7 are covered by a single role assignment call.
    for step in range(2, 8):
        self.show_step(step)
    self.fuel_web.update_nodes(
        cluster_id,
        {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute'],
            'slave-06': ['ceph-osd'],
            'slave-07': ['ceph-osd'],
            'slave-08': ['ceph-osd'],
            'slave-09': ['cinder']
        })

    self.show_step(8)
    self.fuel_web.update_network_cidr(cluster_id, 'public')

    # Steps 9-11: repartition vdc on ceph/cinder nodes (DNS/NTP were
    # already set via cluster_settings above).
    for step in range(9, 12):
        self.show_step(step)
    ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['ceph-osd'], role_status='pending_roles')
    for osd_node in ceph_nodes:
        ceph_image_size = self.fuel_web.update_node_partitioning(
            osd_node, node_role='ceph')
    cinder_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['cinder'], role_status='pending_roles')
    for lvm_node in cinder_nodes:
        cinder_image_size = self.fuel_web.update_node_partitioning(
            lvm_node, node_role='cinder')

    self.show_step(12)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(13)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.check_ceph_status(cluster_id)

    self.show_step(14)
    self.fuel_web.verify_network(cluster_id)
    # Confirm the custom partitioning survived deployment on every node.
    for node in ceph_nodes:
        checkers.check_ceph_image_size(node['ip'], ceph_image_size)
    for node in cinder_nodes:
        checkers.check_cinder_image_size(node['ip'], cinder_image_size)

    self.show_step(15)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.env.make_snapshot("cinder_ceph_for_images_ephemeral_rados")
def four_controllers(self):
    """Deployment with 4 controllers, NeutronVLAN, and other disk
    configuration

    Scenario:
        1. Create new environment
        2. Choose Neutron, VLAN
        3. Add 4 controllers
        4. Add 2 computes
        5. Add 3 cinders
        6. Change disk configuration for all Cinder nodes.
           Change 'Cinder' volume for vdc
        7. Verify networks
        8. Deploy the environment
        9. Verify networks
        10. Check disk configuration
        11. Run OSTF tests

    Notation: "By default recommended use uneven numbers of controllers,
    but nowhere there is information we cannot deploy with even numbers
    of controllers. So we need to check it."

    Duration: 180 min
    Snapshot: four_controllers
    """
    self.env.revert_snapshot("ready_with_9_slaves")

    self.show_step(1, initialize=True)
    self.show_step(2)
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
    )

    self.show_step(3)
    self.show_step(4)
    self.show_step(5)
    self.fuel_web.update_nodes(
        cluster_id,
        {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['controller'],
            'slave-05': ['compute'],
            'slave-06': ['compute'],
            'slave-07': ['cinder'],
            'slave-08': ['cinder'],
            'slave-09': ['cinder'],
        }
    )

    self.show_step(6)
    n_cinders = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id=cluster_id,
        roles=['cinder'],
        role_status='pending_roles'
    )
    for node in n_cinders:
        # FIX: was 'update_node_partioning' (misspelled, missing "ti"),
        # which is not the method every sibling test in this file calls;
        # it would raise AttributeError before deployment even started.
        cinder_image_size = self.fuel_web.update_node_partitioning(node)
    d_cinders = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_cinders)

    self.show_step(7)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(8)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(9)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(10)
    # Verify over SSH that the resized cinder volume is in place.
    for d_cinder in d_cinders:
        with self.fuel_web.get_ssh_for_node(d_cinder.name) as remote:
            checkers.check_cinder_image_size(remote, cinder_image_size)

    self.show_step(11)
    self.fuel_web.run_ostf(cluster_id)

    self.env.make_snapshot('four_controllers')
def cinder_ceph_for_images_ephemeral_rados(self):
    """Deploy cluster with cinder volumes and ceph for images,
    ephemeral, rados

    Scenario:
        1. Create cluster
        2. Add 3 nodes with controller role
        3. Add 2 nodes with compute role
        4. Add 3 nodes with ceph OSD roles
        5. Add 1 cinder node
        6. Change disks configuration for ceph and cinder nodes
        7. Change default dns server
        8. Change default NTP server
        9. Change public net mask from /24 to /25
        10. Verify networks
        11. Deploy the cluster
        12. Check ceph status
        13. Verify networks
        14. Check ceph disks partitioning
        15. Run OSTF

    Duration 180m
    Snapshot cinder_ceph_for_images_ephemeral_rados
    """
    self.env.revert_snapshot("ready_with_9_slaves")

    # LVM cinder volumes; ceph for images/ephemeral/objects (RadosGW);
    # DNS/NTP (steps 7-8) applied via cluster settings.
    data = {
        'volumes_lvm': True,
        'images_ceph': True,
        'ephemeral_ceph': True,
        'objects_ceph': True,
        'tenant': 'cindercephforimagesephemeralrados',
        'user': '******',
        'password': '******',
        'ntp_list': settings.EXTERNAL_NTP,
        'dns_list': settings.EXTERNAL_DNS
    }

    self.show_step(1, initialize=True)
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        settings=data)

    self.show_step(2)
    self.show_step(3)
    self.show_step(4)
    self.show_step(5)
    self.fuel_web.update_nodes(
        cluster_id,
        {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute'],
            'slave-06': ['ceph-osd'],
            'slave-07': ['ceph-osd'],
            'slave-08': ['ceph-osd'],
            'slave-09': ['cinder']
        })

    self.show_step(9)
    self.fuel_web.update_network_cidr(cluster_id, 'public')

    self.show_step(6)
    self.show_step(7)
    self.show_step(8)
    ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['ceph-osd'], role_status='pending_roles')
    d_ceph = self.fuel_web.get_devops_nodes_by_nailgun_nodes(ceph_nodes)
    for ceph_node in ceph_nodes:
        ceph_image_size = self.fuel_web.update_node_partitioning(
            ceph_node, node_role='ceph')
    cinder_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['cinder'], role_status='pending_roles')
    # FIX: the devops counterparts of the *cinder* nodes were looked up
    # from ceph_nodes, so the cinder partitioning check below actually
    # re-checked the ceph nodes. Map from cinder_nodes instead.
    d_cinder = self.fuel_web.get_devops_nodes_by_nailgun_nodes(cinder_nodes)
    for cinder_node in cinder_nodes:
        cinder_image_size = self.fuel_web.update_node_partitioning(
            cinder_node, node_role='cinder')

    self.show_step(10)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(11)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(12)
    self.fuel_web.check_ceph_status(cluster_id)

    self.show_step(13)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(14)
    # Verify over SSH that the custom partitioning survived deployment.
    for devops_ceph in d_ceph:
        with self.fuel_web.get_ssh_for_node(devops_ceph.name) as remote:
            checkers.check_ceph_image_size(remote, ceph_image_size)
    for devops_cinder in d_cinder:
        with self.fuel_web.get_ssh_for_node(devops_cinder.name) as remote:
            checkers.check_cinder_image_size(remote, cinder_image_size)

    self.show_step(15)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.env.make_snapshot("cinder_ceph_for_images_ephemeral_rados")
def deploy_reset_five_controllers(self):
    """Deployment with 5 controllers, NeutronVLAN with reset and re-deploy

    Scenario:
        1. Deploy environment with 5 controller NeutronVLAN, 2 compute,
           1 cinder with disks partitioning 'vdc'
        2. Verify networks
        3. Run OSTF tests
        4. Reset cluster
        5. Change openstack username, password, tenant
        6. Re-deploy environment
        7. Wait for HA services to be ready
        8. Wait for for OS services to be ready
        9. Verify networks
        10. Run OSTF

    Duration 120m
    Snapshot deploy_reset_five_controllers
    """
    self.env.revert_snapshot("ready_with_all_slaves")

    self.show_step(1)
    cluster_settings = {
        "net_provider": 'neutron',
        "net_segment_type": NEUTRON_SEGMENT['vlan'],
        'tenant': 'simpleVlan',
        'user': '******',
        'password': '******'
    }
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        mode=DEPLOYMENT_MODE,
        settings=cluster_settings)
    self.fuel_web.update_nodes(
        cluster_id,
        {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['controller'],
            'slave-05': ['controller'],
            'slave-06': ['compute'],
            'slave-07': ['compute'],
            'slave-08': ['cinder']
        })
    # Repartition vdc on the pending cinder node(s) before deployment.
    cinder_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['cinder'], role_status='pending_roles')
    for lvm_node in cinder_nodes:
        cinder_image_size = self.fuel_web.update_node_partitioning(
            lvm_node, node_role='cinder')
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(2)
    self.fuel_web.verify_network(cluster_id)
    for node in cinder_nodes:
        checkers.check_cinder_image_size(node['ip'], cinder_image_size)

    # ostf_tests before reset
    self.show_step(3)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])

    self.show_step(4)
    self.fuel_web.stop_reset_env_wait(cluster_id)

    self.show_step(5)
    # Swap the OpenStack credentials before re-deploying.
    cluster_attrs = self.fuel_web.client.get_cluster_attributes(cluster_id)
    access = cluster_attrs['editable']['access']
    access['user']['value'] = 'myNewUser'
    access['password']['value'] = 'myNewPassword'
    access['tenant']['value'] = 'myNewTenant'
    self.fuel_web.client.update_cluster_attributes(cluster_id,
                                                   cluster_attrs)

    self.show_step(6)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(7)
    self.fuel_web.assert_ha_services_ready(cluster_id)

    self.show_step(8)
    self.fuel_web.assert_os_services_ready(cluster_id, timeout=10 * 60)

    self.show_step(9)
    self.fuel_web.verify_network(cluster_id)

    # ostf_tests after reset
    self.show_step(10)
    self.fuel_web.run_ostf(cluster_id=cluster_id,
                           test_sets=['ha', 'smoke', 'sanity'])

    self.env.make_snapshot("deploy_reset_five_controllers")
def cinder_ceph_for_images_ephemeral_rados(self):
    """Deployment with 3 controllers, NeutronVLAN, with cinder for volumes
    and ceph for images, ephemeral and Rados GW for objects

    Scenario:
        1. Create new environment
        2. Choose Neutron, VLAN
        3. Choose cinder for volumes and ceph for images, ceph for
           ephemeral and Rados GW for objects
        4. Add 3 controller
        5. Add 2 compute
        6. Add 3 ceph nodes
        7. Add 1 cinder node
        8. Change default public net mask from /24 to /25
        9. Change default partitioning for ceph and cinder nodes for vdc
        10. Change default dns server to any 2 public dns servers to the
            'Host OS DNS Servers' on Settings tab
        11. Change default ntp servers to any 2 public ntp servers to the
            'Host OS NTP Servers' on Settings tab
        12. Verify networks
        13. Deploy cluster
        14. Verify networks
        15. Run OSTF

    Duration 180m
    Snapshot cinder_ceph_for_images_ephemeral_rados
    """
    self.env.revert_snapshot("ready_with_9_slaves")

    # Storage backends: LVM for cinder volumes, ceph for images,
    # ephemeral and objects; external DNS/NTP from test settings.
    conf = {
        'volumes_lvm': True,
        'images_ceph': True,
        'ephemeral_ceph': True,
        'objects_ceph': True,
        'tenant': 'cindercephforimagesephemeralrados',
        'user': '******',
        'password': '******',
        'ntp_list': settings.EXTERNAL_NTP,
        'dns_list': settings.EXTERNAL_DNS
    }

    self.show_step(1, initialize=True)
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__, settings=conf)

    self.show_step(2)
    self.show_step(3)
    self.show_step(4)
    self.show_step(5)
    self.show_step(6)
    self.show_step(7)
    roles = {
        'slave-01': ['controller'],
        'slave-02': ['controller'],
        'slave-03': ['controller'],
        'slave-04': ['compute'],
        'slave-05': ['compute'],
        'slave-06': ['ceph-osd'],
        'slave-07': ['ceph-osd'],
        'slave-08': ['ceph-osd'],
        'slave-09': ['cinder']
    }
    self.fuel_web.update_nodes(cluster_id, roles)

    self.show_step(8)
    self.fuel_web.update_network_cidr(cluster_id, 'public')

    self.show_step(9)
    self.show_step(10)
    self.show_step(11)
    # Repartition vdc on every pending ceph-osd and cinder node.
    ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['ceph-osd'], role_status='pending_roles')
    for node in ceph_nodes:
        ceph_image_size = self.fuel_web.update_node_partitioning(
            node, node_role='ceph')
    cinder_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['cinder'], role_status='pending_roles')
    for node in cinder_nodes:
        cinder_image_size = self.fuel_web.update_node_partitioning(
            node, node_role='cinder')

    self.show_step(12)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(13)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.check_ceph_status(cluster_id)

    self.show_step(14)
    self.fuel_web.verify_network(cluster_id)
    # The resized partitions must still be present after deployment.
    for ceph in ceph_nodes:
        checkers.check_ceph_image_size(ceph['ip'], ceph_image_size)
    for cinder in cinder_nodes:
        checkers.check_cinder_image_size(cinder['ip'], cinder_image_size)

    self.show_step(15)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.env.make_snapshot("cinder_ceph_for_images_ephemeral_rados")
def cinder_ceph_for_images_ephemeral_rados(self):
    """Deploy cluster with cinder volumes and ceph for images,
    ephemeral, rados

    Scenario:
        1. Create cluster
        2. Add 3 nodes with controller role
        3. Add 2 nodes with compute role
        4. Add 3 nodes with ceph OSD roles
        5. Add 1 cinder node
        6. Change disks configuration for ceph and cinder nodes
        7. Change default dns server
        8. Change default NTP server
        9. Change public net mask from /24 to /25
        10. Verify networks
        11. Deploy the cluster
        12. Check ceph status
        13. Verify networks
        14. Check ceph disks partitioning
        15. Run OSTF

    Duration 180m
    Snapshot cinder_ceph_for_images_ephemeral_rados
    """
    self.env.revert_snapshot("ready_with_9_slaves")

    # LVM cinder volumes; ceph for images/ephemeral/objects (RadosGW);
    # DNS/NTP (steps 7-8) applied via cluster settings.
    data = {
        'volumes_lvm': True,
        'images_ceph': True,
        'ephemeral_ceph': True,
        'objects_ceph': True,
        'tenant': 'cindercephforimagesephemeralrados',
        'user': '******',
        'password': '******',
        'ntp_list': settings.EXTERNAL_NTP,
        'dns_list': settings.EXTERNAL_DNS
    }

    self.show_step(1, initialize=True)
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        settings=data
    )

    self.show_step(2)
    self.show_step(3)
    self.show_step(4)
    self.show_step(5)
    self.fuel_web.update_nodes(
        cluster_id,
        {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute'],
            'slave-06': ['ceph-osd'],
            'slave-07': ['ceph-osd'],
            'slave-08': ['ceph-osd'],
            'slave-09': ['cinder']
        }
    )

    self.show_step(9)
    self.fuel_web.update_network_cidr(cluster_id, 'public')

    self.show_step(6)
    self.show_step(7)
    self.show_step(8)
    ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['ceph-osd'], role_status='pending_roles')
    d_ceph = self.fuel_web.get_devops_nodes_by_nailgun_nodes(ceph_nodes)
    for ceph_node in ceph_nodes:
        ceph_image_size = self.fuel_web.update_node_partitioning(
            ceph_node, node_role='ceph')
    cinder_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['cinder'], role_status='pending_roles')
    # FIX: d_cinder was built from ceph_nodes, so the cinder image-size
    # check below silently re-checked the ceph nodes and never touched
    # the cinder node. Build it from cinder_nodes.
    d_cinder = self.fuel_web.get_devops_nodes_by_nailgun_nodes(cinder_nodes)
    for cinder_node in cinder_nodes:
        cinder_image_size = self.fuel_web.update_node_partitioning(
            cinder_node, node_role='cinder')

    self.show_step(10)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(11)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(12)
    self.fuel_web.check_ceph_status(cluster_id)

    self.show_step(13)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(14)
    # Verify over SSH that the custom partitioning survived deployment.
    for devops_ceph in d_ceph:
        with self.fuel_web.get_ssh_for_node(devops_ceph.name) as remote:
            checkers.check_ceph_image_size(remote, ceph_image_size)
    for devops_cinder in d_cinder:
        with self.fuel_web.get_ssh_for_node(devops_cinder.name) as remote:
            checkers.check_cinder_image_size(remote, cinder_image_size)

    self.show_step(15)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.env.make_snapshot("cinder_ceph_for_images_ephemeral_rados")