    def test_network_restore(self):
        try:
            reporting.add_test_script(str(__name__))
            self.delete_network_topology()
            ntwrks = self.create_network()
            vms = {}
            nws = [x['id'] for x in ntwrks]
            vmid = self.create_vm(vm_name="instance",
                                  networkid=[{
                                      'uuid': random.choice(nws)
                                  }],
                                  vm_cleanup=True)

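            # Capture the network, subnet, router and interface details before
            # the snapshot so they can be compared after the network restore.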
            nt_bf, sbnt_bf, rt_bf, intf_bf = self.get_topology_details()

            workload_id = self.workload_create([vmid],
                                               tvaultconf.parallel,
                                               workload_cleanup=True)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                raise Exception("Workload creation failed")

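            # Trigger a full snapshot of the workload and verify it reaches
            # the "available" state.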
            snapshot_id = self.workload_snapshot(workload_id,
                                                 True,
                                                 snapshot_cleanup=True)
            time.sleep(5)
            self.wait_for_workload_tobe_available(workload_id)
            if (self.getSnapshotStatus(workload_id,
                                       snapshot_id) == "available"):
                reporting.add_test_step("Create full snapshot",
                                        tvaultconf.PASS)
                LOG.debug("Full snapshot available!!")
            else:
                reporting.add_test_step("Create full snapshot",
                                        tvaultconf.FAIL)
                raise Exception("Snapshot creation failed")

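            # Build the instance details payload for the selective restore
            # request.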
            instance_details = []
            vm_name = "restored_instance"
            temp_instance_data = {
                'id': vmid,
                'include': True,
                'restore_boot_disk': True,
                'name': vm_name,
                'vdisks': []
            }
            instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

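            # Delete the original instance and the network topology so the
            # restore has to recreate the networks from the snapshot.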
            self.delete_vm(vmid)
            self.delete_network_topology()

            restore_id = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_name=tvaultconf.restore_name,
                instance_details=instance_details,
                network_restore_flag=True)

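            # Wait for the selective restore to complete and verify its status.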
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id) == "available"):
                reporting.add_test_step(
                    "Selective restore with network restore", tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Selective restore with network restore", tvaultconf.FAIL)
                raise Exception(
                    "Selective restore with network restore failed")

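            # Fetch the topology again and compare each component with the
            # details captured before the snapshot.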
            nt_af, sbnt_af, rt_af, intf_af = self.get_topology_details()
            if nt_bf == nt_af:
                reporting.add_test_step(
                    "Verify network details after network restore",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Verify network details after network restore",
                    tvaultconf.FAIL)
                LOG.error(
                    "Network details before and after restore: %s , %s" %
                    (nt_bf, nt_af))

            if sbnt_bf == sbnt_af:
                reporting.add_test_step(
                    "Verify subnet details after network restore",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Verify subnet details after network restore",
                    tvaultconf.FAIL)
                LOG.error(
                    "Subnet details before and after restore: %s , %s" %
                    (sbnt_bf, sbnt_af))

            if rt_bf == rt_af:
                reporting.add_test_step(
                    "Verify router details after network restore",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Verify router details after network restore",
                    tvaultconf.FAIL)
                LOG.error(
                    "Router details before and after restore: %s , %s" %
                    (rt_bf, rt_af))

            if intf_bf == intf_af:
                reporting.add_test_step(
                    "Verify interface details after network restore",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Verify interface details after network restore",
                    tvaultconf.FAIL)
                LOG.error(
                    "Interface details before and after restore: %s , %s" %
                    (intf_bf, intf_af))

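            # Clean up the restored instance and the restored network topology.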
            self.delete_vm(self.get_restored_vm_list(restore_id)[0])
            self.delete_network_topology()
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example 2
    def test_3_regression(self):
        reporting.add_test_script(str(__name__) + "_inplace_restore_cli")
        try:
            LOG.debug("pre req completed")

            volumes = tvaultconf.volumes_parts
            mount_points = ["mount_data_b", "mount_data_c"]

            #calculate md5 sum before
            tree = lambda: collections.defaultdict(tree)
            self.md5sums_dir_before = tree()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[0]))
            self.md5sums_dir_before[str(self.floating_ips_list[0])][str(
                mount_points[0])] = self.calculatemmd5checksum(
                    ssh, mount_points[0])
            ssh.close()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                mount_points[0])] = self.calculatemmd5checksum(
                    ssh, mount_points[0])
            self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                mount_points[1])] = self.calculatemmd5checksum(
                    ssh, mount_points[1])
            ssh.close()

            LOG.debug("md5sums_dir_before" + str(self.md5sums_dir_before))

            #Fill some data on each of the volumes attached
            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[0]))
            self.addCustomSizedfilesOnLinux(ssh, mount_points[0], 2)
            ssh.close()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            self.addCustomSizedfilesOnLinux(ssh, mount_points[0], 2)
            self.addCustomSizedfilesOnLinux(ssh, mount_points[1], 2)
            ssh.close()

            #Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + str(
                tvaultconf.restore_filename) + " " + str(self.incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            #Restore.json with only volume 2 excluded
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk':
                        True,
                        'include':
                        True,
                        'id':
                        self.workload_instances[0],
                        'vdisks': [{
                            'restore_cinder_volume': True,
                            'id': self.volumes_list[0],
                            'new_volume_type': CONF.volume.volume_type
                        }]
                    }, {
                        'restore_boot_disk':
                        True,
                        'include':
                        True,
                        'id':
                        self.workload_instances[1],
                        'vdisks': [{
                            'restore_cinder_volume': True,
                            'id': self.volumes_list[1],
                            'new_volume_type': CONF.volume.volume_type
                        }]
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(json.loads(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            self.restore_id = query_data.get_snapshot_restore_id(
                self.incr_snapshot_id)
            self.wait_for_snapshot_tobe_available(self.workload_id,
                                                  self.incr_snapshot_id)

            #get in-place restore status
            if (self.getRestoreStatus(self.workload_id, self.incr_snapshot_id,
                                      self.restore_id) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            # mount volumes after restore
            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[0]))
            self.execute_command_disk_mount(ssh,
                                            str(self.floating_ips_list[0]),
                                            [volumes[0]], [mount_points[0]])
            ssh.close()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            self.execute_command_disk_mount(ssh,
                                            str(self.floating_ips_list[1]),
                                            volumes, mount_points)
            ssh.close()

            # calculate md5 after inplace restore
            tree = lambda: collections.defaultdict(tree)
            md5_sum_after_in_place_restore = tree()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[0]))
            md5_sum_after_in_place_restore[str(self.floating_ips_list[0])][str(
                mount_points[0])] = self.calculatemmd5checksum(
                    ssh, mount_points[0])
            ssh.close()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            md5_sum_after_in_place_restore[str(self.floating_ips_list[1])][str(
                mount_points[0])] = self.calculatemmd5checksum(
                    ssh, mount_points[0])
            md5_sum_after_in_place_restore[str(self.floating_ips_list[1])][str(
                mount_points[1])] = self.calculatemmd5checksum(
                    ssh, mount_points[1])
            ssh.close()

            LOG.debug("md5_sum_after_in_place_restore" +
                      str(md5_sum_after_in_place_restore))

            #md5 sum verification

            if self.md5sums_dir_before[str(self.floating_ips_list[0])][str(
                    mount_points[0])] == md5_sum_after_in_place_restore[str(
                        self.floating_ips_list[0])][str(mount_points[0])]:
                reporting.add_test_step("Md5 Verification for volume 1",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Md5 Verification for volume 1",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            if self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                    mount_points[0])] == md5_sum_after_in_place_restore[str(
                        self.floating_ips_list[1])][str(mount_points[0])]:
                reporting.add_test_step("Md5 Verification for volume 2",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Md5 Verification for volume 2",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            if self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                    mount_points[1])] != md5_sum_after_in_place_restore[str(
                        self.floating_ips_list[1])][str(mount_points[1])]:
                reporting.add_test_step("Md5 Verification for volume 3",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Md5 Verification for volume 3",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

        finally:
            #Delete restore for snapshot
            self.restored_volumes = self.get_restored_volume_list(
                self.restore_id)
            if tvaultconf.cleanup == True:
                self.restore_delete(self.workload_id, self.incr_snapshot_id,
                                    self.restore_id)
                LOG.debug("Snapshot Restore deleted successfully")

                #Delete restored volumes and volume snapshots
                self.delete_volumes(self.restored_volumes)
Example 3
    def test_6_regression(self):
        reporting.add_test_script(
            str(__name__) + "_one_click_restore_bootfrom_image")
        try:
            if self.exception != "":
                LOG.debug("pre req failed")
                reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
                raise Exception(str(self.exception))
            LOG.debug("pre req completed")

            self.created = False

            #Delete the original instance
            self.delete_vms(self.workload_instances)
            self.delete_key_pair(tvaultconf.key_pair_name)
            self.delete_security_group(self.security_group_id)
            self.delete_flavor(self.flavor_id)
            LOG.debug("Instances deleted successfully")

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + self.snapshot_ids[
                1]
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

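            # Verify the one-click restore against the database by polling the
            # restore status until it becomes "available" or "error".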
            wc = query_data.get_snapshot_restore_status(
                tvaultconf.restore_name, self.snapshot_ids[1])
            LOG.debug("Snapshot restore status: " + str(wc))
            while True:
                time.sleep(5)
                wc = query_data.get_snapshot_restore_status(
                    tvaultconf.restore_name, self.snapshot_ids[1])
                LOG.debug("Snapshot restore status: " + str(wc))
                if (str(wc) == "available"):
                    LOG.debug("Snapshot Restore successfully completed")
                    reporting.add_test_step(
                        "Snapshot one-click restore verification with DB",
                        tvaultconf.PASS)
                    self.created = True
                    break
                elif (str(wc) == "error"):
                    break

            if (self.created == False):
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.FAIL)
                raise Exception("Snapshot Restore did not get created")

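            # Fetch the restore ID of the one-click restore from the database.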
            self.restore_id = query_data.get_snapshot_restore_id(
                self.snapshot_ids[1])
            LOG.debug("Restore ID: " + str(self.restore_id))

            #Fetch instance details after restore
            self.restored_vm_details_list = []

            #restored vms list
            self.vm_list = self.get_restored_vm_list(self.restore_id)
            LOG.debug("Restored vms : " + str(self.vm_list))

            #restored vms all details list
            for id in range(len(self.workload_instances)):
                self.restored_vm_details_list.append(
                    self.get_vm_details(self.vm_list[id]))
            LOG.debug("Restored vm details list: " +
                      str(self.restored_vm_details_list))

            #required details of restored vms
            self.vms_details_after_restore = self.get_vms_details_list(
                self.restored_vm_details_list)
            LOG.debug("VM details after restore: " +
                      str(self.vms_details_after_restore))

            #Verify floating ips
            self.floating_ips_after_restore = []
            for i in range(len(self.vms_details_after_restore)):
                self.floating_ips_after_restore.append(
                    self.vms_details_after_restore[i]['floating_ip'])
            if (sorted(self.floating_ips_after_restore) ==
                    sorted(self.floating_ips_list)):
                reporting.add_test_step("Floating ip verification",
                                        tvaultconf.PASS)
            else:
                LOG.error("Floating ips before restore: " +
                          str(sorted(self.floating_ips_list)))
                LOG.error("Floating ips after restore: " +
                          str(sorted(self.floating_ips_after_restore)))
                reporting.add_test_step("Floating ip verification",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            #calculate md5sum after restore
            tree = lambda: collections.defaultdict(tree)
            md5_sum_after_oneclick_restore = tree()
            for floating_ip in self.floating_ips_list:
                for mount_point in mount_points:
                    ssh = self.SshRemoteMachineConnectionWithRSAKey(
                        str(floating_ip))
                    md5_sum_after_oneclick_restore[str(floating_ip)][str(
                        mount_point)] = self.calculatemmd5checksum(
                            ssh, mount_point)
                    ssh.close()
            LOG.debug("md5_sum_after_oneclick_restore" +
                      str(md5_sum_after_oneclick_restore))

            #md5sum verification
            if (self.md5sums_dir_before == md5_sum_after_oneclick_restore):
                reporting.add_test_step("Md5 Verification", tvaultconf.PASS)
            else:
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.add_test_step("Md5 Verification", tvaultconf.FAIL)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

    def test_1_config_workload_configure(self):
        reporting.add_test_script(str(__name__) + "_configure")
        try:
            # prerequisite handles config_user creation and config_backup_pvk(private key) creation

            # for config backup configuration, yaml_file creation
            self.create_config_backup_yaml()

            # config backup configuration with CLI command
            config_workload_command = command_argument_string.config_workload_configure + " --config-file yaml_file.yaml --authorized-key config_backup_pvk "

            LOG.debug("config workload configure cli command: " +
                      str(config_workload_command))

            rc = cli_parser.cli_returncode(config_workload_command)
            if rc != 0:
                reporting.add_test_step(
                    "Triggering config_workload_configure command via CLI",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Triggering config_workload_configure command via CLI",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            config_workload_output = self.get_config_workload()
            LOG.debug("config_workload_show_command output: " +
                      str(config_workload_output))

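            # Extract the list of configured service directories from the
            # config workload metadata.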
            for metadata in config_workload_output['metadata']:
                if metadata['key'] == 'services_to_backup':
                    config_dirs = metadata['value']

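            # The configuration is considered valid if at least one directory
            # from tvaultconf.config_yaml appears in the configured services.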
            found = False
            for key in tvaultconf.config_yaml.keys():
                if key in config_dirs:
                    found = True

            if found:
                LOG.debug("Config dirs existence: True")
                reporting.add_test_step("Config dirs existence",
                                        tvaultconf.PASS)
            else:
                LOG.debug("Config dirs existence: False")
                reporting.add_test_step("Config dirs existence",
                                        tvaultconf.FAIL)

            config_workload_status = config_workload_output['status']

            if config_workload_status == "available":
                LOG.debug(
                    "config_workload status is available, config_workload_id: "
                    + config_workload_output['id'])
                reporting.add_test_step("config_workload status: available",
                                        tvaultconf.PASS)
            else:
                LOG.debug(
                    "config_workload status is not available, Error msg: " +
                    config_workload_output['error_msg'])
                reporting.add_test_step(
                    "config_workload status: " +
                    config_workload_output['status'], tvaultconf.FAIL)
                raise Exception("Config Workload Configure Failed.")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

    def test_tvault_rbac_backuprole_touser_policyjson(self):
        try:
            workload_create_error_str = "Policy doesn't allow workload:workload_create to be performed."

            # Change policy.json file on tvault to change role and rule
            self.change_policyjson_file("backup", "backup_api")
            self.instances_id = []

            # Create volume, Launch an Instance
            self.volumes_id = self.create_volume(volume_cleanup=False)
            LOG.debug("Volume-1 ID: " + str(self.volumes_id))
            self.instances_id.append(self.create_vm(vm_cleanup=False))
            LOG.debug("VM-1 ID: " + str(self.instances_id[0]))
            self.attach_volume(self.volumes_id, self.instances_id[0])
            LOG.debug("Volume attached")

            # Use backupuser credentials
            os.environ['OS_USERNAME'] = CONF.identity.backupuser
            os.environ['OS_PASSWORD'] = CONF.identity.backupuser_password

            # Create workload with CLI by backup role
            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                self.instances_id[0])
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                LOG.debug("workload creation unsuccessful by backup role")
                raise Exception(
                    "RBAC policy fails for workload creation by backup role")
            else:
                LOG.debug("Workload created successfully by backup role")
                reporting.add_test_step(
                    "Execute workload_create command by backup role",
                    tvaultconf.PASS)
                time.sleep(10)
                self.wid1 = query_data.get_workload_id(
                    tvaultconf.workload_name)
                workload_available = self.wait_for_workload_tobe_available(
                    self.wid1)

            # Run snapshot_create CLI by backup role
            snapshot_create = command_argument_string.snapshot_create + str(
                self.wid1)
            LOG.debug("snapshot_create command: " + str(snapshot_create))
            rc = cli_parser.cli_returncode(snapshot_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot_create command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create did not execute correctly by backup role"
                )
            else:
                reporting.add_test_step(
                    "Execute snapshot_create command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create executed correctly by backup role"
                )
                self.snapshot_id1 = query_data.get_inprogress_snapshot_id(
                    self.wid1)
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)

            #Delete the original instance
            self.delete_vm(self.instances_id[0])
            LOG.debug("Instance deleted successfully for restore")

            #Delete corresponding volume
            self.delete_volume(self.volumes_id)
            LOG.debug("Volume deleted successfully for restore")

            #Create one-click restore using CLI command by backup role
            restore_command = command_argument_string.oneclick_restore + " " + str(
                self.snapshot_id1)
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command one-click restore did not execute correctly by backup role"
                )
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command one-click restore executed correctly backup role")
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)
                self.restore_id1 = query_data.get_snapshot_restore_id(
                    self.snapshot_id1)
                LOG.debug("Restore ID: " + str(self.restore_id1))
                self.restore_vm_id1 = self.get_restored_vm_list(
                    self.restore_id1)
                LOG.debug("Restore VM ID: " + str(self.restore_vm_id1))
                self.restore_volume_id1 = self.get_restored_volume_list(
                    self.restore_id1)
                LOG.debug("Restore Volume ID: " + str(self.restore_volume_id1))

            # Use admin credentials
            os.environ['OS_USERNAME'] = CONF.identity.username
            os.environ['OS_PASSWORD'] = CONF.identity.password

            # Create workload with CLI by admin role
            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                self.restore_vm_id1)
            error = cli_parser.cli_error(workload_create)
            if error and (str(
                    error.strip('\n')).find(workload_create_error_str) != -1):
                LOG.debug(
                    "Command workload_create did not execute correctly by admin role"
                )
                reporting.add_test_step(
                    "Can not execute workload_create command by admin role",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Command workload_create did not execute correctly by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_create executed correctly by admin role")

            # Run snapshot_create CLI by admin role
            snapshot_create = command_argument_string.snapshot_create + str(
                self.wid1)
            LOG.debug("snapshot_create command: " + str(snapshot_create))
            rc = cli_parser.cli_returncode(snapshot_create)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create did not execute correctly by admin role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create executed correctly by admin role")

            #Create one-click restore using CLI command by admin role
            restore_command = command_argument_string.oneclick_restore + " " + str(
                self.snapshot_id1)
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute restore_create command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_create did not execute correctly by admin role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute restore_create command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_create executed correctly by admin role")

            # Run restore_delete CLI by admin role
            restore_delete = command_argument_string.restore_delete + str(
                self.restore_id1)
            rc = cli_parser.cli_returncode(restore_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute restore_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete did not execute correctly by admin role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute restore_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_delete executed correctly by admin role")

            # Run snapshot_delete CLI by admin role
            snapshot_delete = command_argument_string.snapshot_delete + str(
                self.snapshot_id1)
            rc = cli_parser.cli_returncode(snapshot_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete did not execute correctly by admin role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete executed correctly by admin role")

            # Delete workload with CLI by admin role
            workload_delete = command_argument_string.workload_delete + str(
                self.wid1)
            rc = cli_parser.cli_returncode(workload_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute workload_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command workload_delete did not execute correctly by admin role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute workload_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_delete executed correctly by admin role")

            # Use nonadmin credentials
            os.environ['OS_USERNAME'] = CONF.identity.nonadmin_user
            os.environ['OS_PASSWORD'] = CONF.identity.nonadmin_password

            # Create workload with CLI by default role
            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                self.restore_vm_id1)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                LOG.debug(
                    "Command workload_create did not execute correctly by default role"
                )
                reporting.add_test_step(
                    "Can not execute workload_create command by default role",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Can not execute workload_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_create executed correctly by default role"
                )

            # Run snapshot_create CLI by default role
            snapshot_create = command_argument_string.snapshot_create + str(
                self.wid1)
            rc = cli_parser.cli_returncode(snapshot_create)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create did not execute correctly by default role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create executed correctly by default role"
                )

            # Create one-click restore using CLI by default role
            restore_command = command_argument_string.oneclick_restore + " " + str(
                self.snapshot_id1)
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute restore_create command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_create did not execute correctly by default role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute restore_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_create executed correctly by default role"
                )

            # Run restore_delete CLI by default role
            restore_delete = command_argument_string.restore_delete + str(
                self.restore_id1)
            rc = cli_parser.cli_returncode(restore_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute restore_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete did not execute correctly by default role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute restore_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_delete executed correctly by default role"
                )

            # Run snapshot_delete CLI by default role
            snapshot_delete = command_argument_string.snapshot_delete + str(
                self.snapshot_id1)
            LOG.debug("snapshot_delete command: " + str(snapshot_create))
            rc = cli_parser.cli_returncode(snapshot_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete did not execute correctly by default role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete executed correctly by default role"
                )

            # Delete workload with CLI by default role
            workload_delete = command_argument_string.workload_delete + str(
                self.wid1)
            rc = cli_parser.cli_returncode(workload_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute workload_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command workload_delete did not execute correctly by default role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute workload_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_delete executed correctly by default role"
                )

            # Use backupuser credentials
            os.environ['OS_USERNAME'] = CONF.identity.backupuser
            os.environ['OS_PASSWORD'] = CONF.identity.backupuser_password

            # Run restore_delete CLI by backup role
            restore_delete = command_argument_string.restore_delete + str(
                self.restore_id1)
            rc = cli_parser.cli_returncode(restore_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Execute  restore_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command  restore_delete did not execute correctly by backup role"
                )
            else:
                reporting.add_test_step(
                    "Execute restore_delete command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete executed correctly by backup role")
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)
                #Delete restored VM instance and volume
                self.delete_restored_vms(self.restore_vm_id1,
                                         self.restore_volume_id1)
                LOG.debug("Restored VMs deleted successfully by backup role")

            # Run snapshot_delete CLI by backup role
            snapshot_delete = command_argument_string.snapshot_delete + str(
                self.snapshot_id1)
            LOG.debug("snapshot_delete command: " + str(snapshot_create))
            rc = cli_parser.cli_returncode(snapshot_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete did not execute correctly by backup role"
                )
            else:
                reporting.add_test_step(
                    "Execute snapshot_delete command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete executed correctly by backup role"
                )
                workload_available = self.wait_for_workload_tobe_available(
                    self.wid1)

            # Delete workload with CLI by backup role
            workload_delete = command_argument_string.workload_delete + str(
                self.wid1)
            rc = cli_parser.cli_returncode(workload_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "RBAC policy fails for workload deletion by backup role")
            else:
                LOG.debug("Workload deleted successfully by backup role")
                reporting.add_test_step(
                    "Execute workload_delete command by backup role",
                    tvaultconf.PASS)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example 6
    def test_1_image_booted(self):
        try:
            ### Create vm and workload ###
            deleted = 0
            reporting.add_test_script(str(__name__))

            self.created = False
            vm_id = self.create_vm(vm_cleanup=False)
            LOG.debug("\nVm id : {}\n".format(str(vm_id)))

            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            LOG.debug("\nworkload id : {}\n".format(str(workload_id)))
            LOG.debug("\nvm id : {}\n".format(str(vm_id)))
            time.sleep(40)
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full snapshot ###

            self.created = False

            #Create snapshot with CLI command
            create_snapshot = command_argument_string.snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

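            # Fetch the in-progress snapshot ID from the database and wait
            # for the full snapshot to complete.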
            snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
            LOG.debug("\nFull-snapshot ID: {}".format(str(snapshot_id)))
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Full snapshot", tvaultconf.PASS)
                self.created = True
            else:
                if (str(wc) == "error"):
                    pass
            if (self.created == False):
                reporting.add_test_step("Full snapshot", tvaultconf.FAIL)
                raise Exception("Workload snapshot did not get created")
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id, snapshot_id)

            ### Incremental snapshot ###

            self.created = False
            LOG.debug("workload is:" + str(workload_id))

            #Create incremental snapshot using CLI command
            create_snapshot = command_argument_string.incr_snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

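            # Fetch the in-progress incremental snapshot ID from the database.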
            incr_snapshot_id = query_data.get_inprogress_snapshot_id(
                workload_id)
            LOG.debug("\nIncremental-snapshot ID: {}".format(
                str(incr_snapshot_id)))
            #Wait for incremental snapshot to complete
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       incr_snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Workload incremental snapshot successfully completed")
                self.created = True
            if (self.created == False):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload incremental snapshot did not get created")
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id,
                                incr_snapshot_id)

            ### Selective restore ###

            instance_details = []
            network_details = []
            restored_vm_details_list = []
            vms_details_after_restore = []
            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

            #Create instance details for restore.json
            vm_name = "tempest_test_vm_" + vm_id + "_restored"
            temp_instance_data = {
                'id': vm_id,
                'availability_zone': CONF.compute.vm_availability_zone,
                'include': True,
                'restore_boot_disk': True,
                'name': vm_name
            }
            instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

            #Create network details for restore.json
            snapshot_network = {
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            target_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            network_details = [{
                'snapshot_network': snapshot_network,
                'target_network': target_network
            }]
            LOG.debug("Network details for restore: " + str(network_details))
            LOG.debug("Snapshot id : " + str(snapshot_id))

            #Trigger selective restore

            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_cleanup=True,
                restore_name=tvaultconf.restore_name,
                instance_details=instance_details,
                network_details=network_details)
            LOG.debug("\nselective-restore id : {}\n".format(
                str(restore_id_1)))
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
                LOG.debug("Selective restore passed")
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                LOG.debug("Selective restore failed")
                raise Exception("Selective restore failed")
            LOG.debug("selective restore complete")

            #Fetch instance details after restore
            restored_vm_details_list = []
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))

            for id in range(len(vm_list)):
                restored_vm_details_list.append(
                    self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " +
                      str(restored_vm_details_list))

            vms_details_after_restore = self.get_vms_details_list(
                restored_vm_details_list)
            LOG.debug("VM details after restore: " +
                      str(vms_details_after_restore))

            #Compare the data before and after restore
            for i in range(len(vms_details_after_restore)):
                if (vms_details_after_restore[i]['network_name'] ==
                        int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error(
                        "Restored network: " +
                        str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            ### In-place restore ###

            #Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + str(
                tvaultconf.restore_filename) + " " + str(incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            #Restore.json restoring only the boot disk of the original instance
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk': True,
                        'include': True,
                        'id': vm_id
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(json.loads(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("\ninplace-restore id : {}\n".format(str(restore_id_2)))

            self.wait_for_snapshot_tobe_available(workload_id,
                                                  incr_snapshot_id)

            #get in-place restore status
            if (self.getRestoreStatus(workload_id, incr_snapshot_id,
                                      restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            restored_vm_details_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            #Delete restore for snapshot
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id,
                                incr_snapshot_id, restore_id_2)
            LOG.debug("Snapshot Restore(in-place) deleted successfully")

            ### One-click Restore ###

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug(
                "Instance deleted successfully for one click restore : " +
                str(vm_id))
            deleted = 1

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(snapshot_id)
            LOG.debug("\nRestore ID: {}\n".format(str(restore_id_3)))

            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            restored_volumes = self.get_restored_volume_list(restore_id_3)
            vm_list = self.get_restored_vm_list(restore_id_3)

            LOG.debug("Restored vms : " + str(vm_list))

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id,
                                restore_id_3)
                self.addCleanup(self.delete_restored_vms, vm_list,
                                restored_volumes)
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                self.delete_vm(vm_id)
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
    def test_tvault1043_show_workload(self):
        try:
            #Prerequisites
            self.created = False
            self.workload_instances = []
            #Launch instance
            self.vm_id = self.create_vm(vm_name="bootfromvol_vm")
            LOG.debug("VM ID: " + str(self.vm_id))

            #Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID: " + str(self.volume_id))

            #Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached")

            #Create workload
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))

            #Show workload details using CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.workload_show + self.wid)
            if rc != 0:
                reporting.add_test_step("Execute workload-show command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-show command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #Compare the workload details against database
            out = cli_parser.cli_output(command_argument_string.workload_show +
                                        self.wid)
            LOG.debug("Response from DB: " + str(out))

            if (query_data.get_workload_display_name(
                    self.wid) == cli_parser.cli_response_parser(out, 'name')):
                reporting.add_test_step("Verify workload name",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Verify workload name",
                                        tvaultconf.FAIL)
            if (query_data.get_workload_display_description(
                    self.wid) == cli_parser.cli_response_parser(
                        out, 'description')):
                reporting.add_test_step("Verify workload description",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Verify workload description",
                                        tvaultconf.FAIL)
            if (query_data.get_workload_status_by_id(
                    self.wid) == cli_parser.cli_response_parser(out,
                                                                'status')):
                reporting.add_test_step("Verify workload status",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Verify workload status",
                                        tvaultconf.FAIL)

            instances_cli = []
            temp = json.loads(cli_parser.cli_response_parser(out, 'instances'))
            for i in range(0, len(temp)):
                instances_cli.append(temp[i]['id'])
            instances_cli.sort()
            instances_db = query_data.get_workload_vmids(self.wid)
            instances_db.sort()
            if (instances_db == instances_cli):
                reporting.add_test_step("Verify workload instances",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Verify workload instances",
                                        tvaultconf.FAIL)
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
    def test_before_upgrade(self):
        self.vms_per_workload = 1
        self.volume_size = 1
        self.workload_instances = []
        self.workload_volumes = []

        try:
            f = open("tempest/upgrade_data_conf.py", "w")
            if tvaultconf.global_job_scheduler:
                self.scheduler_status = self.enable_global_job_scheduler()
                if (self.scheduler_status == 'false'):
                    reporting.add_test_step(
                        "Enable global job scheduler", tvaultconf.FAIL)
                    raise Exception("Enable global job scheduler failed")
                else:
                    reporting.add_test_step(
                        "Enable global job scheduler", tvaultconf.PASS)
            else:
                self.scheduler_status = self.disable_global_job_scheduler()
                if (self.scheduler_status == 'true'):
                    reporting.add_test_step(
                        "Disable global job scheduler", tvaultconf.FAIL)
                    raise Exception("Disable global job scheduler failed")
                else:
                    reporting.add_test_step(
                        "Disable global job scheduler", tvaultconf.PASS)

            # Fetch license details
            self.license_details = self.get_license_list()
            LOG.debug("License details: " + str(self.license_details))
            f.write("license_details=" + str(self.license_details) + "\n")

            # Update user email in openstack
            self.update_user_email = self.update_user_email(
                CONF.identity.user_id, CONF.identity.user_email, CONF.identity.tenant_id)
            f.write("update_user_email_in_openstack=" +
                    str(self.update_user_email) + "\n")
            if self.update_user_email:
                reporting.add_test_step(
                    "Update email for user in openstack", tvaultconf.PASS)

                # Fetch existing settings
                self.existing_setting = self.get_settings_list()
                LOG.debug("Existing setting list: " +
                          str(self.existing_setting))
                # Delete any existing settings
                flag = False
                if(self.existing_setting != {}):
                    for k, v in self.existing_setting.items():
                        if (self.delete_setting(k) == False):
                            flag = True
                if flag:
                    reporting.add_test_step(
                        "Delete existing setting", tvaultconf.FAIL)
                else:
                    # Update trilioVault email settings
                    self.settings_resp = self.update_email_setings(
                        tvaultconf.setting_data)
                    f.write("settings_list=" + str(self.settings_resp) + "\n")
                    self.setting_data_from_resp = {}
                    for i in range(0, len(self.settings_resp)):
                        self.setting_data_from_resp[self.settings_resp[i][
                            'name']] = self.settings_resp[i]['value']
                    LOG.debug("Settings data from response: " +
                              str(self.setting_data_from_resp) +
                              " ; original setting data: " +
                              str(tvaultconf.setting_data))

                    if(self.setting_data_from_resp == tvaultconf.setting_data):
                        reporting.add_test_step(
                            "Update email settings", tvaultconf.PASS)

                        # Enable email notification for project
                        self.enable_email_resp = self.update_email_setings(
                            tvaultconf.enable_email_notification)[0]
                        f.write("email_enabled_settings=" +
                                str(self.enable_email_resp) + "\n")
                        if((str(self.enable_email_resp['name']) == 'smtp_email_enable') and (str(self.enable_email_resp['value']) == '1')):
                            reporting.add_test_step(
                                "Enable email notification for project", tvaultconf.PASS)
                        else:
                            reporting.add_test_step(
                                "Enable email notification for project", tvaultconf.FAIL)
                            reporting.set_test_script_status(tvaultconf.FAIL)
                    else:
                        reporting.add_test_step(
                            "Update email settings", tvaultconf.FAIL)
                        reporting.set_test_script_status(tvaultconf.FAIL)

            else:
                reporting.add_test_step(
                    "Update email for user in openstack", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            # Create workload-1
            for vm in range(0, self.vms_per_workload):
                volume_id1 = self.create_volume()
                self.workload_volumes.append(volume_id1)
                vm_id = self.create_vm(vm_cleanup=False)
                self.workload_instances.append(vm_id)
                f.write("instance_id=" + str(self.workload_instances) + "\n")
                self.attach_volume(volume_id1, vm_id, device="/dev/vdb")
                f.write("volume_ids=" + str(self.workload_volumes) + "\n")

            self.start_date = time.strftime("%x")
            self.start_time = time.strftime("%X")
            self.jobschedule = {
                "fullbackup_interval": "-1",
                "retention_policy_type": tvaultconf.retention_policy_type,
                "enabled": True,
                "start_date": self.start_date,
                "start_time": self.start_time,
                "interval": tvaultconf.interval,
                "retention_policy_value": tvaultconf.retention_policy_value}
            self.workload_id = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                self.jobschedule,
                workload_cleanup=False)
            if(self.wait_for_workload_tobe_available(self.workload_id)):
                reporting.add_test_step(
                    "Create Workload 1 for attached volume instance with scheduler enabled",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Create Workload 1 for attached volume instance with scheduler enabled",
                    tvaultconf.FAIL)
                raise Exception("Workload creation failed")
            f.write("workload_id=\"" + str(self.workload_id) + "\"\n")

            # Create workload-2
            self.volumes = []
            self.instances = []
            self.volume_id = self.create_volume(
                size=tvaultconf.bootfromvol_vol_size,
                image_id=CONF.compute.image_ref,
                volume_type_id=CONF.volume.volume_type_id)
            self.set_volume_as_bootable(self.volume_id)
            self.block_mapping_details = [{"source_type": "volume",
                                           "delete_on_termination": "false",
                                           "boot_index": 0,
                                           "uuid": self.volume_id,
                                           "destination_type": "volume"}]
            self.volumes.append(self.volume_id)
            f.write("volume_ids_2=" + str(self.volumes) + "\n")
            self.vm_id = self.create_vm(
                image_id="", block_mapping_data=self.block_mapping_details)
            self.instances.append(self.vm_id)
            f.write("instance_id_2=" + str(self.instances) + "\n")

            self.workload_id2 = self.workload_create(
                self.instances, tvaultconf.parallel, jobschedule={
                    'enabled': False}, workload_cleanup=False)
            if(self.wait_for_workload_tobe_available(self.workload_id2)):
                reporting.add_test_step(
                    "Create Workload 2 for boot from volume instance with scheduler disabled",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Create Workload 2 for boot from volume instance with scheduler disabled",
                    tvaultconf.FAIL)
                raise Exception("Workload creation failed")
            f.write("workload_id_2=\"" + str(self.workload_id2) + "\"\n")

            # Fetch workload scheduler and retention settings for workloads
            self.workloads = [self.workload_id, self.workload_id2]
            for i in range(0, len(self.workloads)):
                self.scheduler_settings = self.getSchedulerDetails(
                    self.workloads[i])
                LOG.debug("Workload scheduler settings: " +
                          str(self.scheduler_settings))
                if(i == 0):
                    f.write("scheduler_settings=" +
                            str(self.scheduler_settings) + "\n")
                else:
                    f.write("scheduler_settings_2=" +
                            str(self.scheduler_settings) + "\n")

            # Create full snapshots for workloads 1 & 2
            self.snapshots = []
            for i in range(0, len(self.workloads)):
                self.snapshot_id = self.workload_snapshot(
                    self.workloads[i], True, snapshot_cleanup=False)
                self.snapshots.append(self.snapshot_id)
                if(i == 0):
                    f.write("full_snapshot_id=\"" +
                            str(self.snapshot_id) + "\"\n")
                else:
                    f.write("full_snapshot_id_2=\"" +
                            str(self.snapshot_id) + "\"\n")

            for i in range(0, len(self.workloads)):
                self.wait_for_workload_tobe_available(self.workloads[i])
                if(self.getSnapshotStatus(self.workloads[i], self.snapshots[i]) == "available"):
                    reporting.add_test_step(
                        "Create full snapshot for workload " + str(i + 1), tvaultconf.PASS)
                else:
                    reporting.add_test_step(
                        "Create full snapshot for workload " + str(i + 1), tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            # Fetch trust details
            self.trust_details = self.get_trust_list()
            LOG.debug("Trust details: " + str(self.trust_details))
            f.write("trust_details=" + str(self.trust_details) + "\n")

            f.close()
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
    def test_3_modify_workload_scheduler_enable(self):
        reporting.add_test_script(str(__name__) + "_scheduler_enable")
        try:
            #Prerequisites
            self.created = False
            self.workload_instances = []

            #Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID-3: " + str(self.vm_id))

            #Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID-3: " + str(self.volume_id))

            #Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached-3")

            #Create workload with scheduler disabled using CLI
            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                self.vm_id) + " --jobschedule enabled=False"
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler disable",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload create did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler disable",
                    tvaultconf.PASS)
                LOG.debug("Command workload create executed correctly")

            time.sleep(10)
            self.wid = query_data.get_workload_id(tvaultconf.workload_name)
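            #The workload was created via CLI, so look up its id in the
            #workloadmgr DB by the default workload name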
            LOG.debug("Workload ID-3: " + str(self.wid))
            if (self.wid != None):
                self.wait_for_workload_tobe_available(self.wid)
                if (self.getWorkloadStatus(self.wid) == "available"):
                    reporting.add_test_step(
                        "Create workload with scheduler disable",
                        tvaultconf.PASS)
                else:
                    reporting.add_test_step(
                        "Create workload with scheduler disable",
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step(
                    "Create workload with scheduler disable", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
            LOG.debug("Workload ID: " + str(self.wid))

            #Verify workload created scheduler disable
            status = self.getSchedulerStatus(self.wid)
            if status:
                reporting.add_test_step(
                    "Verify workload created with scheduler disable",
                    tvaultconf.FAIL)
                raise Exception(
                    "Workload has not been created with scheduler disabled")
            else:
                reporting.add_test_step(
                    "Verify workload created with scheduler disable",
                    tvaultconf.PASS)
                LOG.debug(
                    "Workload created with scheduler disabled successfully")

            #Get workload scheduler details
            schedule_details = self.getSchedulerDetails(self.wid)
            scheduled_start_time = schedule_details['start_time']
            interval = schedule_details['interval']

            #Change global job scheduler to disable
            LOG.debug("Change Global job scheduler to disable")
            status = self.disable_global_job_scheduler()
            if not status:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler disabled successfully")
            else:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.FAIL)
                raise Exception("Global job scheduler not disabled")

            #Modify workload scheduler to enable
            workload_modify_command = command_argument_string.workload_modify + str(
                self.wid) + " --jobschedule enabled=True"
            error = cli_parser.cli_error(workload_modify_command)
            if error and str(
                    error.strip('\n')
            ) == "ERROR: Cannot update scheduler related fields when global jobscheduler is disabled.":
                reporting.add_test_step(
                    "Does not execute workload-modify scheduler enable",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")
                reporting.add_test_step("Throws proper message",
                                        tvaultconf.PASS)
                LOG.debug("Error message :" + str(error))
            else:
                reporting.add_test_step(
                    "Does not execute workload-modify scheduler enable",
                    tvaultconf.FAIL)
                reporting.add_test_step("Throws proper message",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")

            #Change global job scheduler to enable
            LOG.debug("Change Global job scheduler to enable")
            status = self.enable_global_job_scheduler()
            if status:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler enabled successfully")
            else:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.FAIL)
                raise Exception("Global job scheduler not enabled")

            #Modify workload scheduler to enable
            workload_modify_command = command_argument_string.workload_modify + str(
                self.wid) + " --jobschedule enabled=True"
            rc = cli_parser.cli_returncode(workload_modify_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-modify scheduler enable",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-modify scheduler enable",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #Verify workload scheduler changed to enable
            self.wait_for_workload_tobe_available(self.wid)
            status = self.getSchedulerStatus(self.wid)
            if status:
                reporting.add_test_step("Verify workload scheduler enabled",
                                        tvaultconf.PASS)
                LOG.debug("workload scheduler enabled successfully")
            else:
                reporting.add_test_step("Verify workload scheduler enabled",
                                        tvaultconf.FAIL)
                LOG.debug("workload scheduler enabled unsuccessfully")

            #Verify interval and next_snapshot_run values
            schedule_details = self.getSchedulerDetails(self.wid)
            interval_after_enable = schedule_details['interval']
            next_run_time_after_enable = schedule_details['nextrun']
            LOG.debug("interval_after_enable " + str(interval_after_enable))
            LOG.debug("next_run_time_after_enable" +
                      str(next_run_time_after_enable))
            scheduled_start_time_periods = ''.join(
                [i for i in scheduled_start_time if not i.isdigit()])
            scheduled_start_time = ''.join(
                [i for i in scheduled_start_time if not i.isalpha()])
            current_time = int(time.time())
            LOG.debug("current_time " + str(current_time))
            start_time = current_time + next_run_time_after_enable
            LOG.debug("start_time " + str(start_time))
            time3hours = datetime.datetime.utcfromtimestamp(start_time)
            start_time_in_hours = time3hours.strftime('%I:%M %p')
            start_time_in_periods = ''.join(
                [i for i in start_time_in_hours if not i.isdigit()])
            start_time_in_hours = ''.join(
                [i for i in start_time_in_hours if not i.isalpha()])
            LOG.debug("start_time_in_hours " + str(start_time_in_hours))

            #Calculate difference between times in minutes
            timeA = datetime.datetime.strptime(scheduled_start_time.strip(),
                                               "%H:%M")
            timeB = datetime.datetime.strptime(start_time_in_hours.strip(),
                                               "%H:%M")
            newTime = timeA - timeB
            timedelta = newTime.seconds / 60

            #Interval should be unchanged and the next snapshot run time should
            #be within two minutes of the scheduled start time (same AM/PM)
            if timedelta < 2 and scheduled_start_time_periods == start_time_in_periods and interval == interval_after_enable:
                reporting.add_test_step(
                    "Verify Interval and Next snapshot run time values are correct",
                    tvaultconf.PASS)
                LOG.debug(
                    "Interval and Next snapshot run time values are correct")
            else:
                reporting.add_test_step(
                    "Verify Interval and Next snapshot run time values are correct",
                    tvaultconf.FAIL)
                raise Exception(
                    "Interval and Next snapshot run time values are incorrect")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

        finally:
            #Delete workload
            status = self.workload_delete(self.wid)
            time.sleep(10)
    def test_7_filesearch_wildcards_questionmark(self):
        reporting.add_test_script(str(__name__) + "_wildcards_questionmark")
        try:
            global instances_ids
            global snapshot_ids
            global wid
            global security_group_id
            global volumes_ids
            # Run Filesearch on vm-1
            vmid_to_search = instances_ids[0]
            filepath_to_search = "/opt/File_?"
            filecount_in_snapshots = {
                snapshot_ids[0]: 0,
                snapshot_ids[1]: 2,
                snapshot_ids[2]: 2,
                snapshot_ids[3]: 2}
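            #Expected match counts for /opt/File_? per snapshot: none in the
            #first snapshot, two in each of the later ones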
            filesearch_id = self.filepath_search(
                vmid_to_search, filepath_to_search)
            snapshot_wise_filecount = self.verifyFilepath_Search(
                filesearch_id, filepath_to_search)
            for snapshot_id in filecount_in_snapshots.keys():
                if snapshot_wise_filecount[snapshot_id] == filecount_in_snapshots[snapshot_id]:
                    filesearch_status = True
                else:
                    filesearch_status = False
                    LOG.debug(
                        "Filepath Search with wildcards_questionmark unsuccessful")
                    reporting.add_test_step(
                        "Verification of Filepath serach with wildcards_questionmark",
                        tvaultconf.FAIL)
                    raise Exception(
                        "Filesearch with wildcards_questionmark does not execute correctly")

            if filesearch_status:
                LOG.debug(
                    "Filepath_Search with wildcards_questionmark successful")
                reporting.add_test_step(
                    "Verification of Filepath serach with wildcards_questionmark",
                    tvaultconf.PASS)

            # Cleanup
            # Delete all snapshots
            for snapshot_id in snapshot_ids:
                self.snapshot_delete(wid, snapshot_id)

            # Delete workload
            self.workload_delete(wid)

            # Delete VMs
            for instance_id in instances_ids:
                self.delete_vm(instance_id)

            # Delete volumes
            for volume_id in volumes_ids:
                self.delete_volume(volume_id)

            # Delete security group
            self.delete_security_group(security_group_id)

            # Delete key pair
            self.delete_key_pair(tvaultconf.key_pair_name)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
    def test_functional(self):
        try:

            ### VM and Workload ###
            tests = [[
                'tempest.api.workloadmgr.test_functional_Selective-restore', 0
            ], ['tempest.api.workloadmgr.test_functional_Inplace-restore', 0]]
            reporting.add_test_script(tests[0][0])
            vm_count = tvaultconf.vm_count
            key_pairs = self.create_kps(vm_count // 3)
            LOG.debug("\nKey pairs : {}\n".format(key_pairs))
            sec_groups = self.create_sec_groups(vm_count // 3)
            LOG.debug("\nSecurity Groups: {}\n".format(sec_groups))
            vms, boot_vols = self.multiple_vms(vm_count, key_pairs, sec_groups)
            LOG.debug("\nVMs : {}\n".format(vms))
            LOG.debug("\nBoot volumes : {}\n".format(boot_vols))
            vms = self.attach_vols(vms)
            LOG.debug("\nVolumes attached : {}\n".format(vms))
            mdsums_original = self.fill_data(vms)
            LOG.debug(
                "\nMD5 sums before snapshots : {}\n".format(mdsums_original))
            wls = self.multiple_workloads(vms)
            LOG.debug("\nWorkloads created : {}\n".format(wls))

            ### Full snapshot ###

            fullsnaps = {}
            i = 0
            for workload_id in wls:
                i += 1
                snapshot_id = self.workload_snapshot(workload_id,
                                                     True,
                                                     snapshot_cleanup=True)
                time.sleep(5)
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getSnapshotStatus(workload_id,
                                           snapshot_id) == "available"):
                    reporting.add_test_step(
                        "Create full snapshot-{}".format(i), tvaultconf.PASS)
                    LOG.debug("Full snapshot available!!")
                else:
                    reporting.add_test_step(
                        "Create full snapshot-{}".format(i), tvaultconf.FAIL)
                    raise Exception("Snapshot creation failed")

                fullsnaps[workload_id] = snapshot_id

            LOG.debug("\nFull snapshot ids : {}\n".format(fullsnaps))

            #Add some more data to files on VM
            volumes_parts = ["/dev/vdb", "/dev/vdc", "/dev/vdd"]
            mount_points = ["mount_data_a", "mount_data_b", "mount_data_c"]
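            #Data volumes map 1:1 to mount points: /dev/vdb -> mount_data_a,
            #/dev/vdc -> mount_data_b, /dev/vdd -> mount_data_c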
            for each in vms:
                if len(vms[each]) == 3:
                    ssh = self.SshRemoteMachineConnectionWithRSAKeyName(
                        str(vms[each][2]), vms[each][0])
                    i = 0
                    for each_vol in vms[each][1]:
                        self.addCustomfilesOnLinuxVM(ssh, mount_points[i], 2)
                        i += 1
                    ssh.close()
                else:
                    pass

            ### Calculate md5sum after filling the data
            mdsums_original2 = {}
            for vm in vms.keys():
                mdsum = ""
                fip = ""
                j = 0
                vmvols = self.get_attached_volumes(vm)
                LOG.debug("\nvmvols : {}\n".format(vmvols))
                #Keep only non-bootable (data) volumes; build a filtered list
                #instead of removing items while iterating over the same list
                vmvols = [
                    vol for vol in vmvols
                    if self.volumes_client.show_volume(
                        vol)['volume']['bootable'] != 'true'
                ]
                if len(vmvols) > 0:
                    fip = vms[vm][2]
                    key = vms[vm][0]
                    for avolume in vmvols:
                        LOG.debug("\navolume : {} & j {}\n".format(avolume, j))
                        mdsum = mdsum + self.calcmd5sum(
                            fip, key, mount_points[j])
                        j += 1
                        mdsums_original2[vm] = mdsum
                else:
                    pass

            ### Incremental snapshot ###

            incrsnaps = {}
            i = 0
            for workload_id in wls:
                i += 1
                incr_snapshot_id = self.workload_snapshot(
                    workload_id, False, snapshot_cleanup=True)
                time.sleep(5)
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getSnapshotStatus(workload_id,
                                           incr_snapshot_id) == "available"):
                    reporting.add_test_step(
                        "Create incremental snapshot-{}".format(i),
                        tvaultconf.PASS)
                    LOG.debug("Incremental snapshot available!!")
                else:
                    reporting.add_test_step(
                        "Create incremental snapshot-{}".format(i),
                        tvaultconf.FAIL)
                    raise Exception("Snapshot creation failed")

                incrsnaps[workload_id] = incr_snapshot_id

            LOG.debug("\nIncremental snapshots : {}\n".format(incrsnaps))

            ### Selective Restore ###

            restores = {}
            network_details = []
            i = 0
            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

            #Create network details for restore.json
            snapshot_network = {
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            target_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            network_details = [{
                'snapshot_network': snapshot_network,
                'target_network': target_network
            }]
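            #The selective restore maps the snapshot's internal network onto
            #the same network/subnet in the target environment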
            LOG.debug("Network details for restore: " + str(network_details))
            LOG.debug("Snapshot id : " + str(snapshot_id))

            mdsums_sr = {}
            instances_details = {}
            workloads = wls.items()
            for workload in workloads:
                i += 1
                instance_details = []
                wid = workload[0]
                snapshotid = fullsnaps[wid]
                wlvms = workload[1]
                for vmvol in wlvms:
                    temp_vdisks_data = []
                    temp_instance_data = {}
                    vmid = vmvol[0]
                    vmname = vmid + "_selectively_restored"
                    volumes = vmvol[1][1]
                    if len(volumes) == 0:
                        temp_instance_data = {
                            'id': vmid,
                            'availability_zone':
                            CONF.compute.vm_availability_zone,
                            'include': True,
                            'restore_boot_disk': True,
                            'name': vmname
                        }
                        instance_details.append(temp_instance_data)
                    else:
                        for volume in volumes:
                            temp_vdisks_data.append({
                                'id':
                                volume,
                                'availability_zone':
                                CONF.volume.volume_availability_zone,
                                'new_volume_type':
                                CONF.volume.volume_type
                            })
                        temp_instance_data = {
                            'id': vmid,
                            'availability_zone':
                            CONF.compute.vm_availability_zone,
                            'include': True,
                            'restore_boot_disk': True,
                            'name': vmname,
                            'vdisks': temp_vdisks_data
                        }
                        instance_details.append(temp_instance_data)

                LOG.debug("Instance details for restore: " +
                          str(instance_details))
                instances_details[wid] = instance_details

                #Trigger selective restore
                restore_id_1 = self.snapshot_selective_restore(
                    wid,
                    snapshotid,
                    restore_name=tvaultconf.restore_name,
                    restore_cleanup=True,
                    instance_details=instance_details,
                    network_details=network_details)
                self.wait_for_snapshot_tobe_available(wid, snapshotid)
                if (self.getRestoreStatus(wid, snapshotid,
                                          restore_id_1) == "available"):
                    reporting.add_test_step("Selective restore-{}".format(i),
                                            tvaultconf.PASS)
                    LOG.debug('selective restore passed')
                else:
                    reporting.add_test_step("Selective restore-{}".format(i),
                                            tvaultconf.FAIL)
                    LOG.debug('selective restore failed')
                    raise Exception("Selective restore failed")

                restores[restore_id_1] = [wid, snapshotid]

                restored_vms = self.get_restored_vm_list(restore_id_1)
                LOG.debug("\nRestored vms : {}\n".format(restored_vms))
                volumes_parts = ["/dev/vdb", "/dev/vdc", "/dev/vdd"]
                mount_points = ["mount_data_a", "mount_data_b", "mount_data_c"]
                for rvm in restored_vms:
                    mdsum = ""
                    fip = ""
                    j = 0
                    rvmname = self.get_vm_details(
                        rvm)['server']['name'].replace('_selectively_restored',
                                                       '')
                    rvmvols = self.get_attached_volumes(rvm)
                    LOG.debug("\nrvmvols : {}\n".format(rvmvols))
                    #Keep only non-bootable (data) volumes; build a filtered
                    #list instead of removing items while iterating
                    rvmvols = [
                        rvol for rvol in rvmvols
                        if self.volumes_client.show_volume(
                            rvol)['volume']['bootable'] != 'true'
                    ]
                    if len(rvmvols) > 0:
                        fip = self.assign_floating_ips(rvm, True)
                        key = vms[rvmname][0]
                        for rvolume in rvmvols:
                            LOG.debug("\nrvolume : {} & j {}\n".format(
                                rvolume, j))
                            ssh = self.SshRemoteMachineConnectionWithRSAKeyName(
                                str(fip), key)
                            self.execute_command_disk_mount(
                                ssh, str(fip), [volumes_parts[j]],
                                [mount_points[j]])
                            ssh.close()
                            mdsum = mdsum + self.calcmd5sum(
                                fip, key, mount_points[j])
                            j += 1
                            mdsums_sr[rvmname] = mdsum
                    else:
                        pass

            LOG.debug("MD5SUMS before restore")
            LOG.debug(mdsums_original)
            LOG.debug("MD5SUMS after restore")
            LOG.debug(mdsums_sr)

            if mdsums_original == mdsums_sr:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification", tvaultconf.PASS)
                tests[0][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            ### In-place restore ###

            reporting.add_test_script(tests[1][0])
            k = 1
            workloads = wls.items()
            for workload in workloads:
                wid = workload[0]
                incrsnapid = incrsnaps[wid]

                payload = {
                    "restore": {
                        "options": {
                            'name': "inplace-{}".format(wid),
                            'description': "",
                            'type': 'openstack',
                            'oneclickrestore': False,
                            'restore_type': 'inplace',
                            'openstack': {
                                'instances': instances_details[wid],
                                'networks_mapping': {
                                    'networks': []
                                }
                            }
                        }
                    }
                }
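                #In-place restore is triggered by POSTing this payload directly
                #to the workloadmgr restores API instead of going through the
                #CLI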
                #self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
                resp, body = self.wlm_client.client.post(
                    "/workloads/" + wid + "/snapshots/" + incrsnapid +
                    "/restores",
                    json=payload)
                restore_id_2 = body['restore']['id']
                LOG.debug(
                    "#### workloadid: %s ,snapshot_id: %s , restore_id: %s , operation: snapshot_restore"
                    % (wid, incrsnapid, restore_id_2))
                LOG.debug("Response:" + str(resp.content))
                if (resp.status_code != 202):
                    resp.raise_for_status()
                LOG.debug('Restore of snapshot %s scheduled successfully' %
                          incrsnapid)
                if (tvaultconf.cleanup == True):
                    self.wait_for_snapshot_tobe_available(wid, incrsnapid)
                    self.restored_vms = self.get_restored_vm_list(restore_id_2)
                    self.restored_volumes = self.get_restored_volume_list(
                        restore_id_2)
                    self.addCleanup(self.restore_delete, wid,
                                    incrsnapid, restore_id_2)
                    self.addCleanup(self.delete_restored_vms,
                                    self.restored_vms, self.restored_volumes)

                self.wait_for_snapshot_tobe_available(wid, incrsnapid)
                if (self.getRestoreStatus(wid, incrsnapid,
                                          restore_id_2) == "available"):
                    reporting.add_test_step("In-place restore-{}".format(k),
                                            tvaultconf.PASS)
                    LOG.debug('In-place restore passed')
                else:
                    reporting.add_test_step("In-place restore-{}".format(k),
                                            tvaultconf.FAIL)
                    LOG.debug('In-place restore failed')
                    raise Exception("In-place restore failed")
                k += 1
                restores[restore_id_2] = [wid, incrsnapid]

                mdsums_ipr = {}
                restored_vms = self.get_restored_vm_list(restore_id_2)
                LOG.debug("\nRestored vms : {}\n".format(restored_vms))
                for rvm in vms.keys():
                    mdsum = ""
                    fip = ""
                    j = 0
                    rvmvols = self.get_attached_volumes(rvm)
                    LOG.debug("\nrvmvols : {}\n".format(rvmvols))
                    #Keep only non-bootable (data) volumes; build a filtered
                    #list instead of removing items while iterating
                    rvmvols = [
                        rvol for rvol in rvmvols
                        if self.volumes_client.show_volume(
                            rvol)['volume']['bootable'] != 'true'
                    ]
                    if len(rvmvols) > 0:
                        fip = vms[rvm][2]
                        key = vms[rvm][0]
                        for rvolume in rvmvols:
                            LOG.debug("\nrvolume : {} & j {}\n".format(
                                rvolume, j))
                            ssh = self.SshRemoteMachineConnectionWithRSAKeyName(
                                str(fip), key)
                            self.execute_command_disk_mount(
                                ssh, str(fip), [volumes_parts[j]],
                                [mount_points[j]])
                            ssh.close()
                            mdsum = mdsum + self.calcmd5sum(
                                fip, key, mount_points[j])
                            j += 1
                            mdsums_ipr[rvm] = mdsum
                    else:
                        pass

            LOG.debug("MD5SUMS before restore")
            LOG.debug(mdsums_original2)
            LOG.debug("MD5SUMS after restore")
            LOG.debug(mdsums_ipr)

            if mdsums_original2 == mdsums_ipr:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification", tvaultconf.PASS)
                tests[1][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()

            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            for test in tests:
                if test[1] != 1:
                    reporting.add_test_script(test[0])
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()
    def test_1_filesearch_default_parameters(self):
        reporting.add_test_script(str(__name__) + "_default_parameters")
        try:
            if self.exception != "":
                LOG.debug("pre req failed")
                reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
                raise Exception(str(self.exception))
            LOG.debug("pre req completed")

            global instances_ids
            global snapshot_ids
            global date_from
            global date_to
            global wid
            global security_group_id
            global volumes_ids
            instances_ids = self.instances_ids
            snapshot_ids = self.snapshot_ids
            date_from = self.date_from
            date_to = self.date_to
            wid = self.wid
            volumes_ids = self.volumes_ids
            security_group_id = self.security_group_id
            # Run Filesearch on vm-1
            vmid_to_search = instances_ids[0]
            filepath_to_search = "/opt/File_1"

            LOG.debug(
                "global parameters: {0} {1} {2} {3} {4} {5} {6}".format(
                    str(instances_ids),
                    str(snapshot_ids),
                    str(date_from),
                    str(date_to),
                    str(wid),
                    str(volumes_ids),
                    str(security_group_id)))
            filecount_in_snapshots = {
                snapshot_ids[0]: 0,
                snapshot_ids[1]: 1,
                snapshot_ids[2]: 1,
                snapshot_ids[3]: 1}
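            #Expected match counts for the exact path /opt/File_1 per snapshot:
            #none in the first snapshot, one in each of the later ones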
            filesearch_id = self.filepath_search(
                vmid_to_search, filepath_to_search)
            snapshot_wise_filecount = self.verifyFilepath_Search(
                filesearch_id, filepath_to_search)
            for snapshot_id in filecount_in_snapshots.keys():
                if snapshot_wise_filecount[snapshot_id] == filecount_in_snapshots[snapshot_id]:
                    filesearch_status = True
                else:
                    filesearch_status = False
                    LOG.debug("Filepath Search default_parameters unsuccessful")
                    reporting.add_test_step(
                        "Verification of Filepath serach default_parameters",
                        tvaultconf.FAIL)
                    raise Exception(
                        "Filesearch default_parameters does not execute correctly")

            if filesearch_status:
                LOG.debug("Filepath_Search default_parameters successful")
                reporting.add_test_step(
                    "Verification of Filepath serach default_parameters",
                    tvaultconf.PASS)
                reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
    def test_chargeback_api(self):
        try:
            if self.exception != "":
                LOG.debug("pre req failed")
                reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
                raise Exception(str(self.exception))
            LOG.debug("pre req completed")

            vm_id = self.vm_id
            wid = self.wid
            chargeback_info = self.getTenantChargeback()
            if not chargeback_info:
                reporting.add_test_step("Verified Chargeback API",
                                        tvaultconf.FAIL)
                LOG.debug("Verified Chargeback API failed")
                raise Exception("Verified Chargeback API Failed")
            else:
                reporting.add_test_step("Verified Chargeback API",
                                        tvaultconf.PASS)
            Tenant_id = str(CONF.identity.tenant_id)
            tenant_name_chargeback = chargeback_info[Tenant_id]['tenant_name']
            LOG.debug(" Tenant name : " + str(tenant_name_chargeback))
            LOG.debug(" Env Tenant ID: " + Tenant_id)
            LOG.debug(" Instance ID : " + vm_id)

            #Verify workload ID
            openstack_workload_ids = list(
                chargeback_info[Tenant_id]['workloads'].keys())
            LOG.debug(" Workload IDs : " + str(openstack_workload_ids))
            workload_found = False
            for workload_id in openstack_workload_ids:
                if (workload_id == wid):
                    LOG.debug(" Workload ID : " + wid)
                    workload_found = True
            if (workload_found == True):
                reporting.add_test_step(" Verified workload id ",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step(" Verified workload id ",
                                        tvaultconf.FAIL)
                raise Exception(" Verification for workload id failed ")

            #Verify Instance ID
            openstack_instance_ids = list(
                chargeback_info[Tenant_id]['workloads'][wid]
                ['protected_vms'].keys())
            LOG.debug(" Protected VM IDs : " + str(openstack_instance_ids))
            instance_found = False
            for instance_id in openstack_instance_ids:
                if (instance_id == vm_id):
                    LOG.debug(" VM ID : " + instance_id)
                    instance_found = True
            if (instance_found == True):
                reporting.add_test_step(" Verified instance id ",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step(" Verified instance id ",
                                        tvaultconf.FAIL)
                raise Exception(" Varification for instance id failed ")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
    def test_tenants_usage(self):
        try:
            # Run getTenantUsage API

            Tenant_Usage = self.getTenantUsage()
            global_usage_total_vms = Tenant_Usage['global_usage']['total_vms']
            LOG.debug(" Total VM In Openstack  : " +
                      str(global_usage_total_vms))
            tenants_usage_vms_protected = Tenant_Usage['tenants_usage'][str(
                CONF.identity.tenant_id)]['vms_protected']
            LOG.debug(" VM Procted in Tenant : " +
                      str(tenants_usage_vms_protected))

            # Run prerequisite

            prerequisites.basic_workload(self)
            if self.exception != "":
                LOG.debug("pre req failed")
                reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
                raise Exception(str(self.exception))
            LOG.debug("pre req completed")

            # Get the openstack global and tenant usage after creating instance
            # and workload

            Tenant_Usage_after_prereqisite = self.getTenantUsage()
            global_usage_total_vms_after_pre_req = Tenant_Usage_after_prereqisite[
                'global_usage']['total_vms']
            LOG.debug(" Total vms in opensatck after prereqisite run : " +
                      str(global_usage_total_vms_after_pre_req))
            tenants_usage_vms_protected_after_pre_req = Tenant_Usage_after_prereqisite[
                'tenants_usage'][CONF.identity.tenant_id]['vms_protected']
            LOG.debug(" No. of vms protected after prerequisite : " +
                      str(tenants_usage_vms_protected_after_pre_req))

            # Verify Global Usage
            if ((global_usage_total_vms +
                 1) == global_usage_total_vms_after_pre_req):
                LOG.debug(" Global usage total vms value is correct ")
                reporting.add_test_step("Verify total vms in openstack",
                                        tvaultconf.PASS)
            else:
                LOG.debug(" Total vms in openstack is incorrect")
                reporting.add_test_step("Verify total vms in openstack",
                                        tvaultconf.FAIL)
                raise Exception(
                    " Verification for total vms in openstack failed ")

            # Verify Tenant Usage
            if ((tenants_usage_vms_protected +
                 1) == tenants_usage_vms_protected_after_pre_req):
                LOG.debug(" No. of total protected vms in tenant is correct ")
                reporting.add_test_step(
                    " Verify total protected vms in tenant ", tvaultconf.PASS)
            else:
                LOG.debug(" No. of total protected vms in tenant is incorrect")
                reporting.add_test_step(
                    " Verify total protected vms in tenant", tvaultconf.FAIL)
                raise Exception(
                    " Verification for protected vms in tenant failed ")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
    def test_3_modify_workload_scheduler_enable(self):
        reporting.add_test_script(str(__name__) + "_scheduler_enable")
        try:
            # Prerequisites
            self.created = False
            self.workload_instances = []

            # Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID-3: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID-3: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached-3")

            # Create workload with scheduler disabled using CLI
            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + \
                str(self.vm_id) + " --jobschedule enabled=False"
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler disable",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload create did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler disable",
                    tvaultconf.PASS)
                LOG.debug("Command workload create executed correctly")

            time.sleep(10)
            self.wid = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID-3: " + str(self.wid))
            if (self.wid is not None):
                self.wait_for_workload_tobe_available(self.wid)
                if (self.getWorkloadStatus(self.wid) == "available"):
                    reporting.add_test_step(
                        "Create workload with scheduler disable",
                        tvaultconf.PASS)
                else:
                    reporting.add_test_step(
                        "Create workload with scheduler disable",
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step(
                    "Create workload with scheduler disable", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
            LOG.debug("Workload ID: " + str(self.wid))

            # Verify workload created scheduler disable
            status = self.getSchedulerStatus(self.wid)
            if status:
                reporting.add_test_step(
                    "Verify workload created with scheduler disable",
                    tvaultconf.FAIL)
                raise Exception(
                    "Workload has not been created with scheduler disabled")
            else:
                reporting.add_test_step(
                    "Verify workload created with scheduler disable",
                    tvaultconf.PASS)
                LOG.debug(
                    "Workload created with scheduler disabled successfully")

            # Get workload scheduler details
            schedule_details = self.getSchedulerDetails(self.wid)
            scheduled_start_time = schedule_details['start_time']
            interval = schedule_details['interval']

            # Change global job scheduler to disable
            LOG.debug("Change Global job scheduler to disable")
            status = self.disable_global_job_scheduler()
            if not status:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler disabled successfully")
            else:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.FAIL)
                raise Exception("Global job scheduler not disabled")

            # Modify workload scheduler to enable
            workload_modify_command = command_argument_string.workload_modify + \
                str(self.wid) + " --jobschedule enabled=True"
            error = cli_parser.cli_error(workload_modify_command)
            error_msg = ("Cannot update scheduler related fields when "
                         "global jobscheduler is disabled.")
            if error and error_msg in str(error.strip('\n')):
                reporting.add_test_step(
                    "Does not execute workload-modify scheduler enable",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")
                reporting.add_test_step("Throws proper message",
                                        tvaultconf.PASS)
                LOG.debug("Error message :" + str(error))
            else:
                reporting.add_test_step(
                    "Does not execute workload-modify scheduler enable",
                    tvaultconf.FAIL)
                reporting.add_test_step("Throws proper message",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")

            # Change global job scheduler to enable
            LOG.debug("Change Global job scheduler to enable")
            status = self.enable_global_job_scheduler()
            if status:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler enabled successfully")
            else:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.FAIL)
                raise Exception("Global job scheduler not enabled")

            # Modify workload scheduler to enable and set the start date, time
            # and timezone
            now = datetime.datetime.utcnow()
            now_date = datetime.datetime.strftime(now, "%m/%d/%Y")
            now_time = datetime.datetime.strftime(now, "%I:%M %p")
            now_time_plus_15 = now + datetime.timedelta(minutes=15)
            now_time_plus_15 = datetime.datetime.strftime(
                now_time_plus_15, "%I:%M %p")
            workload_modify_command = (
                command_argument_string.workload_modify + str(self.wid) +
                " --jobschedule enabled=True" +
                " --jobschedule start_date=" + str(now_date) +
                " --jobschedule start_time='" +
                str(now_time_plus_15).strip() + "'" +
                " --jobschedule timezone=UTC")
            rc = cli_parser.cli_returncode(workload_modify_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-modify scheduler enable",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-modify scheduler enable",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # Verify workload scheduler changed to enable
            self.wait_for_workload_tobe_available(self.wid)
            status = self.getSchedulerStatus(self.wid)
            if status:
                reporting.add_test_step("Verify workload scheduler enabled",
                                        tvaultconf.PASS)
                LOG.debug("workload scheduler enabled successfully")
            else:
                reporting.add_test_step("Verify workload scheduler enabled",
                                        tvaultconf.FAIL)
                LOG.debug("workload scheduler enabled unsuccessfully")

            # Verify interval value and next_snapshot_run values
            schedule_details = self.getSchedulerDetails(self.wid)
            interval_after_enable = schedule_details['interval']
            next_run_time_after_enable = schedule_details['nextrun']
            next_run_time_after_enable = int(next_run_time_after_enable)
            LOG.debug("interval_after_enable " + str(interval_after_enable))
            LOG.debug("next_run_time_after_enable" +
                      str(next_run_time_after_enable))
            start_date = schedule_details['start_date']
            start_time = schedule_details['start_time']
            date_time = start_date + " " + start_time
            start_date_time = datetime.datetime.strptime(
                date_time, "%m/%d/%Y %I:%M %p")
            LOG.debug("Scheduled start and date time is: " +
                      str(start_date_time))
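            # Convert current UTC time through the same 12-hour format so it can
            # be compared against the parsed scheduled start time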
            utc_24hr = datetime.datetime.utcnow()
            utc_12hr = datetime.datetime.strftime(utc_24hr,
                                                  "%m/%d/%Y %I:%M %p")
            utc_12hr = datetime.datetime.strptime(utc_12hr,
                                                  "%m/%d/%Y %I:%M %p")
            time_diff = (start_date_time - utc_12hr).total_seconds()
            time_diff = int(time_diff)
            LOG.debug(
                "Time difference between UTC time and scheduled start time: " +
                str(time_diff))
            delta = abs(time_diff - next_run_time_after_enable)

            # Interval should be unchanged and the reported next-run time should
            # be within two minutes of the computed time until the scheduled start
            if delta < 120 and interval == interval_after_enable:
                reporting.add_test_step(
                    "Verify Interval and Next snapshot run time values are correct",
                    tvaultconf.PASS)
                LOG.debug(
                    "Interval and Next snapshot run time values are correct")
            else:
                reporting.add_test_step(
                    "Verify Interval and Next snapshot run time values are correct",
                    tvaultconf.FAIL)
                raise Exception(
                    "Interval and Next snapshot run time values are incorrect")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

        finally:
            # Delete workload
            status = self.workload_delete(self.wid)
            time.sleep(10)

    def test_tvault1037_list_restore(self):
        try:
            # Prerequisites
            self.created = False
            self.workload_instances = []

            # Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached")

            # Create workload
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            time.sleep(5)

            # Create snapshot
            self.snapshot_id = self.workload_snapshot(self.wid, True,
                                                      tvaultconf.snapshot_name)
            LOG.debug("Snapshot ID: " + str(self.snapshot_id))
            self.wait_for_snapshot_tobe_available(self.wid, self.snapshot_id)

            # Delete instance
            self.delete_vm(self.vm_id)
            LOG.debug("Instance deleted successfully")

            # Delete corresponding volume
            self.delete_volume(self.volume_id)
            LOG.debug("Volume deleted successfully")

            # Create one-click restore
            self.restore_id = self.snapshot_restore(self.wid, self.snapshot_id,
                                                    tvaultconf.restore_name)
            LOG.debug("Restore ID: " + str(self.restore_id))

            # Wait till restore is complete
            wc = query_data.get_snapshot_restore_status(
                tvaultconf.restore_name, self.snapshot_id)
            LOG.debug("Snapshot restore status: " + str(wc))
            while (str(wc) != "available" or str(wc) != "error"):
                time.sleep(5)
                wc = query_data.get_snapshot_restore_status(
                    tvaultconf.restore_name, self.snapshot_id)
                LOG.debug("Snapshot restore status: " + str(wc))
                if (str(wc) == "available"):
                    LOG.debug("Snapshot Restore successfully completed")
                    self.created = True
                    break
                else:
                    if (str(wc) == "error"):
                        break

            if (self.created == False):
                reporting.add_test_step("One click Restore", tvaultconf.FAIL)
                raise Exception("Snapshot Restore did not get created")

            # List Restores using CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.restore_list)
            if rc != 0:
                reporting.add_test_step("Execute restore-list command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute restore-list command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

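            # Cross-check the restore count from the DB against the restore-list
            # CLI output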
            wc = query_data.get_available_restores()
            out = cli_parser.cli_output(command_argument_string.restore_list)
            if (int(wc) == int(out)):
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Restore list command listed available restores correctly")
            else:
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Restore list command did not list available restores correctly"
                )
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

    def test_1_modify_workload_tvault1045_add_instance(self):
        reporting.add_test_script(str(__name__) + "_tvault1045_add_instance")
        try:
            # Prerequisites
            self.created = False
            self.workload_instances = []

            # Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached")

            # Create workload with scheduler enabled
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name,
                workload_cleanup=True)
            LOG.debug("Workload ID: " + str(self.wid))

            # Launch second instance
            self.vm_id2 = self.create_vm()
            LOG.debug("VM ID2: " + str(self.vm_id2))

            # Create volume
            self.volume_id2 = self.create_volume()
            LOG.debug("Volume ID2: " + str(self.volume_id2))

            # Attach volume to the instance
            self.attach_volume(self.volume_id2, self.vm_id2)
            LOG.debug("Volume2 attached")

            # Modify workload to add new instance using CLI command
            workload_modify_command = command_argument_string.workload_modify + "--instance instance-id=" + \
                str(self.vm_id2) + " --instance instance-id=" + str(self.vm_id) + " " + str(self.wid)
            rc = cli_parser.cli_returncode(workload_modify_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-modify command to add one more vm",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-modify command to add one more vm",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            self.wait_for_workload_tobe_available(self.wid)
            workload_vm_count = query_data.get_available_vms_of_workload(
                self.wid)
            if (workload_vm_count == 2):
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.PASS)
                LOG.debug("Vm has been added successfully")
            else:
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.FAIL)
                raise Exception("Vm has not been added")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

    def test_network_restore(self):
        try:
            reporting.add_test_script(str(__name__))
            self.delete_network_topology()
            vms = []
            ntwrks = self.create_network()
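            # Launch one instance on each of the selected private networks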
            for network in ntwrks:
                if network['name'] in ['Private-1', 'Private-2', 'Private-5']:
                    vm_name = "instance-{}".format(network['name'])
                    vmid = self.create_vm(vm_name=vm_name,
                                          networkid=[{
                                              'uuid': network['id']
                                          }],
                                          vm_cleanup=True)
                    vms.append((vm_name, vmid))
            LOG.debug("Launched vms : {}".format(vms))

            nws = [x['id'] for x in ntwrks]

            nt_bf, sbnt_bf, rt_bf, intf_bf = self.get_topology_details()

            vms_ids = [x[1] for x in vms]
            workload_id = self.workload_create(vms_ids,
                                               tvaultconf.parallel,
                                               workload_cleanup=True)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                raise Exception("Workload creation failed")

            snapshot_id = self.workload_snapshot(workload_id,
                                                 True,
                                                 snapshot_cleanup=True)
            time.sleep(5)
            self.wait_for_workload_tobe_available(workload_id)
            if (self.getSnapshotStatus(workload_id,
                                       snapshot_id) == "available"):
                reporting.add_test_step("Create full snapshot",
                                        tvaultconf.PASS)
                LOG.debug("Full snapshot available!!")
            else:
                reporting.add_test_step("Create full snapshot",
                                        tvaultconf.FAIL)
                raise Exception("Snapshot creation failed")

            instance_details = []
            for vm in vms:
                temp_instance_data = {
                    'id': vm[1],
                    'include': True,
                    'restore_boot_disk': True,
                    'name': vm[0] + "restored_instance",
                    'vdisks': []
                }
                instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

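            # Capture the original VM details, then delete the VMs and network
            # topology so the restore has to recreate them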
            vm_details_bf = {}
            for vm in vms:
                vm_details_bf[vm[0]] = self.get_vm_details(vm[1])['server']
                self.delete_vm(vm[1])
            self.delete_network_topology()

            restore_id = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_name=tvaultconf.restore_name,
                instance_details=instance_details,
                network_restore_flag=True,
                restore_cleanup=True)

            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id) == "available"):
                reporting.add_test_step(
                    "Selective restore with network restore", tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Selective restore with network restore", tvaultconf.FAIL)
                raise Exception(
                    "Selective restore with network restore failed")

            nt_af, sbnt_af, rt_af, intf_af = self.get_topology_details()
            if nt_bf == nt_af:
                reporting.add_test_step(
                    "Verify network details after network restore",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Verify network details after network restore",
                    tvaultconf.FAIL)
                LOG.error("Network details before and after restore: {0}, {1}".
                          format(nt_bf, nt_af))

            if sbnt_bf == sbnt_af:
                reporting.add_test_step(
                    "Verify subnet details after network restore",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Verify subnet details after network restore",
                    tvaultconf.FAIL)
                LOG.error(
                    "Subnet details before and after restore: {0}, {1}".format(
                        sbnt_bf, sbnt_af))

            if rt_bf == rt_af:
                reporting.add_test_step(
                    "Verify router details after network restore",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Verify router details after network restore",
                    tvaultconf.FAIL)
                LOG.error(
                    "Router details before and after restore: {0}, {1}".format(
                        rt_bf, rt_af))

            if intf_bf == intf_af:
                reporting.add_test_step(
                    "Verify interface details after network restore",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Verify interface details after network restore",
                    tvaultconf.FAIL)
                LOG.error(
                    "Interface details before and after restore: {0}, {1}".
                    format(intf_bf, intf_af))

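            # Collect details of the restored VMs, keyed by the original VM name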
            vm_details_af = {}
            restored_vms = self.get_restored_vm_list(restore_id)
            for vm in restored_vms:
                vm_details = self.get_vm_details(vm)['server']
                vm_details_af[vm_details['name'].replace(
                    'restored_instance', '')] = vm_details

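            # Blank out fields expected to differ after restore (MAC addresses,
            # IDs, timestamps, links) before comparing the remaining attributes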
            klist = sorted(vm_details_bf.keys())

            for vm in klist:
                netname = list(vm_details_bf[vm]['addresses'].keys())[0]
                vm_details_bf[vm]['addresses'][netname][0][
                    'OS-EXT-IPS-MAC:mac_addr'] = ''
                vm_details_af[vm]['addresses'][netname][0][
                    'OS-EXT-IPS-MAC:mac_addr'] = ''
                vm_details_bf[vm]['links'][1]['href'] = ''
                vm_details_af[vm]['links'][1]['href'] = ''
                del vm_details_af[vm]['metadata']['config_drive']
                del vm_details_af[vm]['metadata']['ordered_interfaces']
                del vm_details_bf[vm]['links']
                del vm_details_af[vm]['links']
                vm_details_bf[vm]['OS-EXT-SRV-ATTR:instance_name'] = ''
                vm_details_af[vm]['OS-EXT-SRV-ATTR:instance_name'] = ''
                vm_details_bf[vm]['updated'] = ''
                vm_details_af[vm]['updated'] = ''
                vm_details_bf[vm]['created'] = ''
                vm_details_af[vm]['created'] = ''
                vm_details_bf[vm]['id'] = ''
                vm_details_af[vm]['id'] = ''
                vm_details_bf[vm]['OS-SRV-USG:launched_at'] = ''
                vm_details_af[vm]['OS-SRV-USG:launched_at'] = ''
                vm_details_af[vm]['name'] = vm_details_af[vm]['name'].replace(
                    'restored_instance', '')

            if vm_details_bf == vm_details_af:
                reporting.add_test_step(
                    "Verify instance details after restore", tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Verify instance details after restore", tvaultconf.FAIL)
                LOG.error(
                    "Instance details before and after restore: {0}, {1}".
                    format(vm_details_bf, vm_details_af))

            for rvm in restored_vms:
                self.delete_vm(rvm)
            self.delete_network_topology()

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

    def test_tvault1290_delete_restore(self):
        try:
            #Prerequisites
            self.created = False
            self.workload_instances = []

            #Launch instance
            self.vm_id = self.create_vm(vm_cleanup=False)
            LOG.debug("VM ID: " + str(self.vm_id))

            #Create volume
            self.volume_id = self.create_volume(volume_cleanup=False)
            LOG.debug("Volume ID: " + str(self.volume_id))

            #Attach volume to the instance
            self.attach_volume(self.volume_id,
                               self.vm_id,
                               attach_cleanup=False)
            LOG.debug("Volume attached")

            #Create workload
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            time.sleep(5)

            #Create snapshot
            self.snapshot_id = self.workload_snapshot(self.wid, True,
                                                      tvaultconf.snapshot_name)
            LOG.debug("Snapshot ID: " + str(self.snapshot_id))

            #Wait till snapshot is complete
            wc = query_data.get_workload_snapshot_status(
                tvaultconf.snapshot_name, tvaultconf.snapshot_type_full,
                self.snapshot_id)
            LOG.debug("Workload snapshot status: " + str(wc))
            while (str(wc) != "available" or str(wc) != "error"):
                time.sleep(5)
                wc = query_data.get_workload_snapshot_status(
                    tvaultconf.snapshot_name, tvaultconf.snapshot_type_full,
                    self.snapshot_id)
                LOG.debug("Workload snapshot status: " + str(wc))
                if (str(wc) == "available"):
                    LOG.debug("Workload snapshot successfully completed")
                    self.created = True
                    break
                else:
                    if (str(wc) == "error"):
                        break
            if (self.created == False):
                raise Exception("Workload snapshot did not get created")

            #Delete instance
            self.delete_vm(self.vm_id)
            LOG.debug("Instance deleted successfully")

            #Delete corresponding volume
            self.delete_volume(self.volume_id)
            LOG.debug("Volume deleted successfully")

            #Create one-click restore
            self.restore_id = self.snapshot_restore(self.wid,
                                                    self.snapshot_id,
                                                    tvaultconf.restore_name,
                                                    restore_cleanup=False)
            LOG.debug("Restore ID: " + str(self.restore_id))
            self.wait_for_snapshot_tobe_available(self.wid, self.snapshot_id)

            self.restore_vm_id = self.get_restored_vm_list(self.restore_id)
            LOG.debug("Restore VM ID: " + str(self.restore_vm_id))

            self.restore_volume_id = self.get_restored_volume_list(
                self.restore_id)
            LOG.debug("Restore Volume ID: " + str(self.restore_volume_id))

            #Delete restore for snapshot using CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.restore_delete + self.restore_id)
            if rc != 0:
                reporting.add_test_step("Execute restore-delete command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute restore-delete command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")
            time.sleep(5)

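            # Verify in the DB that the restore entry is marked as deleted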
            wc = query_data.get_snapshot_restore_delete_status(
                tvaultconf.restore_name, tvaultconf.restore_type)
            if (str(wc) == "1"):
                reporting.add_test_step("Verification", tvaultconf.PASS)
                LOG.debug("Snapshot restore successfully deleted")
            else:
                reporting.add_test_step("Verification", tvaultconf.FAIL)
                raise Exception("Restore did not get deleted")

            #Cleanup
            #Delete restored VM instance and volume
            self.delete_restored_vms(self.restore_vm_id,
                                     self.restore_volume_id)
            LOG.debug("Restored VMs deleted successfully")
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

    def test_retention(self):
        try:

            vm_id = self.create_vm(vm_cleanup=True)
            LOG.debug("VM ID : " + str(vm_id))
            i = 1

            jobschedule = {
                'retention_policy_type': 'Number of Snapshots to Keep',
                'retention_policy_value': '3',
                'full_backup_interval': '2'
            }
            rpv = int(jobschedule['retention_policy_value'])
            workload_id = self.workload_create([vm_id],
                                               tvaultconf.parallel,
                                               jobschedule=jobschedule,
                                               workload_cleanup=True)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                raise Exception("Workload creation failed")

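            # Take one more full snapshot than the retention policy allows so
            # that the oldest snapshot should get purged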
            for i in range(0, (rpv + 1)):
                snapshot_id = self.workload_snapshot(workload_id,
                                                     True,
                                                     snapshot_cleanup=False)
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getSnapshotStatus(workload_id,
                                           snapshot_id) == "available"):
                    reporting.add_test_step(
                        "Create full snapshot-{}".format(i + 1),
                        tvaultconf.PASS)
                    LOG.debug("Full snapshot available!!")
                else:
                    reporting.add_test_step(
                        "Create full snapshot-{}".format(i + 1),
                        tvaultconf.FAIL)
                    raise Exception("Snapshot creation failed")

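            # Retention should cap the snapshot count at retention_policy_value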
            snapshotlist = self.getSnapshotList(workload_id=workload_id)
            if len(snapshotlist) == rpv:
                reporting.add_test_step("Retention", tvaultconf.PASS)
                LOG.debug("Retention worked!!")
            else:
                reporting.add_test_step("Retention", tvaultconf.FAIL)
                LOG.debug("Retention didn't work!!")
                raise Exception("Retention failed")
            if (tvaultconf.cleanup == True):
                for snapshot in snapshotlist:
                    self.addCleanup(self.snapshot_delete, workload_id,
                                    snapshot)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

    def test_3_config_workload_reconfigure(self):
        reporting.add_test_script(
            str(__name__) + "_reconfigure_additional_dir")
        try:
            # prerequisite handles config_user creation and config_backup_pvk(private key) creation

            # for config backup configuration, yaml_file creation
            added_dir = {
                'tvault-contego': {
                    'config_dir': ['/etc/tvault-contego/']
                }
            }
            self.create_config_backup_yaml(added_dir=added_dir)

            # config backup configuration with CLI command
            config_workload_command = command_argument_string.config_workload_configure + " --config-file yaml_file.yaml --authorized-key config_backup_pvk "

            LOG.debug("config workload configure cli command: " +
                      str(config_workload_command))

            rc = cli_parser.cli_returncode(config_workload_command)
            if rc != 0:
                reporting.add_test_step(
                    "Triggering config_workload_configure command via CLI",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Triggering config_workload_configure command via CLI",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            config_workload_show_command = command_argument_string.config_workload_show

            config_workload_output = cli_parser.cli_output(
                config_workload_show_command)

            LOG.debug("config_workload_show_command output from cli: " +
                      str(config_workload_output))

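            # The added config directory name should appear in the config
            # workload show output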
            if list(added_dir.keys())[0] in str(config_workload_output):
                LOG.debug("config_workload output with added dir: " +
                          list(added_dir.keys())[0] + " : " +
                          str(config_workload_output))
                reporting.add_test_step(
                    "config_workload completed with added dir ",
                    tvaultconf.PASS)
            else:
                LOG.debug("config_workload output without added dir: " +
                          added_dir.keys()[0] + " : " +
                          str(config_workload_output))
                reporting.add_test_step(
                    "config_workload completed with added dir ",
                    tvaultconf.FAIL)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

    def test_1_volume_volume(self):
        try:
            ### VM and Workload ###

            reporting.add_test_script(str(__name__))

            deleted = 0
            global volumes
            mount_points = ["mount_data_a", "mount_data_b"]
            md5sums_dir_before = {}

            #Create Keypair
            kp = self.create_key_pair(tvaultconf.key_pair_name,
                                      keypair_cleanup=True)
            LOG.debug("Key_pair : " + str(kp))

            #Create bootable volume
            boot_volume_id = self.create_volume(
                size=tvaultconf.bootfromvol_vol_size,
                image_id=CONF.compute.image_ref,
                volume_cleanup=False)
            self.set_volume_as_bootable(boot_volume_id)
            LOG.debug("Bootable Volume ID : " + str(boot_volume_id))

            self.block_mapping_details = [{
                "source_type": "volume",
                "delete_on_termination": "false",
                "boot_index": 0,
                "uuid": boot_volume_id,
                "destination_type": "volume"
            }]

            #Create instance
            vm_id = self.create_vm(
                key_pair=kp,
                image_id="",
                block_mapping_data=self.block_mapping_details,
                vm_cleanup=False)
            LOG.debug("VM ID : " + str(vm_id))
            time.sleep(30)

            #Create and attach volume
            volume_id = self.create_volume(
                volume_type_id=CONF.volume.volume_type_id,
                volume_cleanup=False)
            LOG.debug("Volume ID: " + str(volume_id))
            volumes = tvaultconf.volumes_parts

            self.attach_volume(volume_id, vm_id, attach_cleanup=False)
            LOG.debug("Volume attached")

            #Assign floating IP
            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : " + str(floating_ip_1))
            LOG.debug("Sleeping for 40 sec")
            time.sleep(40)

            #Adding data and calculating md5sums
            self.data_ops(floating_ip_1, mount_points[0], 3)
            LOG.debug("Created disk and mounted the attached volume")

            md5sums_dir_before = self.calcmd5sum(floating_ip_1,
                                                 mount_points[0])
            LOG.debug("MD5sums for directory on original vm : " +
                      str(md5sums_dir_before))

            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full Snapshot ###

            self.created = False

            #Create snapshot with CLI command
            create_snapshot = command_argument_string.snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command did not execute correctly for full snapshot")
            else:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly for full snapshot")

            snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
            LOG.debug("Snapshot ID: " + str(snapshot_id))
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Full snapshot", tvaultconf.PASS)
                self.created = True
            else:
                if (str(wc) == "error"):
                    pass
            if (self.created == False):
                reporting.add_test_step("Full snapshot", tvaultconf.FAIL)
                raise Exception("Workload snapshot did not get created")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id, snapshot_id)

            LOG.debug("Sleeping for 40s")
            time.sleep(40)

            #Add some more data to files on VM
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, mount_points[0], 2)
            ssh.close()

            ### Incremental snapshot ###

            self.created = False

            #Create incremental snapshot using CLI command
            create_snapshot = command_argument_string.incr_snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            incr_snapshot_id = query_data.get_inprogress_snapshot_id(
                workload_id)
            LOG.debug("Incremental Snapshot ID: " + str(incr_snapshot_id))
            #Wait for incremental snapshot to complete
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       incr_snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Workload incremental snapshot successfully completed")
                self.created = True
            if (self.created == False):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload incremental snapshot did not get created")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id,
                                incr_snapshot_id)

            ### Selective restore ###

            instance_details = []
            network_details = []
            restored_vm_details = []
            vms_details_after_restore = []
            temp_vdisks_data = []

            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

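            # Map the attached volume to the availability zone and volume type
            # it should be restored with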
            temp_vdisks_data.append([{
                'id': volume_id,
                'availability_zone': CONF.volume.volume_availability_zone,
                'new_volume_type': CONF.volume.volume_type
            }])

            LOG.debug("Vdisks details for restore" + str(temp_vdisks_data))

            #Create instance details for restore.json
            vm_name = "tempest_test_vm_" + vm_id + "_selectively_restored"
            temp_instance_data = {
                'id': vm_id,
                'availability_zone': CONF.compute.vm_availability_zone,
                'include': True,
                'restore_boot_disk': True,
                'name': vm_name,
                'vdisks': temp_vdisks_data[0]
            }
            instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

            #Create network details for restore.json
            snapshot_network = {
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            target_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            network_details = [{
                'snapshot_network': snapshot_network,
                'target_network': target_network
            }]
            LOG.debug("Network details for restore: " + str(network_details))
            LOG.debug("Snapshot id : " + str(snapshot_id))

            #Trigger selective restore
            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_name=tvaultconf.restore_name,
                restore_cleanup=True,
                instance_details=instance_details,
                network_details=network_details)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))
            time.sleep(60)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug("Floating ip assigned to selective restore vm -> " +
                      str(floating_ip_2))
            md5sums_dir_after = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            self.execute_command_disk_mount(ssh, str(floating_ip_2),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_dir_after = self.calcmd5sum(floating_ip_2, mount_points[0])
            ssh.close()

            LOG.debug("MD5SUMS before restore")
            LOG.debug(md5sums_dir_before[str(floating_ip_1)])
            LOG.debug("MD5SUMS after restore")
            LOG.debug(md5sums_dir_after[str(floating_ip_2)])

            if md5sums_dir_before[str(floating_ip_1)] == md5sums_dir_after[str(
                    floating_ip_2)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(
                restored_vm_details)
            LOG.debug("VM details after restore: " +
                      str(vms_details_after_restore))
            #Compare the data before and after restore
            for i in range(len(vms_details_after_restore)):
                if (vms_details_after_restore[i]['network_name'] ==
                        int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error(
                        "Restored network: " +
                        str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            ### In-place restore ###

            #Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + str(
                tvaultconf.restore_filename) + " " + str(incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            #Restore.json including the attached data volume for in-place restore
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk':
                        True,
                        'include':
                        True,
                        'id':
                        vm_id,
                        'vdisks': [{
                            'restore_cinder_volume': True,
                            'id': volume_id,
                            'new_volume_type': CONF.volume.volume_type
                        }],
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(json.loads(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id,
                                                  incr_snapshot_id)

            #get in-place restore status
            if (self.getRestoreStatus(workload_id, incr_snapshot_id,
                                      restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(40)
            md5sums_dir_after = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_dir_after = self.calcmd5sum(floating_ip_1, mount_points[0])
            ssh.close()

            LOG.debug("<----md5sums_dir_before---->")
            LOG.debug(md5sums_dir_before[str(floating_ip_1)])
            LOG.debug("<----md5sums_dir_after---->")
            LOG.debug(md5sums_dir_after[str(floating_ip_1)])

            if md5sums_dir_before[str(floating_ip_1)] == md5sums_dir_after[str(
                    floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            #Delete restore for snapshot
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id,
                                incr_snapshot_id, restore_id_2)

            ### One-click restore ###

            mdb = self.calcmd5sum(floating_ip_1, mount_points[0])
            LOG.debug(
                "MD5SUMS before deleting the instance for one click restore : "
                + str(mdb))

            self.disassociate_floating_ip_from_server(floating_ip_1, vm_id)
            self.detach_volume(vm_id, volume_id)

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug(
                "Instance deleted successfully for one click restore : " +
                str(vm_id))
            time.sleep(10)

            #Delete bootable volume of original instance
            self.delete_volume(boot_volume_id)
            LOG.debug("Bootable volume of original instance deleted")

            #Delete volume attached to original instance
            self.delete_volume(volume_id)
            LOG.debug("Volumes deleted successfully for one click restore : " +
                      str(volume_id))

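            # Mark original resources as deleted so the exception handler does
            # not try to clean them up again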
            deleted = 1

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            mda = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            mda = self.calcmd5sum(floating_ip_1, mount_points[0])
            LOG.debug(
                "MD5SUMS after deleting the instance for one click restore : "
                + str(mda))
            ssh.close()

            if mdb[str(floating_ip_1)] == mda[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_volumes = []
            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id,
                                restore_id_3)
                time.sleep(30)
                self.addCleanup(self.delete_restored_vms, vm_list,
                                restored_volumes)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                self.disassociate_floating_ip_from_server(floating_ip_1, vm_id)
                self.detach_volume(vm_id, volume_id)
                self.delete_vm(vm_id)
                time.sleep(10)
                self.delete_volume(volume_id)
                self.delete_volume(boot_volume_id)
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
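
A minimal sketch of the CLI return-code check pattern used above, assuming the helper simply shells out and reports the exit status; the real cli_parser.cli_returncode wrapper may behave differently (for example, sourcing credentials first), so treat this only as an illustration.

import shlex
import subprocess


def cli_returncode_sketch(command):
    # Run the CLI command and hand its exit status back to the caller.
    return subprocess.run(shlex.split(command)).returncode


# Hypothetical usage mirroring the one-click restore step above:
# rc = cli_returncode_sketch("workloadmgr snapshot-oneclick-restore " + snapshot_id)
# if rc != 0:
#     raise Exception("Command did not execute correctly")
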
Example 23
    def test_email(self):
        try:
            # Fetch existing settings
            existing_setting = self.get_settings_list()
            LOG.debug("Existing setting list: " + str(existing_setting))
            # Delete any existing settings
            flag = False
            if (existing_setting != {}):
                for k, v in existing_setting.items():
                    if (self.delete_setting(k) == False):
                        flag = True
            if flag:
                reporting.add_test_step("Delete existing setting",
                                        tvaultconf.FAIL)
            else:
                # Update trilioVault email settings
                settings_resp = self.update_email_setings(
                    tvaultconf.setting_data)
                setting_data_from_resp = {}

                for i in range(0, len(settings_resp)):
                    setting_data_from_resp[
                        settings_resp[i]['name']] = settings_resp[i]['value']
                LOG.debug("Settings data from response: " +
                          str(setting_data_from_resp) +
                          " ; original setting data: " +
                          str(tvaultconf.setting_data))
                if (operator.eq(setting_data_from_resp,
                                tvaultconf.setting_data)):
                    reporting.add_test_step("Update email settings",
                                            tvaultconf.PASS)

                    # Enable email notification for project
                    enable_email_resp = self.update_email_setings(
                        tvaultconf.enable_email_notification)[0]
                    if ((str(enable_email_resp['name']) == 'smtp_email_enable')
                            and (str(enable_email_resp['value']) == '1')):
                        reporting.add_test_step(
                            "Enable email notification for project",
                            tvaultconf.PASS)
                        self.wlm_client.client.get(
                            "/workloads/email/test_email")

                        cmd = 'curl  -u ' + tvaultconf.setting_data["smtp_default_recipient"] + ':' + \
                            tvaultconf.tvault_password + ' --silent "https://mail.google.com/mail/feed/atom"'
                        op = subprocess.check_output(cmd, shell=True)
                        if len(
                                re.findall(
                                    'Testing email configuration',
                                    op.decode().split('<entry>')[1])) == 1:
                            LOG.debug(
                                "Email testing done correctly and email is : {}"
                                .format(op))
                            reporting.add_test_step("Test email",
                                                    tvaultconf.PASS)
                        else:
                            reporting.add_test_step("Test email",
                                                    tvaultconf.FAIL)
                        existing_setting = self.get_settings_list()
                        LOG.debug("Existing setting list: " +
                                  str(existing_setting))

                        # Delete the existing settings
                        for k, v in existing_setting.items():
                            self.delete_setting(k)
                    else:
                        reporting.add_test_step(
                            "Enable email notification for project",
                            tvaultconf.FAIL)
                        reporting.set_test_script_status(tvaultconf.FAIL)
                else:
                    reporting.add_test_step("Update email settings",
                                            tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
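
A small alternative sketch of the mailbox check in the test above, using the requests library instead of shelling out to curl; the recipient and password arguments are assumptions mirroring the settings the test reads from tvaultconf.

import requests


def check_test_email_sketch(recipient, password):
    # Fetch the Gmail unread-mail Atom feed and look for the test-email subject.
    resp = requests.get("https://mail.google.com/mail/feed/atom",
                        auth=(recipient, password))
    return "Testing email configuration" in resp.text
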
Example 24
    def test_ubuntu_smallvolumes_selectiverestore_defaultsdeleted(self):
        try:
            if self.exception != "":
                LOG.debug("pre req failed")
                reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
                raise Exception(str(self.exception))
            LOG.debug("pre req completed")

            self.delete_vms(self.workload_instances)
            self.delete_volumes(self.workload_volumes)
            self.delete_key_pair(tvaultconf.key_pair_name)
            self.delete_security_group(self.security_group_id)
            self.delete_flavor(self.flavor_id)

            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

            #Create instance details for restore.json
            temp_vdisks_data = []
            for i in range(len(self.workload_instances)):
                flag = 2 * i  # two volumes are attached per instance

                temp_vdisks_data.append([{
                    'id':
                    self.workload_volumes[flag],
                    'availability_zone':
                    CONF.volume.volume_availability_zone,
                    'new_volume_type':
                    CONF.volume.volume_type
                }, {
                    'id':
                    self.workload_volumes[flag + 1],
                    'availability_zone':
                    CONF.volume.volume_availability_zone,
                    'new_volume_type':
                    CONF.volume.volume_type
                }])

            LOG.debug("Vdisks details for restore" + str(temp_vdisks_data))

            for i in range(len(self.workload_instances)):
                vm_name = "tempest_test_vm_" + str(i + 1) + "_restored"
                temp_instance_data = {
                    'id': self.workload_instances[i],
                    'availability_zone': CONF.compute.vm_availability_zone,
                    'include': True,
                    'restore_boot_disk': True,
                    'name': vm_name,
                    'vdisks': temp_vdisks_data[i]
                }
                self.instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " +
                      str(self.instance_details))

            #Create network details for restore.json
            snapshot_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            target_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            self.network_details = [{
                'snapshot_network': snapshot_network,
                'target_network': target_network
            }]
            LOG.debug("Network details for restore: " +
                      str(self.network_details))

            #Trigger selective restore
            self.restore_id = self.snapshot_selective_restore(
                self.workload_id,
                self.snapshot_id,
                restore_name=tvaultconf.restore_name,
                instance_details=self.instance_details,
                network_details=self.network_details,
                sec_group_cleanup=True)
            self.wait_for_snapshot_tobe_available(self.workload_id,
                                                  self.snapshot_id)
            if (self.getRestoreStatus(self.workload_id, self.snapshot_id,
                                      self.restore_id) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            self.restored_vm_details_list = []
            self.vm_list = self.get_restored_vm_list(self.restore_id)
            LOG.debug("Restored vms : " + str(self.vm_list))

            for id in range(len(self.vm_list)):
                self.restored_vm_details_list.append(
                    self.get_vm_details(self.vm_list[id]))
            LOG.debug("Restored vm details list: " +
                      str(self.restored_vm_details_list))

            self.vms_details_after_restore = self.get_vms_details_list(
                self.restored_vm_details_list)
            LOG.debug("VM details after restore: " +
                      str(self.vms_details_after_restore))

            #Compare the data before and after restore
            for i in range(len(self.vms_details_after_restore)):

                if (self.vms_details_after_restore[i]['network_name'] ==
                        int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error(
                        "Restored network: " +
                        str(self.vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

                if (self.get_key_pair_details(
                        self.vms_details_after_restore[i]['keypair']) ==
                        self.original_fingerprint):
                    reporting.add_test_step(
                        "Keypair verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Original keypair details: " +
                              str(self.original_fingerprint))
                    LOG.error("Restored keypair details: " + str(
                        self.get_key_pair_details(
                            self.vms_details_after_restore[i]['keypair'])))
                    reporting.add_test_step(
                        "Keypair verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

                if (self.get_flavor_details(
                        self.vms_details_after_restore[i]['flavor_id']) ==
                        self.original_flavor_conf):
                    reporting.add_test_step(
                        "Flavor verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Original flavor details: " +
                              str(self.original_flavor_conf))
                    LOG.error("Restored flavor details: " + str(
                        self.get_flavor_details(
                            self.vms_details_after_restore[i]['flavor_id'])))
                    reporting.add_test_step(
                        "Flavor verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
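
A minimal sketch of the vdisks pairing logic used in the restore payload above: each instance receives two consecutive entries from the flat volume list. The helper name and arguments are illustrative only.

def build_vdisks_sketch(volume_ids, availability_zone, volume_type):
    # Pair up volumes: instance i gets volumes 2*i and 2*i + 1.
    vdisks_per_instance = []
    for i in range(len(volume_ids) // 2):
        pair = volume_ids[2 * i:2 * i + 2]
        vdisks_per_instance.append([{
            'id': vol_id,
            'availability_zone': availability_zone,
            'new_volume_type': volume_type
        } for vol_id in pair])
    return vdisks_per_instance
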
Example 25
    def test_1_image_booted(self):
        try:
            deleted = 0
            ## VM and Workload ###
            tests = [
                ['tempest.api.workloadmgr.restore.test_image_booted_Selective-restore', 0],
                ['tempest.api.workloadmgr.restore.test_image_booted_Inplace-restore', 0],
                ['tempest.api.workloadmgr.restore.test_image_booted_Oneclick-restore', 0]
            ]
            reporting.add_test_script(tests[0][0])
            data_dir_path = "/root"
            md5sums_before_full = {}
            LOG.debug("******************")            
            kp = self.create_key_pair(tvaultconf.key_pair_name, keypair_cleanup=True)
            LOG.debug("Key_pair : "+str(kp))            

            vm_id = self.create_vm(key_pair=kp, vm_cleanup=False)
            LOG.debug("VM ID : "+str(vm_id))
            time.sleep(30)

            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : "+str(floating_ip_1))

            LOG.debug("Sleeping for 20 sec")
            time.sleep(20)
            
            self.data_ops(floating_ip_1, data_dir_path, 3)
            LOG.debug("Created data")            

            md5sums_before_full = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("MD5sums for directory on original vm : "+str(md5sums_before_full))

            
            workload_create = command_argument_string.workload_create + " --instance instance-id=" +str(vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command", tvaultconf.FAIL)
                raise Exception("Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command", tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if(workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if(self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    raise Exception("Workload creation failed")
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                raise Exception("Workload creation failed")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full snapshot ###

            snapshot_id = self.create_snapshot(workload_id, is_full=True)

            #Add some more data to files on VM
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, data_dir_path, 2)
            ssh.close()
            md5sums_before_incremental = {}
            md5sums_before_incremental = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("\nMD5SUM after adding additional data before incremental snapshot : {}\n".format(md5sums_before_incremental))

            ### Incremental snapshot ###

            incr_snapshot_id = self.create_snapshot(workload_id, is_full=False)

            ### Selective restore ###

            rest_details = {}
            rest_details['rest_type'] = 'selective'
            rest_details['network_id'] = CONF.network.internal_network_id
            rest_details['subnet_id'] = self.get_subnet_id(CONF.network.internal_network_id)
            volumeslist = []
            rest_details['instances'] = {vm_id: volumeslist}

            payload = self.create_restore_json(rest_details)
            #Trigger selective restore
            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_name=tvaultconf.restore_name,
                restore_cleanup=True,
                instance_details=payload['instance_details'],
                network_details=payload['network_details'])
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))
            time.sleep(60)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug("Floating ip assigned to selective restore vm -> "+str(floating_ip_2))
            md5sums_after_selective = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            md5sums_after_selective = self.calcmd5sum(floating_ip_2, data_dir_path)
            ssh.close()

            LOG.debug("MD5SUMS before restore")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("MD5SUMS after selective restore")
            LOG.debug(md5sums_after_selective[str(floating_ip_2)])

            if md5sums_before_full[str(floating_ip_1)] == md5sums_after_selective[str(floating_ip_2)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                reporting.set_test_script_status(tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_vm_details = []
            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(restored_vm_details)
            LOG.debug("VM details after restore: " + str(vms_details_after_restore))
            #Compare the data before and after restore
            int_net_1_name = self.get_net_name(CONF.network.internal_network_id) 
            for i in range(len(vms_details_after_restore)):
                if(vms_details_after_restore[i]['network_name'] == int_net_1_name):
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.PASS)
                    tests[0][1] = 1
                    reporting.test_case_to_write()
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error("Restored network: " + str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()


            ### In-place Restore ###

            rest_details = {}
            rest_details['rest_type'] = 'inplace'
            rest_details['instances'] = {vm_id: volumeslist}

            reporting.add_test_script(tests[1][0]) 
            #Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + \
                str(tvaultconf.restore_filename) + " " + str(snapshot_id)
            payload = self.create_restore_json(rest_details)
            restore_json = json.dumps(payload)
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(yaml.safe_load(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)

            #get in-place restore status
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(60)
            md5sums_after_inplace = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            md5sums_after_inplace = self.calcmd5sum(floating_ip_1, data_dir_path)
            ssh.close()

            LOG.debug("<----md5sums_before_full---->")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("<----md5sums_after_inplace---->")
            LOG.debug(md5sums_after_inplace[str(floating_ip_1)])

            if md5sums_before_full[str(floating_ip_1)] == md5sums_after_inplace[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                tests[1][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            #Delete restore for snapshot
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id, restore_id_2)


            ### One-click restore ###

            reporting.add_test_script(tests[2][0])

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug("Instance deleted successfully for one click restore : "+str(vm_id))
            time.sleep(10)

            deleted = 1

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + incr_snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(workload_id, incr_snapshot_id)
            if(self.getRestoreStatus(workload_id, incr_snapshot_id, restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            md5sums_after_1clickrestore = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            md5sums_after_1clickrestore = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("MD5SUMS after one click restore : {}".format(md5sums_after_1clickrestore))
            ssh.close()

            if md5sums_before_incremental[str(floating_ip_1)] == md5sums_after_1clickrestore[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                tests[2][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            restored_volumes = []
            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, incr_snapshot_id, restore_id_3)
                time.sleep(30)
                self.addCleanup(self.delete_restored_vms, vm_list, restored_volumes)

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                try:
                    self.delete_vm(vm_id)
                except:
                    pass
            for test in tests:
                if test[1] != 1:
                    reporting.add_test_script(test[0])
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()
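
A minimal sketch of writing the in-place restore file used in the test above with yaml.safe_dump, assuming the CLI accepts a standard YAML document; the repr-based write in the test may be deliberate, so treat this only as an alternative.

import yaml


def write_restore_file_sketch(payload, filename="restore.json"):
    # Serialize the restore payload as YAML so the CLI can parse it directly.
    with open(filename, 'w') as f:
        yaml.safe_dump(payload, f, default_flow_style=False)
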
    def test_tvault1047_unlock_workload(self):
        try:
            # Prerequisites
            self.created = False
            self.workload_instances = []

            # Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached")

            # Create workload
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            time.sleep(5)

            # Create snapshot
            self.snapshot_id = self.workload_snapshot(self.wid, True,
                                                      tvaultconf.snapshot_name)
            LOG.debug("Snapshot ID: " + str(self.snapshot_id))

            wc = query_data.get_workload_status_by_id(self.wid)
            LOG.debug("Workload status: " + str(wc))

            # Unlock workload using CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.workload_unlock + self.wid)
            if rc != 0:
                reporting.add_test_step("Execute workload-unlock command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-unlock command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # Get workload status
            wc = query_data.get_workload_status_by_id(self.wid)
            LOG.debug("Workload status: " + str(wc))

            if ('available' == str(wc)):
                reporting.add_test_step("Verification", tvaultconf.PASS)
            else:
                raise Exception("Workload status update failed")

            try:
                self.wait_for_snapshot_tobe_available(self.wid,
                                                      self.snapshot_id)
                LOG.debug("Snapshot is available")
            except Exception as e:
                LOG.error("Snapshot is in error state")
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
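
A minimal polling sketch for the status check in the test above, assuming a callable that returns the workload status string; the real query_data.get_workload_status_by_id helper may read the database directly, so both names and timings here are illustrative.

import time


def wait_for_workload_status_sketch(get_status, workload_id,
                                    expected="available",
                                    timeout=300, interval=10):
    # Poll the workload status until it reaches the expected value or times out.
    elapsed = 0
    while elapsed < timeout:
        if get_status(workload_id) == expected:
            return True
        time.sleep(interval)
        elapsed += interval
    return False
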
Example 27
    def test_5_regression(self):
        reporting.add_test_script(
            str(__name__) + "_selective_restore_bootfromvol")
        try:
            if self.exception != "":
                LOG.debug("pre req failed")
                reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
                raise Exception(str(self.exception))
            LOG.debug("pre req completed")

            self.created = False
            volumes = tvaultconf.volumes_parts
            mount_points = ["mount_data_b", "mount_data_c"]

            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

            #Create instance details for restore.json
            for i in range(len(self.workload_instances)):
                vm_name = "tempest_test_vm_" + str(i + 1) + "_restored"
                temp_instance_data = {
                    'id': self.workload_instances[i],
                    'include': True,
                    'restore_boot_disk': True,
                    'name': vm_name,
                    'vdisks': []
                }
                self.instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " +
                      str(self.instance_details))

            #Create network details for restore.json
            snapshot_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            target_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            self.network_details = [{
                'snapshot_network': snapshot_network,
                'target_network': target_network
            }]
            LOG.debug("Network details for restore: " +
                      str(self.network_details))

            #Fill some more data on each volume attached
            tree = lambda: collections.defaultdict(tree)
            self.md5sums_dir_before = tree()
            for floating_ip in self.floating_ips_list:
                for mount_point in mount_points:
                    ssh = self.SshRemoteMachineConnectionWithRSAKey(
                        str(floating_ip))
                    self.addCustomSizedfilesOnLinux(ssh, mount_point, 5)
                    ssh.close()
                for mount_point in mount_points:
                    ssh = self.SshRemoteMachineConnectionWithRSAKey(
                        str(floating_ip))
                    self.md5sums_dir_before[str(floating_ip)][str(
                        mount_point)] = self.calculatemmd5checksum(
                            ssh, mount_point)
                    ssh.close()

            LOG.debug("md5sums_dir_before" + str(self.md5sums_dir_before))

            #Trigger selective restore
            self.restore_id = self.snapshot_selective_restore(
                self.workload_id,
                self.snapshot_id,
                restore_name=tvaultconf.restore_name,
                instance_details=self.instance_details,
                network_details=self.network_details)
            self.wait_for_snapshot_tobe_available(self.workload_id,
                                                  self.snapshot_id)
            if (self.getRestoreStatus(self.workload_id, self.snapshot_id,
                                      self.restore_id) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            self.restored_vm_details_list = []
            self.vm_list = self.get_restored_vm_list(self.restore_id)
            LOG.debug("Restored vms : " + str(self.vm_list))

            for id in range(len(self.vm_list)):
                self.restored_vm_details_list.append(
                    self.get_vm_details(self.vm_list[id]))
            LOG.debug("Restored vm details list: " +
                      str(self.restored_vm_details_list))

            self.vms_details_after_restore = self.get_vms_details_list(
                self.restored_vm_details_list)
            LOG.debug("VM details after restore: " +
                      str(self.vms_details_after_restore))

            #Compare the data before and after restore
            for i in range(len(self.vms_details_after_restore)):
                if (self.vms_details_after_restore[i]['network_name'] ==
                        int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error(
                        "Restored network: " +
                        str(self.vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
                if (self.get_key_pair_details(
                        self.vms_details_after_restore[i]['keypair']) ==
                        self.original_fingerprint):
                    reporting.add_test_step(
                        "Keypair verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Original keypair details: " +
                              str(self.original_fingerprint))
                    LOG.error("Restored keypair details: " + str(
                        self.get_key_pair_details(
                            self.vms_details_after_restore[i]['keypair'])))
                    reporting.add_test_step(
                        "Keypair verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
                if (self.get_flavor_details(
                        self.vms_details_after_restore[i]['flavor_id']) ==
                        self.original_flavor_conf):
                    reporting.add_test_step(
                        "Flavor verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Original flavor details: " +
                              str(self.original_flavor_conf))
                    LOG.error("Restored flavor details: " + str(
                        self.get_flavor_details(
                            self.vms_details_after_restore[i]['flavor_id'])))
                    reporting.add_test_step(
                        "Flavor verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            #Verify floating ips
            self.floating_ips_after_restore = []
            for i in range(len(self.vms_details_after_restore)):
                self.floating_ips_after_restore.append(
                    self.vms_details_after_restore[i]['floating_ip'])
            if (sorted(self.floating_ips_after_restore) ==
                    sorted(self.floating_ips_list)):
                reporting.add_test_step("Floating ip verification",
                                        tvaultconf.PASS)
            else:
                LOG.error("Floating ips before restore: " +
                          str(sorted(self.floating_ips_list)))
                LOG.error("Floating ips after restore: " +
                          str(sorted(self.floating_ips_after_restore)))
                reporting.add_test_step("Floating ip verification",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            #calculate md5sum after restore
            tree = lambda: collections.defaultdict(tree)
            md5_sum_after_selective_restore = tree()
            for floating_ip in self.floating_ips_list:
                for mount_point in mount_points:
                    ssh = self.SshRemoteMachineConnectionWithRSAKey(
                        str(floating_ip))
                    md5_sum_after_selective_restore[str(floating_ip)][str(
                        mount_point)] = self.calculatemmd5checksum(
                            ssh, mount_point)
                    ssh.close()
            LOG.debug("md5_sum_after_selective_restore" +
                      str(md5_sum_after_selective_restore))

            #md5sum verification
            if (self.md5sums_dir_before == md5_sum_after_selective_restore):
                reporting.add_test_step("Md5 Verification", tvaultconf.PASS)
            else:
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.add_test_step("Md5 Verification", tvaultconf.FAIL)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
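
A minimal standalone sketch of the nested-defaultdict idiom the test above uses for per-IP, per-mount-point checksums; the IP, mount point, and md5 value shown are placeholders.

import collections


def tree():
    # Arbitrarily nested dict: accessing a missing key creates another tree.
    return collections.defaultdict(tree)


checksums = tree()
checksums["192.168.1.10"]["mount_data_b"] = "d41d8cd98f00b204e9800998ecf8427e"
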
    def test_2_modify_workload_scheduler_disable(self):
        reporting.add_test_script(str(__name__) + "_scheduler_disable")
        try:
            # Prerequisites
            self.created = False
            self.workload_instances = []

            # Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID-2: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID-2: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached-2")

            # Create workload with scheduler enabled
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name,
                workload_cleanup=True)
            LOG.debug("Workload ID-2: " + str(self.wid))

            # Verify workload created with scheduler enable
            status = self.getSchedulerStatus(self.wid)
            if status:
                reporting.add_test_step(
                    "Workload created with scheduler enabled", tvaultconf.PASS)
                LOG.debug(
                    "Workload created with scheduler enabled successfully")
            else:
                reporting.add_test_step(
                    "Workload created with scheduler enabled", tvaultconf.FAIL)
                raise Exception(
                    "Workload has not been created with scheduler enabled")

            # Get workload scheduler details
            schedule_details = self.getSchedulerDetails(self.wid)
            scheduled_start_time = schedule_details['start_time']
            interval = schedule_details['interval']

            # Change global job scheduler to disable
            LOG.debug("Change Global job scheduler to disable")
            status = self.disable_global_job_scheduler()
            if not status:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler disabled successfully")
            else:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.FAIL)
                raise Exception("Global job scheduler not disabled")

            # Modify workload scheduler to disable
            workload_modify_command = command_argument_string.workload_modify + \
                str(self.wid) + " --jobschedule enabled=False"
            error = cli_parser.cli_error(workload_modify_command)
            if error and (str(
                    error.strip('\n')
            ).find("Cannot update scheduler related fields when global jobscheduler is disabled."
                   ) != -1):
                reporting.add_test_step(
                    "Does not execute workload-modify scheduler disable",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")
                reporting.add_test_step("Throws proper message",
                                        tvaultconf.PASS)
                LOG.debug("Error message :" + str(error))
            else:
                reporting.add_test_step(
                    "Does not execute workload-modify scheduler disable",
                    tvaultconf.FAIL)
                reporting.add_test_step("Throws proper message",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")

            # Change global job scheduler to enable
            LOG.debug("Change Global job scheduler to enable")
            status = self.enable_global_job_scheduler()
            if status:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler enabled successfully")
            else:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.FAIL)
                raise Exception("Global job scheduler not enabled")

            # Modify workload scheduler to disable using CLI command
            workload_modify_command = command_argument_string.workload_modify + \
                str(self.wid) + " --jobschedule enabled=False"
            rc = cli_parser.cli_returncode(workload_modify_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-modify scheduler disable",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-modify scheduler disable",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # Verify workload scheduler changed to disable
            status = self.getSchedulerStatus(self.wid)
            if status:
                reporting.add_test_step("Verify workload scheduler disabled",
                                        tvaultconf.FAIL)
                LOG.debug("workload scheduler disabled unsuccessfully")
            else:
                reporting.add_test_step("Verify workload scheduler disabled",
                                        tvaultconf.PASS)
                LOG.debug("workload scheduler disabled successfully")

            # Verify interval value and next_snapshot_run values
            schedule_details = self.getSchedulerDetails(self.wid)
            interval_after_disable = schedule_details['interval']

            if interval == interval_after_disable and 'nextrun' not in schedule_details:
                reporting.add_test_step(
                    "Verify Interval and Next snapshot run time values are correct",
                    tvaultconf.PASS)
                LOG.debug(
                    "Interval and Next snapshot run time values are correct")
            else:
                reporting.add_test_step(
                    "Verify Interval and Next snapshot run time values are correct",
                    tvaultconf.FAIL)
                raise Exception(
                    "Interval and Next snapshot run time values are incorrect")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
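
A minimal sketch of the expected-error check pattern used above, assuming the helper returns the command's stderr text; the real cli_parser.cli_error may collect output differently, so this is only an illustration of the idea.

import shlex
import subprocess


def expect_cli_error_sketch(command, expected_fragment):
    # Run the command and confirm it fails with the expected message on stderr.
    result = subprocess.run(shlex.split(command),
                            capture_output=True, text=True)
    return result.returncode != 0 and expected_fragment in result.stderr
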
    def test_functional(self):
        try:
            ### VM and Workload ###
            reporting.add_test_script('tempest.api.workloadmgr.test_functional_oneclick_restore')
            status = 0
            deleted = 0
            vm_count = tvaultconf.vm_count
            key_pairs = self.create_kps(vm_count // 3)
            LOG.debug("\nKey pairs : {}\n".format(key_pairs))
            sec_groups = self.create_sec_groups(vm_count // 3)
            LOG.debug("\nSecurity Groups: {}\n".format(sec_groups))
            vms,boot_vols = self.multiple_vms(vm_count, key_pairs, sec_groups)
            LOG.debug("\nVMs : {}\n".format(vms))
            LOG.debug("\nBoot volumes : {}\n".format(boot_vols))
            vms = self.attach_vols(vms)
            LOG.debug("\nVolumes attached : {}\n".format(vms))
            mdsums_original = self.fill_data(vms)
            LOG.debug("\nMD5 sums before snapshots : {}\n".format(mdsums_original))
            wls = self.multiple_workloads(vms)
            LOG.debug("\nWorkloads created : {}\n".format(wls))

            ### Full snapshot ###

            fullsnaps = {}
            i = 0
            for workload_id in wls:
                i+=1
                snapshot_id=self.workload_snapshot(workload_id, True, snapshot_cleanup=True)
                time.sleep(5)
                self.wait_for_workload_tobe_available(workload_id)
                if(self.getSnapshotStatus(workload_id, snapshot_id) == "available"):
                    reporting.add_test_step("Create full snapshot-{}".format(i), tvaultconf.PASS)
                    LOG.debug("Full snapshot available!!")
                else:
                    reporting.add_test_step("Create full snapshot-{}".format(i), tvaultconf.FAIL)
                    raise Exception("Snapshot creation failed")

                fullsnaps[workload_id] = snapshot_id

            LOG.debug("\nFull snapshot ids : {}\n".format(fullsnaps))

            ### One-click Restore ###

            self.delete_vms(vms.keys())
            time.sleep(60)
            deleted = 1

            restores = {}
            i = 0
          
            mdsums_oc = {} 
            instances_details = {}
            workloads = wls.items() 
            for workload in workloads:
                i+=1
                instance_details = []
                wid = workload[0]
                snapshotid = fullsnaps[wid]
                wlvms = workload[1]

                # Triggger one click restore #

                restore_id = self.snapshot_restore(wid, snapshotid, restore_cleanup=True)

                self.wait_for_snapshot_tobe_available(wid, snapshotid)
                if(self.getRestoreStatus(wid, snapshotid, restore_id) == "available"):
                    reporting.add_test_step("Oneclick-{}".format(i), tvaultconf.PASS)
                    LOG.debug('Oneclick restore passed')
                else:
                    reporting.add_test_step("Oneclick restore-{}".format(i), tvaultconf.FAIL)
                    LOG.debug('Oneclick restore failed')
                    raise Exception("Oneclick restore failed")

                restores[restore_id] = [wid, snapshotid]

                restored_vms = self.get_restored_vm_list(restore_id)
                vmdetails = {}
                restore_details = self.getRestoreDetails(restore_id)['instances']
                for arestore in restore_details:
                    vmdetails[arestore['id']] = arestore['metadata']['instance_id']
                
                LOG.debug("\nRestored vms : {}\n".format(restored_vms))
                volumes_parts = ["/dev/vdb", "/dev/vdc", "/dev/vdd"]
                mount_points = ["mount_data_a", "mount_data_b", "mount_data_c"]
                for rvm in restored_vms:
                    mdsum = ""
                    fip = ""
                    j = 0
                    rvmvols = self.get_attached_volumes(rvm)
                    LOG.debug("\nrvmvols : {}\n".format(rvmvols))
                    if len(rvmvols)>0:
                        for rvol in rvmvols:
                            if self.volumes_client.show_volume(rvol)['volume']['bootable'] == 'true':
                                rvmvols.remove(rvol)
                            else:
                                pass
                    if len(rvmvols)>0:
                        int_net_name = self.get_net_name(CONF.network.internal_network_id)
                        fip = self.get_vm_details(rvm)['server']['addresses'][int_net_name][1]['addr']
                        key = self.get_vm_details(rvm)['server']['key_name'] 
                        for rvolume in rvmvols:
                            LOG.debug("\nrvolume : {} & j {}\n".format(rvolume,j))
                            ssh = self.SshRemoteMachineConnectionWithRSAKeyName(str(fip), key)
                            self.execute_command_disk_mount(ssh, str(fip), [volumes_parts[j]], [mount_points[j]])
                            ssh.close()
                            mdsum = mdsum + self.calcmd5sum(fip, key, mount_points[j])
                            j+=1
                            mdsums_oc[vmdetails[rvm]] = mdsum
                    else:
                        pass

            LOG.debug("MD5SUMS before restore")
            LOG.debug(mdsums_original)
            LOG.debug("MD5SUMS after restore")
            LOG.debug(mdsums_oc)

            if mdsums_original == mdsums_oc:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification", tvaultconf.PASS)
                status=1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if deleted == 0:
                try:
                    self.delete_vms(vms.keys())
                except:
                    pass
            if status !=1:
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()
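
A small sketch of the checksum comparison above that also reports which restored instances differ; the argument names are illustrative and mirror the mdsums_original and mdsums_oc dictionaries built in the test.

def diff_checksums_sketch(before, after):
    # Return the instance ids whose md5 sums changed (or disappeared) after restore.
    return [vm_id for vm_id in before if before[vm_id] != after.get(vm_id)]
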
Example 30
    def test_1_tvault1157_1153_1154_1155_wlm_api_restart(self):
        reporting.add_test_script(str(__name__))
        try:
            # Change global job scheduler to disable using API
            status = self.disable_global_job_scheduler()
            if not status:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler disabled successfully")
            else:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.FAIL)
                raise Exception("Global job scheduler not disabled")

            # Execute wlm-api service restart
            status_update = self.restart_wlm_api_service()
            if "active (running)" in str(status_update):
                reporting.add_test_step(
                    "verify wlm-api service is up and running after restart",
                    tvaultconf.PASS)
                LOG.debug("wlm-api service is up and running")
            else:
                reporting.add_test_step(
                    "verify wlm-api service is up and running after restart",
                    tvaultconf.FAIL)
                raise Exception("wlm-api service is not restarted")

            # Verify global job scheduler remains disabled even after wlm-api service restart
            status = self.get_global_job_scheduler_status()
            if not status:
                reporting.add_test_step(
                    "Global job scheduler remains disabled after wlm-api service restart",
                    tvaultconf.PASS)
                LOG.debug("Global job scheduler remains disabled")
            else:
                reporting.add_test_step(
                    "Global job scheduler remains disabled after wlm-api service restart",
                    tvaultconf.FAIL)
                LOG.debug("Global job scheduler changed")

            # Change global job scheduler to enable using API
            status = self.enable_global_job_scheduler()
            if status:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler enabled successfully")
            else:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.FAIL)
                LOG.debug("Global job scheduler not enabled")

            # Execute wlm-api service restart
            status_update = self.restart_wlm_api_service()
            if "active (running)" in str(status_update):
                reporting.add_test_step(
                    "verify wlm-api service is up and running after restart",
                    tvaultconf.PASS)
                LOG.debug("wlm-api service is up and running")
            else:
                reporting.add_test_step(
                    "verify wlm-api service is up and running after restart",
                    tvaultconf.FAIL)
                raise Exception("wlm-api service is not restarted")

            # Verify global job scheduler remains enabled even after wlm-api service restart
            status = self.get_global_job_scheduler_status()
            if status:
                reporting.add_test_step(
                    "Global job scheduler remains enabled after wlm-api service restart",
                    tvaultconf.PASS)
                LOG.debug("Global job scheduler remains enabled")
            else:
                reporting.add_test_step(
                    "Global job scheduler remains enabled after wlm-api service restart",
                    tvaultconf.FAIL)
                LOG.debug("Global job scheduler changed")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
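
A minimal sketch of the service health check used above, assuming wlm-api runs as a local systemd unit; the real restart_wlm_api_service helper may restart the service over SSH or through a different mechanism, so the unit name and approach here are assumptions.

import subprocess


def wlm_api_is_active_sketch(unit="wlm-api"):
    # "systemctl is-active" prints "active" and exits 0 when the unit is running.
    result = subprocess.run(["systemctl", "is-active", unit],
                            capture_output=True, text=True)
    return result.stdout.strip() == "active"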