def test():
    """Exception-handling test: destroy a VM while a simulator agent fails.

    When the case flavor specifies an ``agent_url``, a Groovy script that
    always throws is deployed there.  A VM is then created on a LocalStorage
    primary storage and destroyed; afterwards DB table statistics are
    compared against a white list, and the ``UsedIpVO`` row count must have
    shrunk by exactly one.
    """
    global agent_url
    global agent_url2
    global vm

    def get_dhcp_ip():
        # The DHCP server IP only exists for flat-network L3s; treat any
        # lookup failure as "no dhcp ip".
        if not is_flat:
            return None
        try:
            return net_ops.get_l3network_dhcp_ip(l3net_uuid)
        except Exception:
            return None

    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    agent_url = flavor['agent_url']
    # Simulator agent script that unconditionally fails.
    script = '''
{ entity ->
    throw new Exception("shuang")
}
'''
    if agent_url is not None:
        deploy_operations.remove_simulator_agent_script(agent_url)
        deploy_operations.deploy_simulator_agent_script(agent_url, script)

    l3net_uuid = test_lib.lib_get_l3_by_name(os.environ.get('l3VlanNetworkName3')).uuid
    is_flat = test_lib.lib_get_flat_dhcp_by_l3_uuid(l3net_uuid)
    dhcp_ip = get_dhcp_ip()

    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    cond = res_ops.gen_query_conditions('type', '=', 'LocalStorage')
    local_pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(local_pss) == 0:
        # NOTE(review): message says "ceph" but the query targets LocalStorage.
        test_util.test_skip('Required ceph ps to test')
    ps_uuid = local_pss[0].uuid

    # (An unused ImageOption and a second imagestore/bs_uuid lookup were
    # removed here; they had no effect.)
    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    saved_db_stats = test_stub.get_db_stats(dhcp_ip)
    old_ip_count = test_stub.get_table_stats('UsedIpVO')
    vm.destroy()
    new_ip_count = test_stub.get_table_stats('UsedIpVO')

    if agent_url is not None:
        dhcp_ip = get_dhcp_ip()
        saved_db_stats2 = test_stub.get_db_stats(dhcp_ip)
        test_stub.compare_db_stats(saved_db_stats, saved_db_stats2, db_tables_white_list)
    if int(new_ip_count) != int(old_ip_count) - 1:
        test_util.test_fail("UsedIpVO is expected to -1, %s -> %s" % (old_ip_count, new_ip_count))
def test():
    """Add a fake raw image while the simulator agent delays or fails.

    Seeds the fake-agent store with ``{agent_url: agent_action}`` keyed by
    the new image uuid, then adds an image with a 24h timeout and fails the
    case if the AddImage call returned before half of the configured agent
    delay (i.e. the agent action was not actually exercised).
    """
    global agent_url
    global image

    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    agent_url = flavor['agent_url']
    agent_action = flavor['agent_action']
    # action 1 -> delay just under the 24h API timeout; action 2 -> 6 min.
    if agent_action == 1:
        agent_time = (24 * 60 * 60 - 60) * 1000
    elif agent_action == 2:
        agent_time = 360 * 1000
    else:
        # Fix: previously an unknown action left agent_time unbound and the
        # case died later with NameError instead of a clear failure.
        test_util.test_fail('unexpected agent_action: %s' % agent_action)

    image_uuid = str(uuid.uuid4()).replace('-', '')
    rsp = dep_ops.json_post(
        "http://127.0.0.1:8888/test/api/v1.0/store/create",
        simplejson.dumps({"key": image_uuid,
                          "value": '{"%s":%s}' % (agent_url, agent_action)}))

    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    bs_uuid = imagestore.uuid

    # (An unused ImageOption instance was removed here.)
    image_option = test_util.ImageOption()
    image_option.set_uuid(image_uuid)
    image_option.set_name('fake_image')
    image_option.set_description('fake image')
    image_option.set_format('raw')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_backup_storage_uuid_list([bs_uuid])
    image_option.url = "http://fake/fake.raw"
    image_option.set_timeout(24 * 60 * 60 * 1000)

    start = time.time()
    image = img_ops.add_image(image_option)
    end = time.time()
    if end - start < agent_time / 2 / 1000:
        test_util.test_fail('execution time too short %s' % (end - start))
def test():
    """Migrate a stopped VM's root volume while the agent delays or fails.

    The fake-agent store entry is keyed by the root volume uuid; the
    migration must take at least half of the configured agent delay.
    """
    global agent_url
    global vm

    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    cond = res_ops.gen_query_conditions('type', '=', 'LocalStorage')
    local_pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(local_pss) == 0:
        # NOTE(review): message says "ceph" but the query targets LocalStorage.
        test_util.test_skip('Required ceph ps to test')
    ps_uuid = local_pss[0].uuid

    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    vm.stop()
    target_host = test_lib.lib_find_random_host(vm.vm)

    agent_url = flavor['agent_url']
    agent_action = flavor['agent_action']
    if agent_action == 1:
        agent_time = (24 * 60 * 60 - 60) * 1000
    elif agent_action == 2:
        agent_time = 360 * 1000
    else:
        # Fix: avoid NameError on agent_time below for an unknown action.
        test_util.test_fail('unexpected agent_action: %s' % agent_action)

    rsp = dep_ops.json_post(
        "http://127.0.0.1:8888/test/api/v1.0/store/create",
        simplejson.dumps({"key": vm.get_vm().rootVolumeUuid,
                          "value": '{"%s":%s}' % (agent_url, agent_action)}))

    start = time.time()
    vol_ops.migrate_volume(vm.get_vm().rootVolumeUuid, target_host.uuid)
    end = time.time()
    if end - start < agent_time / 2 / 1000:
        test_util.test_fail('execution time too short %s' % (end - start))
def test():
    """Export an ISO image from an image-store backup storage.

    Builds a small ISO under tomcat's static directory, adds it as an ISO
    template, exports it, and verifies the exported URL ends in '.iso'.
    """
    imagestore_backup_storage = test_lib.lib_get_image_store_backup_storage()
    if not imagestore_backup_storage:
        test_util.test_skip('Not find image store type backup storage.')

    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    bs_uuid = imagestore_backup_storage.uuid
    img_option.set_backup_storage_uuid_list([bs_uuid])
    # (An unused root_disk_uuid lookup was removed here.)

    # Build a throw-away ISO so it is reachable over the static HTTP path.
    os.system("genisoimage -o %s/apache-tomcat/webapps/zstack/static/test.iso /tmp/" % (os.environ.get('zstackInstallPath')))
    img_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))

    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    image_url = image.export()
    image.delete_exported_image()
    test_lib.lib_robot_cleanup(test_obj_dict)
    if image_url.endswith('.iso'):
        test_util.test_pass('Export ISO Image Test Success')
    else:
        test_util.test_fail('Export ISO Image Test Fail, exported ISO image ends with %s' % (image_url.split('.')[-1]))
def test():
    """Migrate a stopped VM's root volume with a delayed/failing agent.

    Duplicate of the LocalStorage migrate-volume timeout case: the fake-agent
    store entry is keyed by the root volume uuid, and the migration must take
    at least half of the configured agent delay.
    """
    global agent_url
    global vm

    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    cond = res_ops.gen_query_conditions('type', '=', 'LocalStorage')
    local_pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(local_pss) == 0:
        # NOTE(review): message says "ceph" but the query targets LocalStorage.
        test_util.test_skip('Required ceph ps to test')
    ps_uuid = local_pss[0].uuid

    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    vm.stop()
    target_host = test_lib.lib_find_random_host(vm.vm)

    agent_url = flavor['agent_url']
    agent_action = flavor['agent_action']
    if agent_action == 1:
        agent_time = (24 * 60 * 60 - 60) * 1000
    elif agent_action == 2:
        agent_time = 360 * 1000
    else:
        # Fix: avoid NameError on agent_time below for an unknown action.
        test_util.test_fail('unexpected agent_action: %s' % agent_action)

    rsp = dep_ops.json_post(
        "http://127.0.0.1:8888/test/api/v1.0/store/create",
        simplejson.dumps({"key": vm.get_vm().rootVolumeUuid,
                          "value": '{"%s":%s}' % (agent_url, agent_action)}))

    start = time.time()
    vol_ops.migrate_volume(vm.get_vm().rootVolumeUuid, target_host.uuid)
    end = time.time()
    if end - start < agent_time / 2 / 1000:
        test_util.test_fail('execution time too short %s' % (end - start))
def test():
    """Export an ISO image built remotely on the built-in HTTP server.

    Installs genisoimage over SSH if missing, builds a test ISO under the
    zstack-repo static path, adds it as an ISO template, exports it, and
    verifies the exported URL ends in '.iso'.
    """
    imagestore_backup_storage = test_lib.lib_get_image_store_backup_storage()
    if not imagestore_backup_storage:
        test_util.test_skip('Not find image store type backup storage.')

    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    bs_uuid = imagestore_backup_storage.uuid
    img_option.set_backup_storage_uuid_list([bs_uuid])
    # (An unused root_disk_uuid lookup was removed here.)

    # Make sure genisoimage exists on the HTTP server before building.
    command = "command -v genisoimage"
    result = test_lib.lib_execute_ssh_cmd(os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], 'root', 'password', command)
    if not result:
        command = "yum -y install genisoimage --disablerepo=* --enablerepo=zstack-local"
        test_lib.lib_execute_ssh_cmd(os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], 'root', 'password', command)

    command = "genisoimage -o %s/apache-tomcat/webapps/zstack/static/zstack-repo/7/x86_64/os/test.iso /tmp/" % os.environ.get('zstackInstallPath')
    test_lib.lib_execute_ssh_cmd(os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], 'root', 'password', command)
    img_option.set_url('http://%s:8080/zstack/static/zstack-repo/7/x86_64/os/test.iso' % (os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']))

    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    image_url = image.export()
    image.delete_exported_image()
    test_lib.lib_robot_cleanup(test_obj_dict)
    if image_url.endswith('.iso'):
        test_util.test_pass('Export ISO Image Test Success')
    else:
        test_util.test_fail('Export ISO Image Test Fail, exported ISO image ends with %s' % (image_url.split('.')[-1]))
def test():
    """Add a locally-built test ISO, export it, and check the exported URL."""
    bs = test_lib.lib_get_image_store_backup_storage()
    if not bs:
        test_util.test_skip('Not find image store type backup storage.')

    iso_option = test_util.ImageOption()
    iso_option.set_name('iso')
    root_disk_uuid = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')).uuid
    iso_option.set_backup_storage_uuid_list([bs.uuid])

    # Build a throw-away ISO under tomcat's static directory.
    os.system("genisoimage -o %s/apache-tomcat/webapps/zstack/static/test.iso /tmp/" % (os.environ.get('zstackInstallPath')))
    iso_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))

    iso_image = test_image.ZstackTestImage()
    iso_image.set_image(img_ops.add_iso_template(iso_option))
    iso_image.set_creation_option(iso_option)
    test_obj_dict.add_image(iso_image)

    exported_url = iso_image.export()
    iso_image.delete_exported_image()
    test_lib.lib_robot_cleanup(test_obj_dict)

    if not exported_url.endswith('.iso'):
        test_util.test_fail('Export ISO Image Test Fail, exported ISO image ends with %s' % (exported_url.split('.')[-1]))
    test_util.test_pass('Export ISO Image Test Success')
def test():
    """Set NeverStop HA on a stopped VM and expect it to start again."""
    global agent_url
    global vm

    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    cond = res_ops.gen_query_conditions('type', '=', 'SharedMountPoint')
    pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(pss) == 0:
        # Fix: the original formatted an undefined name (ps_type) here,
        # which raised NameError instead of skipping the case.
        test_util.test_skip('Required %s ps to test' % 'SharedMountPoint')
    ps_uuid = pss[0].uuid

    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    vm.stop()
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, 'NeverStop')

    # Poll up to 5s for HA to restart the stopped VM.
    cond = res_ops.gen_query_conditions('uuid', '=', vm.get_vm().uuid)
    for i in range(5):
        time.sleep(1)
        try:
            if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
                break
        except Exception:
            test_util.test_logger('Retry until VM change to running')
    if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
        test_util.test_pass('set HA after stopped VM test pass')
    test_util.test_fail('set HA after stopped VM test fail')
def test():
    """Create a root-volume template on sftp BS while the agent delays.

    The fake-agent store entry is keyed by the root volume uuid when the
    flavor targets CREATE_TEMPLATE_FROM_VOLUME_PATH, otherwise by the new
    image uuid.  The create call must take at least half the agent delay.
    """
    global agent_url
    global vm
    global image

    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    cond = res_ops.gen_query_conditions('type', '=', 'SharedMountPoint')
    local_pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(local_pss) == 0:
        test_util.test_skip('Required smp ps to test')
    ps_uuid = local_pss[0].uuid

    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)

    agent_url = flavor['agent_url']
    agent_action = flavor['agent_action']
    if agent_action == 1:
        agent_time = (24 * 60 * 60 - 60) * 1000
    elif agent_action == 2:
        agent_time = 360 * 1000
    else:
        # Fix: avoid NameError on agent_time below for an unknown action.
        test_util.test_fail('unexpected agent_action: %s' % agent_action)

    image_uuid = str(uuid.uuid4()).replace('-', '')
    # The two json_post branches differed only in the store key; dedupe.
    if agent_url == CREATE_TEMPLATE_FROM_VOLUME_PATH:
        store_key = vm.get_vm().rootVolumeUuid
    else:
        store_key = image_uuid
    rsp = dep_ops.json_post(
        "http://127.0.0.1:8888/test/api/v1.0/store/create",
        simplejson.dumps({"key": store_key,
                          "value": '{"%s":%s}' % (agent_url, agent_action)}))

    bss = res_ops.query_resource(res_ops.SFTP_BACKUP_STORAGE, [])
    if len(bss) == 0:
        test_util.test_skip('Required sftp bs to test')
    bs_uuid = bss[0].uuid

    image_creation_option = test_util.ImageOption()
    image_creation_option.set_uuid(image_uuid)
    image_creation_option.set_backup_storage_uuid_list([bs_uuid])
    image_creation_option.set_root_volume_uuid(vm.vm.rootVolumeUuid)
    image_creation_option.set_name('test_create_root_volume_template_timeout')
    image_creation_option.set_timeout(24 * 60 * 60 * 1000)

    image = zstack_image_header.ZstackTestImage()
    image.set_creation_option(image_creation_option)

    start = time.time()
    image.create()
    end = time.time()
    if end - start < agent_time / 2 / 1000:
        test_util.test_fail('execution time too short %s' % (end - start))
def test():
    """HA global-config test.

    Verifies that: (1) NeverStop HA restarts a stopped VM; (2) an invalid
    ``host.check.successRatio`` is rejected by the API; (3) with HA disabled,
    a stopped VM is NOT restarted within one scan interval.
    """
    global vm
    global origin_value

    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    cond = res_ops.gen_query_conditions('type', '=', 'SharedMountPoint')
    pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(pss) == 0:
        # Fix: the original formatted an undefined name (ps_type) here,
        # which raised NameError instead of skipping the case.
        test_util.test_skip('Required %s ps to test' % 'SharedMountPoint')
    ps_uuid = pss[0].uuid

    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    vm.stop()
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, 'NeverStop')

    # Poll up to 5s for HA to restart the stopped VM.
    cond = res_ops.gen_query_conditions('uuid', '=', vm.get_vm().uuid)
    for i in range(5):
        time.sleep(1)
        try:
            if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
                break
        except Exception:
            test_util.test_logger('Retry until VM change to running')
    if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state != vm_header.RUNNING:
        test_util.test_fail('set HA after stopped VM test fail')

    # An out-of-range success ratio must be rejected by the API.
    no_exception = True
    try:
        config_ops.change_global_config('ha', 'host.check.successRatio', -1)
        no_exception = True
    except Exception:
        test_util.test_logger('Expected exception')
        no_exception = False
    if no_exception:
        test_util.test_fail('Expect exception while there is none')

    # Shorten the scan interval, disable HA, then make sure the stopped VM
    # stays stopped for a full scan interval.
    origin_value = config_ops.change_global_config('ha', 'neverStopVm.scan.interval', '30')
    config_ops.change_global_config('ha', 'enable', 'false')
    vm.stop()
    cond = res_ops.gen_query_conditions('uuid', '=', vm.get_vm().uuid)
    for i in range(int(config_ops.get_global_config_value('ha', 'neverStopVm.scan.interval'))):
        time.sleep(1)
        try:
            if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state != vm_header.STOPPED:
                break
        except Exception:
            # Keep polling until the query succeeds.
            test_util.test_logger('Retry until VM change to running')
    if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
        test_util.test_fail('disable HA after stopped VM test fail')
    test_util.test_pass('set HA global config pass')
def test():
    """A NeverStop-HA VM must come back RUNNING after a failed migration.

    Deploys a simulator agent script that makes every migration fail, expects
    the migrate call to raise, then stops the VM and expects HA to restart it.
    """
    global agent_url
    global vm

    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    cond = res_ops.gen_query_conditions('type', '=', 'SharedMountPoint')
    pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(pss) == 0:
        # Fix: the original formatted an undefined name (ps_type) here,
        # which raised NameError instead of skipping the case.
        test_util.test_skip('Required %s ps to test' % 'SharedMountPoint')
    ps_uuid = pss[0].uuid

    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, 'NeverStop')

    # Make every migration fail on the simulator agent.
    agent_url = KVM_MIGRATE_VM_PATH
    script = '''
{ entity ->
    throw new Exception("shuang")
}
'''
    deploy_operations.remove_simulator_agent_script(agent_url)
    deploy_operations.deploy_simulator_agent_script(agent_url, script)

    candidate_hosts = vm_ops.get_vm_migration_candidate_hosts(vm.get_vm().uuid)
    # (An unused start = time.time() was removed here.)
    no_exception = True
    if candidate_hosts is not None and test_lib.lib_check_vm_live_migration_cap(vm.get_vm()):
        try:
            vm_ops.migrate_vm(vm.get_vm().uuid, candidate_hosts.inventories[0].uuid)
            no_exception = True
        except Exception:
            test_util.test_logger('Expected exception for VM migration')
            no_exception = False
    else:
        test_util.test_skip('Required migratable host to test')
    if no_exception:
        test_util.test_fail('Expect exception for migration while there is none')

    vm.stop()
    # Poll up to 5s for HA to restart the stopped VM.
    cond = res_ops.gen_query_conditions('uuid', '=', vm.get_vm().uuid)
    for i in range(5):
        time.sleep(1)
        try:
            if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
                break
        except Exception:
            test_util.test_logger('Retry until VM change to running')
    if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
        test_util.test_pass('HA after migrate failure test pass')
    test_util.test_fail('HA after migrate failure test fail')
def test():
    """Create a root-volume template on sftp BS with a delayed agent.

    Duplicate of the SMP create-template timeout case: the fake-agent entry
    is keyed by root-volume uuid or image uuid depending on the flavor, and
    the create call must take at least half the configured agent delay.
    """
    global agent_url
    global vm
    global image

    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    cond = res_ops.gen_query_conditions('type', '=', 'SharedMountPoint')
    local_pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(local_pss) == 0:
        test_util.test_skip('Required smp ps to test')
    ps_uuid = local_pss[0].uuid

    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)

    agent_url = flavor['agent_url']
    agent_action = flavor['agent_action']
    if agent_action == 1:
        agent_time = (24 * 60 * 60 - 60) * 1000
    elif agent_action == 2:
        agent_time = 360 * 1000
    else:
        # Fix: avoid NameError on agent_time below for an unknown action.
        test_util.test_fail('unexpected agent_action: %s' % agent_action)

    image_uuid = str(uuid.uuid4()).replace('-', '')
    # The two json_post branches differed only in the store key; dedupe.
    if agent_url == CREATE_TEMPLATE_FROM_VOLUME_PATH:
        store_key = vm.get_vm().rootVolumeUuid
    else:
        store_key = image_uuid
    rsp = dep_ops.json_post(
        "http://127.0.0.1:8888/test/api/v1.0/store/create",
        simplejson.dumps({"key": store_key,
                          "value": '{"%s":%s}' % (agent_url, agent_action)}))

    bss = res_ops.query_resource(res_ops.SFTP_BACKUP_STORAGE, [])
    if len(bss) == 0:
        test_util.test_skip('Required sftp bs to test')
    bs_uuid = bss[0].uuid

    image_creation_option = test_util.ImageOption()
    image_creation_option.set_uuid(image_uuid)
    image_creation_option.set_backup_storage_uuid_list([bs_uuid])
    image_creation_option.set_root_volume_uuid(vm.vm.rootVolumeUuid)
    image_creation_option.set_name('test_create_root_volume_template_timeout')
    image_creation_option.set_timeout(24 * 60 * 60 * 1000)

    image = zstack_image_header.ZstackTestImage()
    image.set_creation_option(image_creation_option)

    start = time.time()
    image.create()
    end = time.time()
    if end - start < agent_time / 2 / 1000:
        test_util.test_fail('execution time too short %s' % (end - start))
def test():
    """Create a root-volume template on ceph BS while the agent delays.

    The fake-agent store entry is keyed by the VM's root volume uuid; the
    create call must take at least half of the configured agent delay.
    """
    global agent_url
    global vm
    global image

    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    ceph_pss = res_ops.query_resource(res_ops.CEPH_PRIMARY_STORAGE, [])
    if len(ceph_pss) == 0:
        test_util.test_skip('Required ceph ps to test')
    ps_uuid = ceph_pss[0].uuid

    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)

    agent_url = flavor['agent_url']
    agent_action = flavor['agent_action']
    if agent_action == 1:
        agent_time = (24 * 60 * 60 - 60) * 1000
    elif agent_action == 2:
        agent_time = 360 * 1000
    else:
        # Fix: avoid NameError on agent_time below for an unknown action.
        test_util.test_fail('unexpected agent_action: %s' % agent_action)

    rsp = dep_ops.json_post(
        "http://127.0.0.1:8888/test/api/v1.0/store/create",
        simplejson.dumps({"key": vm.get_vm().rootVolumeUuid,
                          "value": '{"%s":%s}' % (agent_url, agent_action)}))

    bss = res_ops.query_resource(res_ops.CEPH_BACKUP_STORAGE, [])
    if len(bss) == 0:
        test_util.test_skip('Required ceph bs to test')
    bs_uuid = bss[0].uuid

    image_creation_option = test_util.ImageOption()
    image_creation_option.set_backup_storage_uuid_list([bs_uuid])
    image_creation_option.set_root_volume_uuid(vm.vm.rootVolumeUuid)
    image_creation_option.set_name('test_create_root_volume_template_timeout')
    image_creation_option.set_timeout(24 * 60 * 60 * 1000)

    image = zstack_image_header.ZstackTestImage()
    image.set_creation_option(image_creation_option)

    start = time.time()
    image.create()
    end = time.time()
    if end - start < agent_time / 2 / 1000:
        test_util.test_fail('execution time too short %s' % (end - start))
def test(): test_util.test_dsc(''' Will doing random test operations, including vm create/stop/start/reboot /destroy, volume create/attach/detach/delete. It doesn't include SG VIP and snapshots operations. If reach max 4 coexisting running vm, testing will success and quit. ''') target_running_vm = 4 test_util.test_dsc( 'Random Test Begin. Test target: 4 coexisting running VM (not include VR).' ) robot_test_obj = test_util.Robot_Test_Object() robot_test_obj.set_test_dict(test_dict) extra_exclusive_actions = [] if test_lib.lib_get_image_store_backup_storage() != None: extra_exclusive_actions = [ test_state.TestAction.create_data_vol_template_from_volume, test_state.TestAction.create_image_from_volume ] robot_test_obj.set_exclusive_actions_list(\ test_state.TestAction.sg_actions \ + test_state.TestAction.vip_actions \ + test_state.TestAction.snapshot_actions \ + extra_exclusive_actions) priority_actions = test_state.TestAction.volume_actions * 4 priority_action_obj = action_select.ActionPriority() priority_action_obj.add_priority_action_list(priority_actions) robot_test_obj.set_priority_actions(priority_action_obj) rounds = 1 while len(test_dict.get_vm_list(vm_header.RUNNING)) < target_running_vm: print "test_dict: %s" % test_dict test_util.test_dsc('New round %s starts: random operation pickup.' % rounds) test_lib.lib_vm_random_operation(robot_test_obj) test_util.test_dsc('Round %s finished. Begin status checking.' % rounds) rounds += 1 test_lib.lib_robot_status_check(test_dict) test_util.test_dsc('Reach test pass exit criterial.') test_lib.lib_robot_cleanup(test_dict) test_util.test_pass('Create random VM Test Success')
def test():
    """Create-VM failure handling on ceph: DB stats must be unchanged.

    Deploys a failing simulator agent script (plus a second one for the
    FLAT_DHCP_RELEASE / NFS_DELETE flavors), expects VM creation to fail,
    and then asserts no DB table outside the white list changed.
    """
    global agent_url
    global agent_url2
    global vm

    saved_db_stats = get_db_stats()
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    agent_url = flavor['agent_url']
    # Simulator agent script that unconditionally fails.
    script = '''
{ entity ->
    throw new Exception("shuang")
}
'''
    if agent_url is not None:
        deploy_operations.remove_simulator_agent_script(agent_url)
        deploy_operations.deploy_simulator_agent_script(agent_url, script)
    if agent_url == FLAT_DHCP_RELEASE or agent_url == NFS_DELETE:
        # These paths are hit after the first failure is rolled back, so a
        # second failing script is needed to break the retry path too.
        agent_url2 = VOLUME_CLONE
        deploy_operations.remove_simulator_agent_script(agent_url2)
        deploy_operations.deploy_simulator_agent_script(agent_url2, script)

    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    ceph_pss = res_ops.query_resource(res_ops.CEPH_PRIMARY_STORAGE, [])
    if len(ceph_pss) == 0:
        test_util.test_skip('Required ceph ps to test')
    ps_uuid = ceph_pss[0].uuid

    create_vm_failure = False
    try:
        vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    except Exception:
        create_vm_failure = True
    if agent_url is not None and not create_vm_failure:
        test_util.test_fail("Expect failure during creating VM while it passed. Test Exception handling for Create VM FAIL")

    if agent_url is not None:
        saved_db_stats2 = get_db_stats()
        for key in saved_db_stats2:
            if saved_db_stats2[key] != saved_db_stats[key] and key not in db_tables_white_list:
                test_util.test_fail("DB Table %s changed %s -> %s" % (key, saved_db_stats[key], saved_db_stats2[key]))
def test():
    """Build a test ISO on the built-in HTTP server over SSH, add it as an
    ISO image, export it, and verify the exported URL ends in '.iso'."""
    bs = test_lib.lib_get_image_store_backup_storage()
    if not bs:
        test_util.test_skip('Not find image store type backup storage.')

    iso_option = test_util.ImageOption()
    iso_option.set_name('iso')
    root_disk_uuid = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')).uuid
    iso_option.set_backup_storage_uuid_list([bs.uuid])

    http_server_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    # Install genisoimage on the HTTP server if it is not already there.
    if not test_lib.lib_execute_ssh_cmd(http_server_ip, 'root', 'password', "command -v genisoimage"):
        test_lib.lib_execute_ssh_cmd(http_server_ip, 'root', 'password', "yum -y install genisoimage")
    build_cmd = "genisoimage -o %s/apache-tomcat/webapps/zstack/static/zstack-repo/7/x86_64/os/test.iso /tmp/" % os.environ.get('zstackInstallPath')
    test_lib.lib_execute_ssh_cmd(http_server_ip, 'root', 'password', build_cmd)
    iso_option.set_url('http://%s:8080/zstack/static/zstack-repo/7/x86_64/os/test.iso' % (http_server_ip))

    iso_image = test_image.ZstackTestImage()
    iso_image.set_image(img_ops.add_iso_template(iso_option))
    iso_image.set_creation_option(iso_option)
    test_obj_dict.add_image(iso_image)

    exported_url = iso_image.export()
    iso_image.delete_exported_image()
    test_lib.lib_robot_cleanup(test_obj_dict)

    if not exported_url.endswith('.iso'):
        test_util.test_fail('Export ISO Image Test Fail, exported ISO image ends with %s' % (exported_url.split('.')[-1]))
    test_util.test_pass('Export ISO Image Test Success')
def test(): test_util.test_dsc(''' Will doing random test operations, including vm create/stop/start/reboot /destroy, volume create/attach/detach/delete. It doesn't include SG VIP and snapshots operations. If reach max 4 coexisting running vm, testing will success and quit. ''') target_running_vm = 4 test_util.test_dsc('Random Test Begin. Test target: 4 coexisting running VM (not include VR).') robot_test_obj = test_util.Robot_Test_Object() robot_test_obj.set_test_dict(test_dict) extra_exclusive_actions = [] if test_lib.lib_get_image_store_backup_storage() != None: extra_exclusive_actions = [test_state.TestAction.create_data_vol_template_from_volume, test_state.TestAction.create_image_from_volume] robot_test_obj.set_exclusive_actions_list(\ test_state.TestAction.sg_actions \ + test_state.TestAction.vip_actions \ + test_state.TestAction.snapshot_actions \ + [ test_state.TestAction.create_volume ] \ + extra_exclusive_actions) priority_actions = test_state.TestAction.volume_actions * 4 priority_action_obj = action_select.ActionPriority() priority_action_obj.add_priority_action_list(priority_actions) robot_test_obj.set_priority_actions(priority_action_obj) rounds = 1 while len(test_dict.get_vm_list(vm_header.RUNNING)) < target_running_vm: print "test_dict: %s" % test_dict test_util.test_dsc('New round %s starts: random operation pickup.' % rounds) test_lib.lib_vm_random_operation(robot_test_obj) test_util.test_dsc('Round %s finished. Begin status checking.' % rounds) rounds += 1 test_lib.lib_robot_status_check(test_dict) test_util.test_dsc('Reach test pass exit criterial.') test_lib.lib_robot_cleanup(test_dict) test_util.test_pass('Create random VM Test Success')
def test():
    """Create a root-volume template on ceph BS with a delayed agent.

    Duplicate of the ceph create-template timeout case: the fake-agent entry
    is keyed by the VM's root volume uuid, and the create call must take at
    least half of the configured agent delay.
    """
    global agent_url
    global vm
    global image

    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    ceph_pss = res_ops.query_resource(res_ops.CEPH_PRIMARY_STORAGE, [])
    if len(ceph_pss) == 0:
        test_util.test_skip('Required ceph ps to test')
    ps_uuid = ceph_pss[0].uuid

    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)

    agent_url = flavor['agent_url']
    agent_action = flavor['agent_action']
    if agent_action == 1:
        agent_time = (24 * 60 * 60 - 60) * 1000
    elif agent_action == 2:
        agent_time = 360 * 1000
    else:
        # Fix: avoid NameError on agent_time below for an unknown action.
        test_util.test_fail('unexpected agent_action: %s' % agent_action)

    rsp = dep_ops.json_post(
        "http://127.0.0.1:8888/test/api/v1.0/store/create",
        simplejson.dumps({"key": vm.get_vm().rootVolumeUuid,
                          "value": '{"%s":%s}' % (agent_url, agent_action)}))

    bss = res_ops.query_resource(res_ops.CEPH_BACKUP_STORAGE, [])
    if len(bss) == 0:
        test_util.test_skip('Required ceph bs to test')
    bs_uuid = bss[0].uuid

    image_creation_option = test_util.ImageOption()
    image_creation_option.set_backup_storage_uuid_list([bs_uuid])
    image_creation_option.set_root_volume_uuid(vm.vm.rootVolumeUuid)
    image_creation_option.set_name('test_create_root_volume_template_timeout')
    image_creation_option.set_timeout(24 * 60 * 60 * 1000)

    image = zstack_image_header.ZstackTestImage()
    image.set_creation_option(image_creation_option)

    start = time.time()
    image.create()
    end = time.time()
    if end - start < agent_time / 2 / 1000:
        test_util.test_fail('execution time too short %s' % (end - start))
def test():
    """Add a fake raw image while the fake agent applies the flavor's
    agent_action; the AddImage call must take at least half of the
    configured agent delay."""
    global agent_url
    global image

    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    agent_url = flavor['agent_url']
    agent_action = flavor['agent_action']
    if agent_action == 1:
        agent_time = (24 * 60 * 60 - 60) * 1000
    elif agent_action == 2:
        agent_time = 360 * 1000

    fake_uuid = str(uuid.uuid4()).replace('-', '')
    rsp = dep_ops.json_post(
        "http://127.0.0.1:8888/test/api/v1.0/store/create",
        simplejson.dumps({"key": fake_uuid,
                          "value": '{"%s":%s}' % (agent_url, agent_action)}))

    image_creation_option = test_util.ImageOption()
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')

    opt = test_util.ImageOption()
    opt.set_uuid(fake_uuid)
    opt.set_name('fake_image')
    opt.set_description('fake image')
    opt.set_format('raw')
    opt.set_mediaType('RootVolumeTemplate')
    opt.set_backup_storage_uuid_list([imagestore.uuid])
    opt.url = "http://fake/fake.raw"
    opt.set_timeout(24 * 60 * 60 * 1000)

    begin = time.time()
    image = img_ops.add_image(opt)
    elapsed = time.time() - begin
    if elapsed < agent_time / 2 / 1000:
        test_util.test_fail('execution time too short %s' % elapsed)
def test():
    """Migrate a running VM while the agent delays; check the elapsed time.

    For the Local flavor, liveMigrationWithStorage is enabled first (the old
    value is saved in the ``live_migration`` global for teardown to restore).
    The migration must take at least half of the configured agent delay.
    """
    global agent_url
    global vm
    global live_migration

    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    ps_type = flavor['ps']
    if ps_type == "Local":
        # Remember the current setting so teardown can restore it.
        live_migration = config_ops.get_global_config_value('localStoragePrimaryStorage', 'liveMigrationWithStorage.allow')
        config_ops.change_global_config('localStoragePrimaryStorage', 'liveMigrationWithStorage.allow', 'true')

    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    cond = res_ops.gen_query_conditions('type', '=', ps_type)
    pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(pss) == 0:
        test_util.test_skip('Required %s ps to test' % (ps_type))
    ps_uuid = pss[0].uuid

    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)

    agent_url = flavor['agent_url']
    agent_action = flavor['agent_action']
    if agent_action == 1:
        agent_time = (24 * 60 * 60 - 60) * 1000
    elif agent_action == 2:
        agent_time = 360 * 1000
    else:
        # Fix: avoid NameError on agent_time below for an unknown action.
        test_util.test_fail('unexpected agent_action: %s' % agent_action)

    rsp = dep_ops.json_post(
        "http://127.0.0.1:8888/test/api/v1.0/store/create",
        simplejson.dumps({"key": vm.get_vm().uuid,
                          "value": '{"%s":%s}' % (agent_url, agent_action)}))

    candidate_hosts = vm_ops.get_vm_migration_candidate_hosts(vm.get_vm().uuid)
    start = time.time()
    if candidate_hosts is not None and test_lib.lib_check_vm_live_migration_cap(vm.get_vm()):
        vm_ops.migrate_vm(vm.get_vm().uuid, candidate_hosts.inventories[0].uuid)
    else:
        test_util.test_skip('Required migratable host to test')
    end = time.time()
    if end - start < agent_time / 2 / 1000:
        test_util.test_fail('execution time too short %s' % (end - start))
def test():
    """Volume lifecycle on an x86 VM.

    Creates a VM and a volume, then exercises delete/recover, attach,
    template creation, detach, migration, and delete/expunge, checking
    the volume state after every step.
    """
    global test_stub, test_obj_dict

    vm = test_stub.create_x86_vm()
    test_obj_dict.add_vm(vm)
    vm.check()

    vol = test_stub.create_volume()
    test_obj_dict.add_volume(vol)
    vol.check()

    # Soft-delete and recover.
    vol.delete()
    vol.check()
    vol.recover()
    vol.check()

    vol.attach(vm)
    vol.check()

    # Create a template from the attached volume.
    bs_uuid = test_lib.lib_get_image_store_backup_storage().uuid
    vol_template = vol.create_template([bs_uuid])
    test_obj_dict.add_image(vol_template)

    vol.detach(vm.get_vm().uuid)
    vol.check()

    # Migrate the detached volume to a random eligible host.
    dst_host_uuid = test_lib.lib_find_random_host_by_volume_uuid(vol.get_volume().uuid).uuid
    vol.migrate(dst_host_uuid)
    vol.check()

    # Delete for good this time.
    vol.delete()
    vol.check()
    vol.expunge()
    vol.check()

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Create VM Test Success')
def test():
    """Expect VM creation on NFS to fail while a throwing agent script is
    deployed, and verify the database is left unchanged.

    A groovy hook that always throws is installed at the flavor's agent URL
    (and for some flavors at a second URL); the test then asserts that
    creating a VM fails and that DB statistics are unchanged apart from the
    whitelisted tables.
    """
    global agent_url
    global agent_url2
    global vm
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    agent_url = flavor['agent_url']
    # Simulator hook: every invocation of the hooked agent command throws.
    script = '''
{ entity ->
    throw new Exception("shuang")
}
'''

    def _dhcp_ip():
        # The DHCP server IP exists only on flat networks; treat any lookup
        # failure as "no DHCP ip" so the stats helpers receive None.
        if not is_flat:
            return None
        try:
            return net_ops.get_l3network_dhcp_ip(l3net_uuid)
        except Exception:
            return None

    l3net_uuid = test_lib.lib_get_l3_by_name(os.environ.get('l3VlanNetworkName3')).uuid
    is_flat = test_lib.lib_get_flat_dhcp_by_l3_uuid(l3net_uuid)
    dhcp_ip = _dhcp_ip()
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    cond = res_ops.gen_query_conditions('type', '=', 'NFS')
    local_pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if not local_pss:
        test_util.test_skip('Required nfs ps to test')
    ps_uuid = local_pss[0].uuid
    bs_uuid = imagestore.uuid
    # Register a fake raw image; the download URL is never actually fetched
    # by the simulator environment.
    image_option = test_util.ImageOption()
    image_option.set_name('fake_image')
    image_option.set_description('fake image')
    image_option.set_format('raw')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_backup_storage_uuid_list([bs_uuid])
    image_option.url = "http://fake/fake.raw"
    image = img_ops.add_image(image_option)
    image_uuid = image.uuid
    # Warm-up: prove a VM can be created BEFORE the failing hook is installed.
    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    vm.destroy()
    vm.expunge()
    if agent_url is not None:
        deploy_operations.remove_simulator_agent_script(agent_url)
        deploy_operations.deploy_simulator_agent_script(agent_url, script)
        if agent_url == FLAT_DHCP_RELEASE or agent_url == NFS_DELETE:
            # These flavors also break the create-volume-from-template path.
            agent_url2 = NFS_SFTP_CREATE_VOLUME_FROM_TEMPLATE
            deploy_operations.remove_simulator_agent_script(agent_url2)
            deploy_operations.deploy_simulator_agent_script(agent_url2, script)
    saved_db_stats = test_stub.get_db_stats(dhcp_ip)
    create_vm_failure = False
    try:
        vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    except Exception:
        create_vm_failure = True
    if agent_url is not None and not create_vm_failure:
        test_util.test_fail("Expect failure during creating VM while it passed. Test Exception handling for Create VM FAIL")
    if agent_url is not None:
        # Re-resolve the DHCP ip: the failed creation may have changed it.
        dhcp_ip = _dhcp_ip()
        saved_db_stats2 = test_stub.get_db_stats(dhcp_ip)
        test_stub.compare_db_stats(saved_db_stats, saved_db_stats2, db_tables_white_list)
    test_util.test_pass("Test Exception handling for Create VM PASS")
def test():
    """Expect VM creation on Ceph to fail while a throwing agent script is
    deployed, and verify the database is left unchanged.

    The flavor selects which agent URL is hooked with a groovy script that
    always throws; for CEPH_DELETE the VOLUME_CLONE hook is broken as well.
    DB statistics are snapshotted before/after the failed creation and
    compared modulo the whitelisted tables.
    """
    global agent_url
    global agent_url2
    global vm
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    agent_url = flavor['agent_url']
    # Simulator hook: every invocation of the hooked agent command throws.
    script = '''
{ entity ->
    throw new Exception("shuang")
}
'''

    def _dhcp_ip():
        # The DHCP server IP exists only on flat networks; treat any lookup
        # failure as "no DHCP ip" so the stats helpers receive None.
        if not is_flat:
            return None
        try:
            return net_ops.get_l3network_dhcp_ip(l3net_uuid)
        except Exception:
            return None

    l3net_uuid = test_lib.lib_get_l3_by_name(os.environ.get('l3VlanNetworkName3')).uuid
    is_flat = test_lib.lib_get_flat_dhcp_by_l3_uuid(l3net_uuid)
    dhcp_ip = _dhcp_ip()
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)
    ceph_pss = res_ops.query_resource(res_ops.CEPH_PRIMARY_STORAGE, [])
    if not ceph_pss:
        test_util.test_skip('Required ceph ps to test')
    ps_uuid = ceph_pss[0].uuid
    if agent_url == CHECK_BITS:
        # Warm-up create/delete before the hook is installed
        # (NOTE(review): presumably to populate the image cache on the ps
        # so the check-bits path is exercised on the second create -- confirm).
        vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
        vm.destroy()
        vm.expunge()
    if agent_url is not None:
        deploy_operations.remove_simulator_agent_script(agent_url)
        deploy_operations.deploy_simulator_agent_script(agent_url, script)
        if agent_url == CEPH_DELETE:
            # This flavor also breaks the volume-clone path.
            agent_url2 = VOLUME_CLONE
            deploy_operations.remove_simulator_agent_script(agent_url2)
            deploy_operations.deploy_simulator_agent_script(agent_url2, script)
    saved_db_stats = test_stub.get_db_stats(dhcp_ip)
    test_stub.print_table('AccountResourceRefVO')
    create_vm_failure = False
    try:
        vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    except Exception:
        create_vm_failure = True
    if agent_url is not None and not create_vm_failure:
        test_util.test_fail("Expect failure during creating VM while it passed. Test Exception handling for Create VM FAIL")
    if agent_url is not None:
        # Re-resolve the DHCP ip: the failed creation may have changed it.
        dhcp_ip = _dhcp_ip()
        saved_db_stats2 = test_stub.get_db_stats(dhcp_ip)
        test_stub.print_table('AccountResourceRefVO')
        test_stub.compare_db_stats(saved_db_stats, saved_db_stats2, db_tables_white_list)
def test():
    """Scheduler job-group CRUD.

    Adds/removes jobs and triggers on a volumeBackup job group and checks
    the group inventory after every mutation; finally verifies that deleting
    a job or the group cascades correctly while triggers survive group
    deletion.
    """
    global job1, job2, job_group, trigger1, trigger2

    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')

    vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    volume = test_stub.create_volume()
    volume.attach(vm2)
    test_obj_dict.add_vm(vm1)
    test_obj_dict.add_vm(vm2)
    test_obj_dict.add_volume(volume)

    parameters = {
        'retentionType': 'Count',
        'retentionValue': '10',
        'backupStorageUuids': imagestore.uuid,
        'remoteBackupStorageUuid': '',
        'networkWriteBandwidth': '',
        'networkReadBandwidth': '',
        'volumeReadBandwidth': '',
        'volumeWriteBandwidth': '',
    }
    test_util.test_logger(parameters)

    job1 = sch_ops.create_scheduler_job(name='root_volume',
                                        description='vm1 root volume backup',
                                        target_uuid=vm1.get_vm().allVolumes[0].uuid,
                                        type=volumeBackup,
                                        parameters=parameters)
    job2 = sch_ops.create_scheduler_job(name='data_volume',
                                        description='data volume backup',
                                        target_uuid=volume.get_volume().uuid,
                                        type=volumeBackup,
                                        parameters=parameters)
    job_group = sch_ops.create_scheduler_job_group(name='job_group',
                                                   description='vmbackup',
                                                   type=volumeBackup,
                                                   parameters=parameters)
    cond = res_ops.gen_query_conditions('uuid', '=', job_group.uuid)

    def group_inv():
        # Re-query the job-group inventory after each mutation.
        return res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]

    # Job membership: add, remove, re-add, bulk remove/add.
    sch_ops.add_jobs_to_job_group([job1.uuid], job_group.uuid)
    assert len(group_inv().jobsUuid) == 1
    sch_ops.add_jobs_to_job_group([job2.uuid], job_group.uuid)
    assert len(group_inv().jobsUuid) == 2
    sch_ops.remove_jobs_from_job_group([job2.uuid], job_group.uuid)
    assert len(group_inv().jobsUuid) == 1
    sch_ops.add_jobs_to_job_group([job2.uuid], job_group.uuid)
    assert len(group_inv().jobsUuid) == 2
    sch_ops.remove_jobs_from_job_group([job1.uuid, job2.uuid], job_group.uuid)
    assert len(group_inv().jobsUuid) == 0
    sch_ops.add_jobs_to_job_group([job1.uuid, job2.uuid], job_group.uuid)
    assert len(group_inv().jobsUuid) == 2

    # Deleting a job also drops it from the group.
    sch_ops.del_scheduler_job(job2.uuid)
    assert len(group_inv().jobsUuid) == 1
    job2 = None

    trigger1 = sch_ops.create_scheduler_trigger('10sec',
                                                start_time=int(time.time() + 5),
                                                type='cron',
                                                cron='0/10 * * * * ?')
    trigger2 = sch_ops.create_scheduler_trigger('30sec',
                                                start_time=int(time.time() + 5),
                                                type='cron',
                                                cron='0/30 * * * * ?')

    # Trigger attachment: add, remove, re-add.
    sch_ops.add_scheduler_job_group_to_trigger(trigger1.uuid, job_group.uuid, triggerNow=True)
    assert len(group_inv().triggersUuid) == 1
    sch_ops.add_scheduler_job_group_to_trigger(trigger2.uuid, job_group.uuid)
    assert len(group_inv().triggersUuid) == 2
    sch_ops.remove_scheduler_job_group_from_trigger(trigger2.uuid, job_group.uuid)
    assert len(group_inv().triggersUuid) == 1
    sch_ops.add_scheduler_job_group_to_trigger(trigger2.uuid, job_group.uuid, triggerNow=True)
    assert len(group_inv().triggersUuid) == 2

    # Deleting a trigger detaches it from the group.
    sch_ops.del_scheduler_trigger(trigger2.uuid)
    assert len(group_inv().triggersUuid) == 1
    trigger2 = None

    # Deleting the group removes the group and its jobs but keeps triggers.
    sch_ops.del_scheduler_job_group(job_group.uuid)
    assert len(res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)) == 0
    job_group = None

    cond1 = res_ops.gen_query_conditions('uuid', '=', job1.uuid)
    cond2 = res_ops.gen_query_conditions('uuid', '=', trigger1.uuid)
    assert len(res_ops.query_resource(res_ops.SCHEDULERJOB, cond1)) == 0
    assert len(res_ops.query_resource(res_ops.SCHEDULERTRIGGER, cond2)) == 1
    trigger1 = None
    job1 = None

    test_lib.lib_robot_cleanup(test_obj_dict)
def test():
    """Expect VM creation on SharedBlock to fail while throwing agent scripts
    are deployed, and verify the database is left unchanged.

    The flavor's agent URL (and for some flavors a second, create-volume URL)
    is hooked with a groovy script that always throws; DB statistics taken
    before and after the failed creation must match modulo the whitelist.
    """
    global agent_url
    global agent_url2
    global vm
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    agent_url = flavor['agent_url']
    # Simulator hook: every invocation of the hooked agent command throws.
    script = '''
{ entity ->
    throw new Exception("shuang")
}
'''
    if agent_url is not None:
        deploy_operations.remove_simulator_agent_script(agent_url)
        deploy_operations.deploy_simulator_agent_script(agent_url, script)
        if agent_url == FLAT_DHCP_RELEASE or agent_url == SBLK_DELETE:
            # These flavors also break the create-volume path.
            agent_url2 = CREATE_VOLUME
            deploy_operations.remove_simulator_agent_script(agent_url2)
            deploy_operations.deploy_simulator_agent_script(agent_url2, script)

    def _dhcp_ip():
        # The DHCP server IP exists only on flat networks; treat any lookup
        # failure as "no DHCP ip" so the stats helpers receive None.
        if not is_flat:
            return None
        try:
            return net_ops.get_l3network_dhcp_ip(l3net_uuid)
        except Exception:
            return None

    l3net_uuid = test_lib.lib_get_l3_by_name(os.environ.get('l3VlanNetworkName3')).uuid
    is_flat = test_lib.lib_get_flat_dhcp_by_l3_uuid(l3net_uuid)
    dhcp_ip = _dhcp_ip()
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)
    cond = res_ops.gen_query_conditions('type', '=', 'SharedBlock')
    pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if not pss:
        test_util.test_skip('Required smp ps to test')
    ps_uuid = pss[0].uuid
    saved_db_stats = test_stub.get_db_stats(dhcp_ip)
    create_vm_failure = False
    try:
        vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    except Exception:
        create_vm_failure = True
    if agent_url is not None and not create_vm_failure:
        test_util.test_fail("Expect failure during creating VM while it passed. Test Exception handling for Create VM FAIL")
    if agent_url is not None:
        # Re-resolve the DHCP ip: the failed creation may have changed it.
        dhcp_ip = _dhcp_ip()
        saved_db_stats2 = test_stub.get_db_stats(dhcp_ip)
        test_stub.compare_db_stats(saved_db_stats, saved_db_stats2, db_tables_white_list)
def test():
    """Run a vmBackup job group (vm1 + data volume) and a volumeBackup job
    group (vm2 root volume only) on one cron trigger, then verify the backup
    counts and the group/trigger teardown semantics.
    """
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)
    pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    ps_uuid = pss[0].uuid
    vmBackup = 'vmBackup'
    volumeBackup = 'volumeBackup'
    parameters = {'retentionType': 'Count',
                  'retentionValue': '10',
                  'backupStorageUuids': imagestore.uuid,
                  'remoteBackupStorageUuid': '',
                  'networkWriteBandwidth': '',
                  'networkReadBandwidth': '',
                  'volumeReadBandwidth': '',
                  'volumeWriteBandwidth': ''}
    test_util.test_logger(parameters)
    # vm1 + data volume: backed up as a whole VM (vmBackup).
    vm1 = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    vm1_uuid = vm1.get_vm().uuid
    volume1 = test_stub.create_volume()
    vol1_uuid = volume1.get_volume().uuid
    test_lib.lib_attach_volume(vol1_uuid, vm1_uuid)
    job1 = sch_ops.create_scheduler_job(name='vm1',
                                        description='vm1 with volume backup',
                                        target_uuid=vm1_uuid,
                                        type=vmBackup, parameters=parameters)
    # vm2 + data volume: only the root volume is backed up (volumeBackup).
    vm2 = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    vm2_uuid = vm2.get_vm().uuid
    volume2 = test_stub.create_volume()
    vol2_uuid = volume2.get_volume().uuid
    test_lib.lib_attach_volume(vol2_uuid, vm2_uuid)
    job2 = sch_ops.create_scheduler_job(name='vm2-root',
                                        description='vm2 root volume backup',
                                        target_uuid=vm2.get_vm().allVolumes[0].uuid,
                                        type=volumeBackup, parameters=parameters)
    job_group_1 = sch_ops.create_scheduler_job_group(name='job_group_1',
                                                     description='vmbackup',
                                                     type=vmBackup,
                                                     parameters=parameters)
    sch_ops.add_jobs_to_job_group([job1.uuid], job_group_1.uuid)
    job_group_2 = sch_ops.create_scheduler_job_group(name='job_group_2',
                                                     description='volumebackup',
                                                     type=volumeBackup,
                                                     parameters=parameters)
    sch_ops.add_jobs_to_job_group([job2.uuid], job_group_2.uuid)
    trigger = sch_ops.create_scheduler_trigger('10sec',
                                               start_time=int(time.time() + 5),
                                               type='cron', cron='0/10 * * * * ?')
    # Group 1 fires immediately (triggerNow) AND on the cron; group 2 only on the cron.
    sch_ops.add_scheduler_job_group_to_trigger(trigger.uuid, job_group_1.uuid, triggerNow=True)
    sch_ops.add_scheduler_job_group_to_trigger(trigger.uuid, job_group_2.uuid, triggerNow=False)
    time.sleep(9)
    # Expected after triggerNow + one cron firing:
    #   vm1 root volume and its data volume: 2 backups each
    #   vm2 root volume: 1 backup; vm2 data volume: none
    cond = res_ops.gen_query_conditions('volumeUuid', '=', vm1.get_vm().allVolumes[0].uuid)
    backups = res_ops.query_resource(res_ops.VOLUME_BACKUP, cond)
    assert len(backups) == 2
    cond = res_ops.gen_query_conditions('volumeUuid', '=', vol1_uuid)
    backups = res_ops.query_resource(res_ops.VOLUME_BACKUP, cond)
    assert len(backups) == 2
    # BUG FIX: the original re-queried vm1's root volume and volume1 here,
    # asserting 1 and 0 immediately after asserting 2 and 2 on the very same
    # volumes (impossible to pass); per the expected counts above these
    # checks must target vm2's volumes.
    cond = res_ops.gen_query_conditions('volumeUuid', '=', vm2.get_vm().allVolumes[0].uuid)
    backups = res_ops.query_resource(res_ops.VOLUME_BACKUP, cond)
    assert len(backups) == 1
    cond = res_ops.gen_query_conditions('volumeUuid', '=', vol2_uuid)
    backups = res_ops.query_resource(res_ops.VOLUME_BACKUP, cond)
    assert len(backups) == 0
    # Detach group 1 from the trigger and confirm the group no longer lists it.
    sch_ops.remove_scheduler_job_from_trigger(trigger.uuid, job_group_1.uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', job_group_1.uuid)
    job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]
    assert len(job_group_inv.triggersUuid) == 0
    # Deleting the trigger detaches it from group 2 and removes it entirely.
    sch_ops.del_scheduler_trigger(trigger.uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', trigger.uuid)
    trigger_inv = res_ops.query_resource(res_ops.SCHEDULERTRIGGER, cond)
    cond = res_ops.gen_query_conditions('uuid', '=', job_group_2.uuid)
    job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]
    assert len(job_group_inv.triggersUuid) == 0
    assert len(trigger_inv) == 0
    sch_ops.remove_jobs_from_job_group([job1.uuid], job_group_1.uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', job_group_1.uuid)
    job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]
    assert len(job_group_inv.jobsUuid) == 0
    # Deleting a job group removes the group and its remaining jobs.
    sch_ops.del_scheduler_job_group(job_group_2.uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', job_group_2.uuid)
    job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)
    assert len(job_group_inv) == 0
    cond = res_ops.gen_query_conditions('uuid', '=', job2.uuid)
    job_inv = res_ops.query_resource(res_ops.SCHEDULERJOB, cond)
    assert len(job_inv) == 0