def main(args):
    """Mount the AppInstances named in ``args.vols`` (comma-separated).

    Resolves each name/id to an AppInstance via the API, then delegates
    the actual login/mount work to ``mount_volumes``.
    """
    api = scaffold.get_api()
    print('Using Config:')
    scaffold.print_config()
    # One AppInstance per comma-separated volume identifier.
    ais = [api.app_instances.get(name) for name in args.vols.split(',')]
    mount_volumes(api, ais, args.multipath, args.fs, args.fsargs,
                  args.directory, args.workers, args.login_only)
    return SUCCESS
def main(args):
    """Run the registered smoke tests and print a pass/fail report.

    Honors ``--filter`` (substring match on test names), ``--list-tests``
    (print names and exit) and ``--stop-on-failure``.
    """
    global VERBOSE
    VERBOSE = args.verbose
    if args.tenant:
        set_conf_tenant(args.tenant)
        restart_cvol()
    api = scaffold.get_api()
    config = scaffold.get_config()
    print("Using Config")
    scaffold.print_config()
    # Tests
    ptests = set(_TESTS)
    tests = set()
    if args.filter:
        for f in args.filter:
            tests.update(
                filter(lambda x: f in x.__name__ or f == x.__name__, ptests))
    else:
        tests = ptests
    # BUGFIX: the sets hold function objects, which are unorderable in
    # Python 3 -- sorted(tests) raises TypeError once there are two or
    # more tests.  Sort by test name instead.
    by_name = lambda t: t.__name__
    if args.list_tests:
        print("TESTS")
        print("-----")
        for test in sorted(tests, key=by_name):
            print(test.__name__)
        sys.exit(0)
    for test in sorted(tests, key=by_name):
        if args.stop_on_failure and len(_FAIL) > 0:
            print("Detected failure, stopping tests")
            sys.exit(1)
        test(api)
    print()
    print("----------")
    print("| REPORT |")
    print("----------")
    print("Tenant:", config['tenant'])
    print("PASSED:", len(_PASS))
    print("FAILED:", len(_FAIL))
    for name, e in _FAIL:
        print(name)
        print(e)
    print("XFAILED:", len(_XFAIL))
    for name, e in _XFAIL:
        print(name)
        print(e)
    print("SKIPPED:", len(_SKIP))
def main(args):
    """Scan Kubernetes PVCs for Flex volumes (work in progress).

    NOTE(review): the raise on the first line makes everything below it
    unreachable -- it is kept as the author's implementation sketch.
    """
    raise NotImplementedError("This script has not been fully implemented")
    api = scaffold.get_api()
    scaffold.print_config()
    api.app_instances.list()
    # Kubernetes client setup (dead code, see raise above).
    config.load_kube_config()
    v1 = client.CoreV1Api()
    print("Finding Flex volumes")
    flex_vols = []
    ret = v1.list_persistent_volume_claim_for_all_namespaces(watch=False)
    for elem in ret.items:
        # Collect PVCs whose backing volume name matches the Flex pattern.
        if RE.match(elem.spec.volume_name):
            flex_vols.append({
                "name": elem.spec.volume_name,
                "size": elem.status.capacity.storage,
            })
        print("{}".format(elem))
def main(args):
    """Entry point for the local volume-management CLI.

    Dispatches, in order: health check, forced-initiator confirmation,
    show-at-url, listing, alert clearing, unmount/logout/clean, policy and
    volume creation, login/mount, fio/vdbench generation, and metrics.
    """
    if args.csi_yaml:
        udc_envs_from_csi_yaml(args.csi_yaml)
    api = scaffold.get_api()
    print('Using Config:')
    scaffold.print_config()
    if args.health:
        if not run_health(api):
            return FAILURE
        return SUCCESS
    if args.force_initiator_creation:
        # Destructive operation: require an explicit capital-Y confirmation.
        resp = input(
            hf("Forcing initiator creation could result in I/O "
               "interruption for Volumes connected to the forced "
               "Initiator being created within this tenant. Are you "
               "Sure you want to proceed? Y/n") + "\n")
        if resp != "Y":
            # BUGFIX: typo "Recieved" -> "Received" in user-facing message
            print("Received negative confirmation, exiting")
            return FAILURE
        else:
            print("Received positive confirmation. Continuing")
    if args.show_at_url:
        for url in args.show_at_url:
            show.at_url(api, url)
        return SUCCESS
    for arg in args.list:
        detail = 'detail' in arg
        if 'volumes' in arg:
            print(
                "###### VOLUMES {}######".format("DETAIL " if detail else ''))
            for vol in args.volume:
                list_volumes('local', api, vol, detail=detail)
            if not args.volume:
                list_volumes('local', api, 'prefix=all', detail)
        elif 'templates' in arg:
            print("###### TEMPLATES {}######".format(
                "DETAIL " if detail else ''))
            list_templates(api, detail=detail)
        elif 'mounts' in arg:
            # BUGFIX: banner string had no {} placeholder, so the
            # "DETAIL " argument was silently dropped by .format().
            print("###### MOUNTS {}######".format(
                "DETAIL " if detail else ''))
            for vol in args.volume:
                list_mounts('local', api, vol, detail, not args.no_multipath)
            # Mirror the volumes branch: list everything when no --volume.
            if not args.volume:
                list_mounts('local', api, 'prefix=all', detail,
                            not args.no_multipath)
        elif 'alerts' in arg:
            print("###### ALERTS ######")
            list_alerts(api)
        if 'events' in arg:
            print("###### EVENTS ######")
            # NOTE(review): the '******' literals below look like values
            # masked by a secret scrubber -- confirm intended originals.
            if 'system' in arg:
                user = '******'
            if 'user' in arg:
                user = '******'
            if 'id' in arg:
                user = args.id
            list_events(api, user)
        if 'media-policy' in arg:
            print("###### MEDIA POLICIES ######")
            list_media_policies(api)
        if 'placement-policy' in arg:
            print("###### PLACEMENT POLICIES ######")
            list_placement_policies(api)
    if args.clear_alerts:
        clear_alerts(api)
        return SUCCESS
    if any((args.unmount, args.logout, args.clean)):
        for vol in args.volume:
            del_keys(vol)
            clean_mounts(api, vol, args.directory, args.workers)
        if args.unmount:
            return SUCCESS
    if args.clean:
        for vol in args.volume:
            clean_volumes(api, vol, args.workers)
        for pp in args.placement_policy:
            delete_placement_policy(api, pp)
        for mp in args.media_policy:
            delete_media_policy(api, mp)
        return SUCCESS
    if args.logout:
        return SUCCESS
    # Create placement_policy/media_policy
    for mp in args.media_policy:
        create_media_policy(api, mp)
    for pp in args.placement_policy:
        create_placement_policy(api, pp)
    # Create volumes
    vols = None
    for vol in args.volume:
        vols = create_volumes("local", api, vol, args.workers)
    if args.get_keys:
        for vol in args.volume:
            for n, keys in get_keys(vol):
                print(n, ":", ' '.join(map(str, keys)))
    # Login/mount volumes
    login_only = not args.mount and args.login
    if (args.mount or args.login) and vols:
        dev_or_folders = mount_volumes(api, vols, not args.no_multipath,
                                       args.fstype, args.fsargs,
                                       args.directory, args.workers,
                                       login_only,
                                       args.force_initiator_creation,
                                       args.initiator_path)
    # Generate fio/vdbench output
    if args.fio:
        try:
            exe("which fio")
        except EnvironmentError:
            print("FIO is not installed")
    if args.fio and (not args.mount and not args.login):
        print("--mount or --login MUST be specified when using --fio")
    elif args.fio:
        gen_fio(args.fio_workload, dev_or_folders)
    elif args.vdbench:
        gen_vdb(dev_or_folders)
    # Retrieve metrics
    if args.metrics:
        data = None
        try:
            interval, timeout = map(int, args.metrics.split(','))
            if interval < 1 or timeout < 1:
                raise ValueError()
            mtypes = args.metrics_type
            if not args.metrics_type:
                mtypes = ['iops_write']
            data = get_metrics(api, mtypes, args.volume, interval, timeout,
                               args.metrics_op)
        except ValueError:
            # BUGFIX: implicit string concatenation was missing a space
            # ("where'i'"); message also had typo-free wording restored.
            print("--metrics argument must be in format '--metrics i,t' "
                  "where 'i' is the interval in seconds and 't' is the "
                  "timeout in seconds. Both must be positive integers >= 1")
            return FAILURE
        if data:
            write_metrics(data, args.metrics_out_file)
        else:
            # BUGFIX: typo "recieved" -> "received"
            print("No data received from metrics")
            return FAILURE
    return SUCCESS
def main(args):
    """Tear down mounts/logins for the volumes given in ``args.vols``."""
    sdk = scaffold.get_api()
    print('Using Config:')
    scaffold.print_config()
    # All the heavy lifting lives in clean_mounts.
    clean_mounts(sdk, args.vols, args.directory, args.workers)
    return SUCCESS
def main(args):
    """Find a volume/app/snapshot resource, then optionally mutate,
    clean, or mount it.

    ``args.op`` selects the lookup; subsequent flags (placement_mode,
    repair_priority, make_snap, extend, rollback, clean, remount,
    mount/login) act on the found resource.
    """
    api = scaffold.get_api()
    print('Using Config:')
    scaffold.print_config()
    found = None
    # LIST/HEALTH OPERATIONS
    if args.op == 'health-check':
        run_health(api)
        return SUCCESS
    elif args.op == 'list-snaps':
        app_snaps, vol_snaps = find_snaps(api, args.name, args.id)
        print_snaps(app_snaps, vol_snaps)
        return SUCCESS
    elif args.op == 'list-snaps-pretty':
        app_snaps, vol_snaps = find_snaps(api, args.name, args.id)
        print_pretty_snaps(api, app_snaps, vol_snaps)
        return SUCCESS
    # FIND RESOURCE
    elif args.op == 'find-vol':
        found = find_vol(api, args.name, args.id)
        if found:
            print("Found volume:", found['name'])
            print("=============")
        else:
            print("No volume found matching name {} or id {}".format(
                args.name, args.id))
            return FAILURE
    elif args.op == 'find-app':
        found = find_app(api, args.name, args.id)
        if found:
            print("Found AppInstance:", found['name'])
            print("=============")
        else:
            print("No AppInstance found matching name {} or id {}".format(
                args.name, args.id))
            return FAILURE
    elif args.op == 'find-snap':
        found = find_snap(api, args.id)
        if found:
            print("Found Snapshot:", args.id)
            print("=============")
        else:
            print("No Snapshot found matching name {} or id {}".format(
                args.name, args.id))
            return FAILURE
    elif args.op == 'find-from-mount':
        if not args.path:
            raise ValueError("find-from-mount requires --path argument")
        found = find_from_mount(api, args.path, 'vol')
        print("Found Volume:", found['name'])
        print("============")
    elif args.op == 'find-ai-from-mount':
        if not args.path:
            raise ValueError("find-from-mount requires --path argument")
        found = find_from_mount(api, args.path, 'ai')
        print("Found AppInstance:", found['name'])
        print("============")
    elif args.op == 'find-from-device-path':
        if not args.path:
            raise ValueError("find-from-device-path requires --path argument")
        found = find_from_device_path(api, args.path)
        print("Found Volume:", found['name'])
        print("============")
    elif args.op == 'find-ai-from-device-path':
        if not args.path:
            raise ValueError("find-from-device-path requires --path argument")
        found = find_ai_from_device_path(api, args.path)
        print("Found AppInstance:", found['name'])
        print("============")
    print(found)
    # CHANGE STATE OF FOUND RESOURCE
    if args.placement_mode:
        set_placement(api, found, args.placement_mode)
        print("Set placement_mode for {} to {}".format(found.path,
                                                       found.placement_mode))
    if args.repair_priority:
        set_repair_priority(api, found, args.repair_priority)
        print("Set repair_priority for {} to {}".format(
            found.path, found.repair_priority))
    if args.make_snap:
        snap = make_snap(api, found)
        print("Created snapshot:", snap.path)
    if args.extend:
        set_size(api, found, args.extend)
        # BUGFIX: was print("Extended volume: %s", found.path) -- %-style
        # formatting does nothing with print(), emitting a literal "%s".
        print("Extended volume:", found.path)
    if args.rollback:
        set_rollback(api, found, args.rollback)
        print("Rolled-back resource {} to {}".format(
            found.path,
            (args.rollback if args.rollback != "None"
             else found.path.split("/")[-1])))
    if args.remount:
        print("Remounting resource: {}".format(found.path))
    # HANDLE CLEAN MOUNTS/LOGINS
    if (args.clean or args.remount) and found:
        # Skip cleaning mounts for snapshots since they don't have any
        if not hasattr(found, 'utc_ts'):
            ai = ai_from_resource(api, found)
            # BUGFIX: clean_mounts was reached in the snapshot branch too,
            # where 'ai' was never bound (NameError); scoped it here.
            clean_mounts(api, [ai], args.directory, 1)
        else:
            print("Can't clean mounts for snapshot resources")
    # HANDLE LOGIN/MOUNT/REMOUNT
    if (args.mount or args.login) and found:
        ais = []
        # Mount snapshot objects by creating a new AppInstance first
        if hasattr(found, 'utc_ts'):
            ai = new_app_from_snap(api, found)
            ais.append(ai)
        else:
            ai = ai_from_resource(api, found)
            if args.all_snaps:
                app_snaps, vol_snaps = find_snaps(api, None, ai.id)
                for snap in app_snaps + vol_snaps:
                    ais.append(new_app_from_snap(api, snap))
            else:
                ais.append(ai)
        mount_volumes(api, ais, not args.no_multipath, args.fstype,
                      args.fsargs, args.directory, 1, args.login)
    return SUCCESS
def main(args):
    """Entry point for the local/remote volume CLI.

    Like the local-only variant, but every mount/clean/fio step dispatches
    to a *_remote helper when ``args.run_host`` is not 'local'.
    """
    api = scaffold.get_api()
    print('Using Config:')
    scaffold.print_config()
    if args.health:
        if not run_health(api):
            return FAILURE
        return SUCCESS
    if 'volumes' in args.list:
        for vol in args.volume:
            list_volumes(args.run_host, api, vol,
                         detail='detail' in args.list)
        return SUCCESS
    elif 'templates' in args.list:
        # NOTE(review): unlike the volumes/mounts branches this does not
        # return, so execution falls through to the create/mount logic --
        # confirm whether a 'return SUCCESS' is intended here.
        list_templates(api, detail='detail' in args.list)
    elif 'mounts' in args.list:
        for vol in args.volume:
            list_mounts(args.run_host, api, vol, 'detail' in args.list,
                        not args.no_multipath)
        return SUCCESS
    if any((args.unmount, args.logout, args.clean)):
        for vol in args.volume:
            if args.run_host == 'local':
                clean_mounts(api, vol, args.directory, args.workers)
            else:
                clean_mounts_remote(args.run_host, vol, args.directory,
                                    args.workers)
        if args.unmount:
            return SUCCESS
    if args.clean:
        for vol in args.volume:
            clean_volumes(api, vol, args.workers)
        return SUCCESS
    if args.logout:
        return SUCCESS
    vols = None
    for vol in args.volume:
        vols = create_volumes(args.run_host, api, vol, args.workers)
    login_only = not args.mount and args.login
    if (args.mount or args.login) and vols and args.run_host == 'local':
        dev_or_folders = mount_volumes(api, vols, not args.no_multipath,
                                       args.fstype, args.fsargs,
                                       args.directory, args.workers,
                                       login_only)
    elif (args.mount or args.login) and vols and args.run_host != 'local':
        dev_or_folders = mount_volumes_remote(args.run_host, vols,
                                              not args.no_multipath,
                                              args.fstype, args.fsargs,
                                              args.directory, args.workers,
                                              login_only)
    if args.fio:
        try:
            exe("which fio")
        except EnvironmentError:
            print("FIO is not installed")
    if args.fio and (not args.mount and not args.login):
        print("--mount or --login MUST be specified when using --fio")
    elif args.fio and args.run_host == 'local':
        gen_fio(args.fio_workload, dev_or_folders)
    elif args.fio and args.run_host != 'local':
        gen_fio_remote(args.run_host, args.fio_workload, dev_or_folders)
    if args.metrics:
        data = None
        try:
            interval, timeout = map(int, args.metrics.split(','))
            if interval < 1 or timeout < 1:
                raise ValueError()
            mtypes = args.metrics_type
            if not args.metrics_type:
                mtypes = ['iops_write']
            data = get_metrics(api, mtypes, args.volume, interval, timeout,
                               args.metrics_op)
        except ValueError:
            # BUGFIX: implicit string concatenation was missing a space
            # ("where'i'") in the user-facing error message.
            print("--metrics argument must be in format '--metrics i,t' "
                  "where 'i' is the interval in seconds and 't' is the "
                  "timeout in seconds. Both must be positive integers >= 1")
            return FAILURE
        if data:
            write_metrics(data, args.metrics_out_file)
        else:
            # BUGFIX: typo "recieved" -> "received"
            print("No data received from metrics")
            return FAILURE
    return SUCCESS