def pytest_configure(config):
    """Resolve the appliance pool for this session and register the holder plugin.

    Selection precedence: ``--dummy-appliance`` flag, an appliance already on
    the stack, explicit ``--appliance`` options, sprout-managed appliances,
    and finally the ``conf.env`` configuration.
    """
    # Nothing to set up when pytest is only rendering --help output.
    if config.getoption('--help'):
        return

    reporter = terminalreporter.reporter()

    if config.getoption('--dummy-appliance'):
        pool = [DummyAppliance.from_config(config)]
        reporter.write_line('Retrieved Dummy Appliance', red=True)
    elif stack.top:
        # Reuse whatever appliance an outer context already pushed.
        pool = [stack.top]
    elif config.option.appliances:
        pool = appliances_from_cli(
            config.option.appliances, config.option.appliance_version)
        reporter.write_line(
            'Retrieved these appliances from the --appliance parameters', red=True)
    elif config.getoption('--use-sprout'):
        from cfme.test_framework.sprout.plugin import mangle_in_sprout_appliances
        mangle_in_sprout_appliances(config)
        # TODO : handle direct sprout pass on?
        pool = appliances_from_cli(config.option.appliances, None)
        reporter.write_line(
            'Retrieved these appliances from the --sprout-* parameters', red=True)
    else:
        pool = load_appliances_from_config(conf.env)
        reporter.write_line('Retrieved these appliances from the conf.env', red=True)

    # Only announce the pool when we are not reusing an already-pushed appliance.
    if not stack.top:
        for entry in pool:
            reporter.write_line('* {!r}'.format(entry), cyan=True)

    primary = pool[0]
    if not primary.is_dev:
        # 24h session timeout so long runs do not get logged out.
        primary.set_session_timeout(86400)
    stack.push(primary)
    config.pluginmanager.register(ApplianceHolderPlugin(primary, pool), PLUGIN_KEY)
def pytest_configure(config):
    """Pick the appliances for this session and register the holder plugin.

    Precedence: ``--dummy-appliance``, an appliance already on the stack,
    explicit ``--appliance`` options, sprout-managed appliances, then the
    ``conf.env`` configuration.
    """
    reporter = terminalreporter.reporter()

    if config.getoption('--dummy-appliance'):
        selected = [DummyAppliance.from_config(config)]
        reporter.write_line('Retrieved Dummy Appliance', red=True)
    elif stack.top:
        # An outer context already pushed an appliance; reuse it.
        selected = [stack.top]
    elif config.option.appliances:
        selected = appliances_from_cli(config.option.appliances)
        reporter.write_line(
            'Retrieved these appliances from the --appliance parameters', red=True)
    elif config.getoption('--use-sprout'):
        from .sprout.plugin import mangle_in_sprout_appliances
        mangle_in_sprout_appliances(config)
        selected = appliances_from_cli(config.option.appliances)
        reporter.write_line(
            'Retrieved these appliances from the --sprout-* parameters', red=True)
    else:
        selected = load_appliances_from_config(conf.env)
        reporter.write_line('Retrieved these appliances from the conf.env', red=True)

    # Announce the pool only for freshly-resolved appliances.
    if not stack.top:
        for each in selected:
            reporter.write_line('* {!r}'.format(each), cyan=True)

    head = selected[0]
    # 24h session timeout so long runs do not get logged out.
    head.set_session_timeout(86400)
    stack.push(head)
    config.pluginmanager.register(
        ApplianceHolderPlugin(head, selected), "appliance-holder")
def pytest_configure(config):
    """Determine the appliance pool and register the ApplianceHolderPlugin.

    The first matching source wins: ``--dummy-appliance``, a previously
    pushed stack appliance, ``--appliance`` CLI options, sprout, and lastly
    ``conf.env``.
    """
    reporter = terminalreporter.reporter()

    if config.getoption('--dummy-appliance'):
        chosen = [DummyAppliance.from_config(config)]
        reporter.write_line('Retrieved Dummy Appliance', red=True)
    elif stack.top:
        chosen = [stack.top]  # reuse the appliance pushed by an outer context
    elif config.option.appliances:
        chosen = appliances_from_cli(
            config.option.appliances, config.option.appliance_version)
        reporter.write_line(
            'Retrieved these appliances from the --appliance parameters', red=True)
    elif config.getoption('--use-sprout'):
        from .sprout.plugin import mangle_in_sprout_appliances
        mangle_in_sprout_appliances(config)
        # TODO : handle direct sprout pass on?
        chosen = appliances_from_cli(config.option.appliances, None)
        reporter.write_line(
            'Retrieved these appliances from the --sprout-* parameters', red=True)
    else:
        chosen = load_appliances_from_config(conf.env)
        reporter.write_line('Retrieved these appliances from the conf.env', red=True)

    if not stack.top:
        # List every freshly-resolved appliance for the operator.
        for item in chosen:
            reporter.write_line('* {!r}'.format(item), cyan=True)

    lead = chosen[0]
    if not lead.is_dev:
        # Stretch the UI session timeout to a full day for long test runs.
        lead.set_session_timeout(86400)
    stack.push(lead)
    config.pluginmanager.register(ApplianceHolderPlugin(lead, chosen), PLUGIN_KEY)
def pytest_configure(config):
    """Resolve the appliance pool, register the holder plugin, and trigger setup.

    Sources in precedence order: ``--dummy-appliance`` (possibly several via
    ``--num-dummies``), an already-pushed stack appliance, ``--appliance``
    options, sprout, then ``conf.env``. Real appliances additionally get the
    ``pytest_appliance_setup`` hook fired.
    """
    # Skip all appliance work when pytest is just printing --help.
    if config.getoption('--help'):
        return

    reporter = terminalreporter.reporter()

    if config.getoption('--dummy-appliance'):
        dummy_count = config.getoption('--num-dummies')
        chosen = [DummyAppliance.from_config(config) for _ in range(dummy_count)]
        # Dummy appliances cannot execute tests; force collection-only mode.
        if not config.option.collectonly:
            config.option.collectonly = True
        reporter.write_line('Retrieved Dummy Appliance', red=True)
    elif stack.top:
        chosen = [stack.top]  # reuse the appliance pushed by an outer context
    elif config.option.appliances:
        chosen = appliances_from_cli(
            config.option.appliances, config.option.appliance_version)
        reporter.write_line(
            'Retrieved these appliances from the --appliance parameters', red=True)
    elif config.getoption('--use-sprout'):
        from cfme.test_framework.sprout.plugin import mangle_in_sprout_appliances
        mangle_in_sprout_appliances(config)
        # TODO : handle direct sprout pass on?
        chosen = appliances_from_cli(config.option.appliances, None)
        reporter.write_line(
            'Retrieved these appliances from the --sprout-* parameters', red=True)
    else:
        chosen = load_appliances_from_config(conf.env)
        reporter.write_line('Retrieved these appliances from the conf.env', red=True)

    if not stack.top:
        # List every freshly-resolved appliance for the operator.
        for entry in chosen:
            reporter.write_line(f'* {entry!r}', cyan=True)

    primary = chosen[0]
    stack.push(primary)
    config.pluginmanager.register(ApplianceHolderPlugin(primary, chosen), PLUGIN_KEY)

    # Only real (non-dummy, non-dev) appliances go through full setup.
    if not isinstance(primary, DummyAppliance) and not primary.is_dev:
        config.hook.pytest_appliance_setup(config=config)
logger.info('removing sa %s from scc %s', sa, scc_name) provider.mgmt.remove_sa_from_scc(scc_name=scc_name, namespace=sa_namespace, sa=sa_name) else: logger.debug( "skipping sa %s in scc %s because project exists " "or it doesn't match any pattern", sa, scc_name) if __name__ == "__main__": args = parse_cmd_line() errors = 0 pf = ProviderFilter(classes=[OpenshiftProvider], required_fields=[('use_for_sprout', True)]) with DummyAppliance(): providers = list_providers(filters=[pf], use_global_filters=False) for prov in providers: # ping provider try: prov.mgmt.list_project() except Exception as e: logger.error('Connection to provider %s cannot be estabilished', prov.key) logger.error('Error: %s', e) errors += 1 continue # remove all sa records from scc if args.cleanup_scc: try:
output_queue.put(output_list) return if __name__ == "__main__": args = parse_cmd_line() # providers as a set when processing tags to ensure unique entries filters = [] if args.provider: filters.append(ProviderFilter(keys=args.provider)) if args.tag: filters.append(ProviderFilter(required_tags=args.tag)) # don't include global filter to keep disabled in the list with DummyAppliance('5.10.0.0'): providers = [prov.key for prov in list_providers(filters, use_global_filters=False)] queue = Queue() # for MP output proc_list = [ Process(target=list_vms, args=(provider, queue), name='list_vms:{}'.format(provider)) for provider in providers ] for proc in proc_list: proc.start() for proc in proc_list: proc.join() print('Done processing providers, assembling report...') # Now pull all the results off of the queue
def cleanup_vms(texts, max_hours=24, providers=None, tags=None, dryrun=True):
    """ Main method for the cleanup process
    Generates regex match objects
    Checks providers for cleanup boolean in yaml
    Checks provider connectivity (using ping)
    Process Pool for provider scanning
    Each provider process will thread vm scanning and deletion
    Args:
        texts (list): List of regex strings to match with
        max_hours (int): age limit for deletion
        providers (list): List of provider keys to scan and cleanup
        tags (list): List of tags to filter providers by
        dryrun (bool): Whether or not to actually delete VMs or just report
    Returns:
        int: return code, 0 on success, otherwise raises exception
    """
    # NOTE(review): relies on module-level names `manager` (multiprocessing
    # Manager), `args` (parsed CLI options), `logger`, `cleanup_provider`,
    # `tabulate`, `attrgetter`, `Pool`, etc. — confirm they exist at module scope.
    logger.info('Matching VM names against the following case-insensitive strings: %r', texts)
    # Compile regex, strip leading/trailing single quotes from cli arg
    matchers = [re.compile(text.strip("'"), re.IGNORECASE) for text in texts]
    # setup provider filter with cleanup (default), tags, and providers (from cli opts)
    filters = [ProviderFilter(required_fields=[('cleanup', True)])]
    if tags:
        logger.info('Adding required_tags ProviderFilter for: %s', tags)
        filters.append(ProviderFilter(required_tags=tags))
    if providers:
        logger.info('Adding keys ProviderFilter for: %s', providers)
        filters.append(ProviderFilter(keys=providers))
    # Just want keys, use list_providers with no global filters to include disabled.
    with DummyAppliance():
        providers_to_scan = [prov.key for prov in list_providers(filters, use_global_filters=False)]
    logger.info('Potential providers for cleanup, filtered with given tags and provider keys: \n%s',
                '\n'.join(providers_to_scan))
    # scan providers for vms with name matches
    scan_fail_queue = manager.Queue()
    # One pool worker per provider scan, capped at 4 concurrent processes.
    with Pool(4) as pool:
        deleted_vms = pool.starmap(
            cleanup_provider,
            ((provider_key, matchers, scan_fail_queue, max_hours, dryrun)
             for provider_key in providers_to_scan)
        )
    # flatten deleted_vms list, as its top level is by provider process
    # at same time remove None responses
    deleted_vms = [report for prov_list in deleted_vms if prov_list is not None
                   for report in prov_list]
    scan_fail_vms = []
    # add the scan failures into deleted vms for reporting sake
    while not scan_fail_queue.empty():
        scan_fail_vms.append(scan_fail_queue.get())
    # Append a combined failures+deletions table to the report file.
    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '## text matches: {}\n'
                     '## age matches: {}\n'
                     .format(texts, max_hours))
        message = tabulate(
            sorted(scan_fail_vms + deleted_vms, key=attrgetter('result')),
            headers=['Provider', 'Name', 'Age', 'Status Before', 'Delete RC'],
            tablefmt='orgtbl'
        )
        report.write(message + '\n')
        logger.info(message)
    return 0
def dummy_appliance():
    """Build and hand back a throwaway :class:`DummyAppliance` instance."""
    fake = DummyAppliance()
    return fake
def cleanup_vms(texts, max_hours=24, providers=None, tags=None, prompt=True):
    """ Main method for the cleanup process
    Generates regex match objects
    Checks providers for cleanup boolean in yaml
    Checks provider connectivity (using ping)
    Threads process_provider_vms to build list of vms to delete
    Prompts user to continue with delete
    Threads deleting of the vms
    Args:
        texts (list): List of regex strings to match with
        max_hours (int): age limit for deletion
        providers (list): List of provider keys to scan and cleanup
        tags (list): List of tags to filter providers by
        prompt (bool): Whether or not to prompt the user before deleting vms
    Returns:
        int: return code, 0 on success, otherwise raises exception
    """
    # NOTE(review): relies on module-level names `manager`, `args`, `logger`,
    # `pool_manager`, `scan_provider`, `scan_vm`, `delete_vm`, `tabulate`,
    # `attrgetter` — confirm they exist at module scope. `raw_input` implies
    # Python 2.
    logger.info(
        'Matching VM names against the following case-insensitive strings: %r', texts)
    # Compile regex, strip leading/trailing single quotes from cli arg
    matchers = [re.compile(text.strip("'"), re.IGNORECASE) for text in texts]
    # setup provider filter with cleanup (default), tags, and providers (from cli opts)
    filters = [ProviderFilter(required_fields=[('cleanup', True)])]
    if tags:
        logger.info('Adding required_tags ProviderFilter for: %s', tags)
        filters.append(ProviderFilter(required_tags=tags))
    if providers:
        logger.info('Adding keys ProviderFilter for: %s', providers)
        filters.append(ProviderFilter(keys=providers))
    # Just want keys, use list_providers with no global filters to include disabled.
    with DummyAppliance():
        providers_to_scan = [
            prov.key for prov in list_providers(filters, use_global_filters=False)
        ]
    logger.info(
        'Potential providers for cleanup, filtered with given tags and provider keys: \n%s',
        '\n'.join(providers_to_scan))
    # scan providers for vms with name matches
    # manager = Manager()
    text_match_queue = manager.Queue()
    scan_fail_queue = manager.Queue()
    provider_scan_args = [(provider_key, matchers, text_match_queue, scan_fail_queue)
                          for provider_key in providers_to_scan]
    pool_manager(scan_provider, provider_scan_args)
    # Drain the name-match queue into a plain list of (provider_key, vm_name).
    text_matched = []
    while not text_match_queue.empty():
        text_matched.append(text_match_queue.get())
    # scan vms for age matches
    age_match_queue = manager.Queue()
    vm_scan_args = [(provider_key, vm_name, timedelta(hours=int(max_hours)), age_match_queue,
                     scan_fail_queue)
                    for provider_key, vm_name in text_matched]
    pool_manager(scan_vm, vm_scan_args)
    vms_to_delete = []
    while not age_match_queue.empty():
        vms_to_delete.append(age_match_queue.get())
    scan_fail_vms = []
    # add the scan failures into deleted vms for reporting sake
    while not scan_fail_queue.empty():
        scan_fail_vms.append(scan_fail_queue.get())
    # Interactive confirmation before anything is destroyed.
    if vms_to_delete and prompt:
        yesno = raw_input('Delete these VMs? [y/N]: ')
        if str(yesno).lower() != 'y':
            logger.info('Exiting.')
            return 0
    # initialize this even if we don't have anything to delete, for report consistency
    deleted_vms = []
    if vms_to_delete:
        delete_queue = manager.Queue()
        delete_vm_args = [(provider_key, vm_name, age, delete_queue)
                          for provider_key, vm_name, age in vms_to_delete]
        pool_manager(delete_vm, delete_vm_args)
        while not delete_queue.empty():
            deleted_vms.append(
                delete_queue.get())  # Each item is a VmReport tuple
    else:
        logger.info('No VMs to delete.')
    # Append a combined failures+deletions table to the report file.
    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '## text matches: {}\n'
                     '## age matches: {}\n'.format(texts, max_hours))
        message = tabulate(
            sorted(scan_fail_vms + deleted_vms, key=attrgetter('result')),
            headers=['Provider', 'Name', 'Age', 'Status Before', 'Delete RC'],
            tablefmt='orgtbl')
        report.write(message + '\n')
        logger.info(message)
    return 0
# TODO: clean the logic up here from cfme.utils.appliance import IPAppliance, stack, DummyAppliance # overwrite the default logger before anything else is imported, # to get our best chance at having everything import the replaced logger import cfme.utils.log cfme.utils.log.setup_for_worker(args.worker) slave_log = cfme.utils.log.logger is_dummy = json.loads(args.appliance).get("is_dummy") try: if is_dummy: slave_log.info("Loading dummy appliance...") appliance = DummyAppliance.from_json(args.appliance) else: appliance = IPAppliance.from_json(args.appliance) except ValueError: slave_log.error("Error parsing appliance json") raise else: stack.push(appliance) try: config = json.loads(args.config) except ValueError: slave_log.error("Error parsing pytest config from json") from cfme.fixtures import terminalreporter from cfme.fixtures.pytest_store import store