def templates_uploaded_on_providers(api, stream, template):
    """Return True when every image uploaded for *stream* is present on at
    least one provider of the matching type, False otherwise.

    Exits the process early (without generating a report) when the stream
    still has untested templates.
    """
    if get_untested_templates(api, stream, template):
        print('report will not be generated, proceed with the next untested provider')
        sys.exit()
    # image key in the latest directory -> provider type it must appear on
    image_provider_types = (
        ('template_rhevm', 'rhevm'),
        ('template_rhos', 'openstack'),
        ('template_vsphere', 'virtualcenter'),
        ('template_scvmm', 'scvmm'),
    )
    untested = api.template.get(
        limit=1, tested=False, group__name=stream).get('objects', [])
    for temp in untested:
        for image_key, provider_type in image_provider_types:
            if image_key in images_uploaded(stream):
                if not provider_in_the_list(list_provider_keys(provider_type),
                                            temp['providers']):
                    return False
    return True
def pytest_configure(config):
    """Populate the TEMPLATES mapping from trackerbot, optionally serving
    (and refreshing) entries through the pytest cache."""
    if store.parallelizer_role == 'master' or trackerbot.conf.get('url') is None:
        return

    # A further optimization here is to make the calls to trackerbot per provider
    # and perhaps only pull the providers that are needed, however that will need
    # to ensure that the tests that just randomly use providers adhere to the filters
    # which may be too tricky right now.

    loaded = 0
    if not config.getoption('use_template_cache'):
        store.terminalreporter.line("Loading templates from trackerbot...", green=True)
        provider_templates = trackerbot.provider_templates(trackerbot.api())
        for provider_key in list_provider_keys():
            entries = provider_templates.get(provider_key, [])
            TEMPLATES[provider_key] = entries
            config.cache.set('miq-trackerbot/{}'.format(provider_key), entries)
            loaded += len(entries)
    else:
        store.terminalreporter.line("Using templates from cache...", green=True)
        provider_templates = None
        for provider_key in list_provider_keys():
            entries = config.cache.get('miq-trackerbot/{}'.format(provider_key), None)
            if entries is None:
                store.terminalreporter.line(
                    "Loading templates for {} from source as not in cache".format(
                        provider_key), green=True)
                # Fetch from trackerbot at most once, only when something is missing.
                if not provider_templates:
                    provider_templates = trackerbot.provider_templates(trackerbot.api())
                entries = provider_templates.get(provider_key, [])
                config.cache.set('miq-trackerbot/{}'.format(provider_key), entries)
            loaded += len(entries)
            TEMPLATES[provider_key] = entries
    store.terminalreporter.line(" Loaded {} templates successfully!".format(loaded),
                                green=True)
def pytest_configure(config):
    """Fill the TEMPLATES mapping from trackerbot (or the local pytest cache)."""
    if store.parallelizer_role == 'master' or trackerbot.conf.get('url') is None:
        return

    # A further optimization here is to make the calls to trackerbot per provider
    # and perhaps only pull the providers that are needed, however that will need
    # to ensure that the tests that just randomly use providers adhere to the filters
    # which may be too tricky right now.

    def cache_key(provider_key):
        # One pytest-cache slot per provider key.
        return 'miq-trackerbot/{}'.format(provider_key)

    total = 0
    if not config.getoption('use_template_cache'):
        write_line("Loading templates from trackerbot...")
        provider_templates = trackerbot.provider_templates(trackerbot.api())
        for key in list_provider_keys():
            TEMPLATES[key] = provider_templates.get(key, [])
            config.cache.set(cache_key(key), TEMPLATES[key])
            total += len(TEMPLATES[key])
    else:
        write_line("Using templates from cache...")
        provider_templates = None
        for key in list_provider_keys():
            cached = config.cache.get(cache_key(key), None)
            if cached is None:
                write_line("Loading templates for {} from source as not in cache".format(key))
                # Lazily hit trackerbot once, only if some provider is missing.
                if not provider_templates:
                    provider_templates = trackerbot.provider_templates(trackerbot.api())
                cached = provider_templates.get(key, [])
                config.cache.set(cache_key(key), cached)
            total += len(cached)
            TEMPLATES[key] = cached
    write_line(" Loaded {} templates successfully!".format(total))
def ec2cleanup(exclude_volumes, exclude_eips, exclude_elbs, exclude_enis, output):
    """Clean unused EC2 resources (volumes, ELBs, ENIs, EIPs) on every ec2
    provider.

    Writes a report header to *output*; each delete helper appends its own
    details to the same file.
    """
    with open(output, 'w') as report:
        report.write(
            'ec2cleanup.py, Address, Volume, LoadBalancer and Network Interface Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
    # (status line, cleanup helper, helper-specific exclusion kwarg)
    cleanup_steps = (
        ("Deleting volumes...",
         delete_unattached_volumes, {'excluded_volumes': exclude_volumes}),
        ("Deleting Elastic LoadBalancers...",
         delete_unused_loadbalancers, {'excluded_elbs': exclude_elbs}),
        ("Deleting Elastic Network Interfaces...",
         delete_unused_network_interfaces, {'excluded_enis': exclude_enis}),
        ("Releasing addresses...",
         delete_disassociated_addresses, {'excluded_eips': exclude_eips}),
    )
    for provider_key in list_provider_keys('ec2'):
        provider_mgmt = get_mgmt(provider_key)
        print("----- Provider: {} -----".format(provider_key))
        for message, cleanup, extra_kwargs in cleanup_steps:
            print(message)
            cleanup(provider_mgmt=provider_mgmt, output=output, **extra_kwargs)
def run(**kwargs):
    """Download the image once via an ssh helper host, then upload it to each
    gce provider in its own daemon thread.

    Args:
        **kwargs: expects ssh_host/ssh_user/ssh_pass (defaulting to the
            configured aws cli tool client and host_default credentials),
            plus image_url, template_name, bucket_name and stream.
    """
    # Setup defaults for the cli tool machine
    host = kwargs.get('ssh_host') or \
        cfme_data['template_upload']['template_upload_ec2']['aws_cli_tool_client']
    user = kwargs.get('ssh_user') or credentials['host_default']['username']
    passwd = kwargs.get('ssh_pass') or credentials['host_default']['password']

    # Download file once and thread uploading to different gce regions
    with make_ssh_client(host, user, passwd) as ssh_client:
        file_name, file_path = download_image_file(kwargs.get('image_url'), ssh_client)

    thread_queue = []
    for provider in list_provider_keys("gce"):
        template_name = kwargs.get('template_name')
        bucket_name = kwargs.get('bucket_name')
        stream = kwargs.get('stream')
        # NOTE(review): the ssh client is created in a with-block but handed to
        # a daemon thread; the with-block exits (and presumably closes the
        # connection) right after thread.start(), possibly while
        # upload_template is still running — confirm make_ssh_client /
        # upload_template tolerate this.
        with make_ssh_client(host, user, passwd) as ssh_client:
            thread = Thread(target=upload_template,
                            args=(provider, template_name, stream, file_name,
                                  file_path, ssh_client, bucket_name))
            # Daemon threads die with the main process (KeyboardInterrupt-friendly).
            thread.daemon = True
            thread_queue.append(thread)
            thread.start()

    for thread in thread_queue:
        thread.join()
def run(**kwargs):
    """Calls all the functions needed to upload new template to RHEVM.

    This is called either by template_upload_all script, or by main function.

    Args:
        **kwargs: Kwargs generated from
            cfme_data['template_upload']['template_upload_rhevm'].
    """
    thread_queue = []
    valid_providers = []

    providers = list_provider_keys("rhevm")
    if kwargs['provider_data']:
        # provider_data replaces both the provider list and the config source.
        mgmt_sys = providers = kwargs['provider_data']['management_systems']

    # First pass: collect providers that are reachable and have a running
    # ovirt-engine service.
    for provider in providers:
        if kwargs['provider_data']:
            if mgmt_sys[provider]['type'] != 'rhevm':
                continue
            sshname = mgmt_sys[provider]['sshname']
            sshpass = mgmt_sys[provider]['sshpass']
            rhevip = mgmt_sys[provider]['ipaddress']
        else:
            # NOTE: mgmt_sys is (re)bound here and intentionally reused by the
            # second loop below — this branch always runs when provider_data
            # is falsy, so the name is defined before the second loop needs it.
            mgmt_sys = cfme_data['management_systems']
            ssh_rhevm_creds = mgmt_sys[provider]['ssh_creds']
            sshname = credentials[ssh_rhevm_creds]['username']
            sshpass = credentials[ssh_rhevm_creds]['password']
            rhevip = mgmt_sys[provider]['ipaddress']
        print("RHEVM:{} verifying provider's state before template upload".format(provider))
        if not net.is_pingable(rhevip):
            continue
        elif not is_ovirt_engine_running(rhevip, sshname, sshpass):
            print('RHEVM:{} ovirt-engine service not running..'.format(provider))
            continue
        valid_providers.append(provider)

    # Second pass: start one daemon upload thread per healthy provider.
    for provider in valid_providers:
        if kwargs['provider_data']:
            sshname = mgmt_sys[provider]['sshname']
            sshpass = mgmt_sys[provider]['sshpass']
            username = mgmt_sys[provider]['username']
            password = mgmt_sys[provider]['password']
        else:
            ssh_rhevm_creds = mgmt_sys[provider]['ssh_creds']
            sshname = credentials[ssh_rhevm_creds]['username']
            sshpass = credentials[ssh_rhevm_creds]['password']
            rhevm_credentials = mgmt_sys[provider]['credentials']
            username = credentials[rhevm_credentials]['username']
            password = credentials[rhevm_credentials]['password']
        rhevip = mgmt_sys[provider]['ipaddress']
        thread = Thread(target=upload_template,
                        args=(rhevip, sshname, sshpass, username, password, provider,
                              kwargs.get('image_url'), kwargs.get('template_name'),
                              kwargs['provider_data'], kwargs['stream']))
        # Daemon threads die with the main process (KeyboardInterrupt-friendly).
        thread.daemon = True
        thread_queue.append(thread)
        thread.start()

    for thread in thread_queue:
        thread.join()
def templates_uploaded_on_providers(api, stream, template):
    """Check that the stream's uploaded images are each present on at least
    one provider of the matching type.

    Exits when untested templates remain; otherwise returns True only when
    every uploaded image type was found, False as soon as one is missing.
    """
    if get_untested_templates(api, stream, template):
        print('report will not be generated, proceed with the next untested provider')
        sys.exit()

    def present(image_key, provider_type, temp):
        # An image that was never uploaded does not block the report.
        if image_key not in images_uploaded(stream):
            return True
        return bool(provider_in_the_list(list_provider_keys(provider_type),
                                         temp['providers']))

    untested = api.template.get(
        limit=1, tested=False, group__name=stream).get('objects', [])
    for temp in untested:
        for image_key, provider_type in (('template_rhevm', 'rhevm'),
                                         ('template_rhos', 'openstack'),
                                         ('template_vsphere', 'virtualcenter'),
                                         ('template_scvmm', 'scvmm')):
            if not present(image_key, provider_type, temp):
                return False
    return True
def main(*providers):
    """Delete unassigned (fixed_ip is None) floating IPs.

    Args:
        *providers: provider keys to clean; when omitted, every configured
            'openstack' provider is processed (previous behavior).
    """
    # Bug fix: the original ignored *providers entirely and always iterated
    # every openstack provider; an explicit argument list is now honored,
    # falling back to the old behavior when no providers are given.
    for provider_key in providers or list_provider_keys('openstack'):
        print('Checking {}'.format(provider_key))
        api = get_mgmt(provider_key).api
        try:
            fips = api.floating_ips.findall(fixed_ip=None)
        except Exception:
            # Best-effort: report the last traceback line and move on.
            print('Unable to get fips for {}:'.format(provider_key))
            print(format_exc().splitlines()[-1])
            continue
        for fip in fips:
            print('Deleting {} on {}'.format(fip.ip, provider_key))
            fip.delete()
            print('{} deleted'.format(fip.ip))
def ec2cleanup(exclude_volumes, exclude_eips, output):
    """Release disassociated elastic IPs and delete unattached volumes on
    every ec2 provider.

    Args:
        exclude_volumes: volume identifiers to keep.
        exclude_eips: elastic IPs to keep.
        output: report file path; helpers append their own details to it.
    """
    with open(output, 'w') as report:
        report.write('ec2cleanup.py, Address and Volume Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
    for provider_key in list_provider_keys('ec2'):
        provider_mgmt = get_mgmt(provider_key)
        # Fix: these were Python 2-only `print "..."` statements; with a
        # single argument, print(...) produces identical output on Python 2
        # while also being valid Python 3.
        print("----- Provider: {} -----".format(provider_key))
        print("Releasing addresses...")
        delete_disassociated_addresses(provider_mgmt=provider_mgmt,
                                       excluded_eips=exclude_eips,
                                       output=output)
        print("Deleting volumes...")
        delete_unattached_volumes(provider_mgmt=provider_mgmt,
                                  excluded_volumes=exclude_volumes,
                                  output=output)
def run(**kwargs):
    """Calls all the functions needed to import new template to EC2.

    This is called either by template_upload_all script, or by main function.

    Args:
        **kwargs: Kwargs are passed by template_upload_all.
    """
    mgmt_sys = cfme_data['management_systems']
    for provider in list_provider_keys('ec2'):
        # Look the provider's EC2 credentials up in the credential store.
        creds_key = mgmt_sys[provider]['credentials']
        upload_template(
            provider,
            credentials[creds_key]['username'],
            credentials[creds_key]['password'],
            mgmt_sys[provider]['upload_bucket_name'],
            kwargs.get('image_url'),
            kwargs.get('template_name'),
        )
def process_tags(provider_keys, tags=None):
    """ Process the tags provided on command line to build a list of provider keys that match

    :param tags: list of tags to match against cfme_data
    :param provider_keys: set of provider keys that matches are added to
        (mutated in place via ``set.add``)
    :return: None; results are accumulated in *provider_keys*
    """
    # Check for tags first, build list of provider_keys from it
    if tags:
        all_provider_keys = list_provider_keys()
        for key in all_provider_keys:
            # need to check tags list against yaml tags list for intersection of a single tag
            # Robustness fix: a provider without a 'tags' entry (or a null
            # value) previously raised KeyError/TypeError; treat it as untagged.
            yaml_tags = cfme_data['management_systems'][key].get('tags') or []
            if any(tag in tags for tag in yaml_tags):
                print('Matched tag from {} on provider {}:tags:{}'.format(tags, key, yaml_tags))
                provider_keys.add(key)
def azure_cleanup(nic_template, pip_template, days_old, output):
    """Remove leftover NICs, public IPs and empty stacks on every azure
    provider, logging everything to *output*.

    Returns 0 on success, 1 when an exception was caught (traceback is
    appended to the report).
    """
    with open(output, 'w') as report:
        report.write('azure_cleanup.py, NICs, PIPs and Stack Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
        try:
            for provider_key in list_provider_keys('azure'):
                provider_mgmt = get_mgmt(provider_key)
                free_nics = provider_mgmt.list_free_nics(nic_template)
                free_pips = provider_mgmt.list_free_pip(pip_template)
                old_stacks = provider_mgmt.list_stack(days_old=days_old)
                report.write("----- Provider: {} -----\n".format(provider_key))

                if not free_nics:
                    report.write("No \'{}\' NICs were found\n".format(nic_template))
                else:
                    report.write("Removing Nics with the name \'{}\':\n".format(nic_template))
                    report.write("\n".join(str(k) for k in free_nics) + "\n")
                    provider_mgmt.remove_nics_by_search(nic_template)

                if not free_pips:
                    report.write("No \'{}\' Public IPs were found\n".format(pip_template))
                else:
                    report.write(
                        "Removing Public IPs with the name \'{}\':\n".format(pip_template))
                    report.write("\n".join(str(k) for k in free_pips) + "\n")
                    provider_mgmt.remove_pips_by_search(pip_template)

                if not old_stacks:
                    report.write(
                        "No stacks older than \'{}\' days were found\n".format(days_old))
                else:
                    report.write("Removing empty Stacks:\n")
                    for stack in old_stacks:
                        if provider_mgmt.is_stack_empty(stack):
                            provider_mgmt.delete_stack(stack)
                            report.write("Stack {} is empty - Removed\n".format(stack))
            return 0
        except Exception:
            report.write("Something bad happened during Azure cleanup\n")
            report.write(tb.format_exc())
            return 1
def run(**kwargs):
    """Spawn one upload_template daemon thread per gce provider and wait for all."""
    workers = []
    for provider in list_provider_keys("gce"):
        provider_conf = cfme_data['management_systems'][provider]
        service_account = credentials[provider_conf['credentials']]['service_account']
        worker = Thread(target=upload_template,
                        args=(provider_conf['project'],
                              provider_conf['zone'],
                              service_account,
                              kwargs.get('image_url'),
                              kwargs.get('template_name'),
                              kwargs.get('bucket_name'),
                              provider))
        # Daemon threads die with the main process (KeyboardInterrupt-friendly).
        worker.daemon = True
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
def run(**kwargs):
    """Spawn an upload_template daemon thread for every reachable openstack
    provider and wait for them all to finish."""
    thread_queue = []
    providers = list_provider_keys("openstack")
    if kwargs['provider_data']:
        provider_data = kwargs['provider_data']
        mgmt_sys = providers = provider_data['management_systems']
    for provider in providers:
        if kwargs['provider_data']:
            # provider_data carries inline credentials
            if mgmt_sys[provider]['type'] != 'openstack':
                continue
            username = mgmt_sys[provider]['username']
            password = mgmt_sys[provider]['password']
            sshname = mgmt_sys[provider]['sshname']
            sshpass = mgmt_sys[provider]['sshpass']
        else:
            # otherwise look the credentials up in the credential store
            mgmt_sys = cfme_data['management_systems']
            rhos_credentials = credentials[mgmt_sys[provider]['credentials']]
            default_host_creds = credentials['host_default']
            username, password = (rhos_credentials['username'],
                                  rhos_credentials['password'])
            sshname, sshpass = (default_host_creds['username'],
                                default_host_creds['password'])
        rhosip = mgmt_sys[provider]['ipaddress']
        auth_url = mgmt_sys[provider]['auth_url']
        # Skip providers that are unreachable by ping or SSH.
        if not net.is_pingable(rhosip):
            continue
        if not net.net_check(ports.SSH, rhosip):
            print("SSH connection to {}:{} failed, port unavailable".format(
                provider, ports.SSH))
            continue
        uploader = Thread(target=upload_template,
                          args=(rhosip, sshname, sshpass, username, password,
                                auth_url, provider, kwargs.get('image_url'),
                                kwargs.get('template_name'),
                                kwargs['provider_data'], kwargs['stream']))
        # Daemon threads die with the main process (KeyboardInterrupt-friendly).
        uploader.daemon = True
        thread_queue.append(uploader)
        uploader.start()
    for uploader in thread_queue:
        uploader.join()
def azure_cleanup(nic_template, pip_template, days_old, output):
    """Delete free NICs/public IPs matching the given name patterns and empty
    stacks older than *days_old* on each azure provider.

    Returns 0 on success, 1 on any exception (details go to the report file).
    """
    with open(output, 'w') as report:
        report.write('azure_cleanup.py, NICs, PIPs and Stack Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
        try:
            for provider_key in list_provider_keys('azure'):
                provider_mgmt = get_mgmt(provider_key)
                nic_list = provider_mgmt.list_free_nics(nic_template)
                pip_list = provider_mgmt.list_free_pip(pip_template)
                stack_list = provider_mgmt.list_stack(days_old=days_old)
                report.write("----- Provider: {} -----\n".format(provider_key))

                # NICs and public IPs share the same report/remove pattern.
                resources = (
                    (nic_list, nic_template,
                     "Removing Nics with the name \'{}\':\n",
                     "No \'{}\' NICs were found\n",
                     provider_mgmt.remove_nics_by_search),
                    (pip_list, pip_template,
                     "Removing Public IPs with the name \'{}\':\n",
                     "No \'{}\' Public IPs were found\n",
                     provider_mgmt.remove_pips_by_search),
                )
                for found, pattern, removing_fmt, missing_fmt, remover in resources:
                    if found:
                        report.write(removing_fmt.format(pattern))
                        report.write("\n".join(str(k) for k in found))
                        report.write("\n")
                        remover(pattern)
                    else:
                        report.write(missing_fmt.format(pattern))

                if stack_list:
                    report.write("Removing empty Stacks:\n")
                    for stack in stack_list:
                        if provider_mgmt.is_stack_empty(stack):
                            provider_mgmt.delete_stack(stack)
                            report.write("Stack {} is empty - Removed\n".format(stack))
                else:
                    report.write(
                        "No stacks older than \'{}\' days were found\n".format(days_old))
            return 0
        except Exception:
            report.write("Something bad happened during Azure cleanup\n")
            report.write(tb.format_exc())
            return 1
def run(**kwargs):
    """Upload the template to every pingable virtualcenter provider, one
    daemon thread each; logs and returns False on any error."""
    try:
        workers = []
        providers = list_provider_keys("virtualcenter")
        if kwargs['provider_data']:
            mgmt_sys = providers = kwargs['provider_data']['management_systems']
        for provider in providers:
            if kwargs['provider_data']:
                # Inline provider_data supplies credentials directly.
                if mgmt_sys[provider]['type'] != 'virtualcenter':
                    continue
                username = mgmt_sys[provider]['username']
                password = mgmt_sys[provider]['password']
            else:
                mgmt_sys = cfme_data['management_systems']
                creds = credentials[mgmt_sys[provider]['credentials']]
                username = creds['username']
                password = creds['password']
            host_ip = mgmt_sys[provider]['ipaddress']
            hostname = mgmt_sys[provider]['hostname']
            # Client is constructed before the ping check (same order as before).
            client = VMWareSystem(hostname, username, password)
            if not net.is_pingable(host_ip):
                continue
            worker = Thread(target=upload_template,
                            args=(client, hostname, username, password, provider,
                                  kwargs.get('image_url'),
                                  kwargs.get('template_name'),
                                  kwargs['provider_data'], kwargs['stream']))
            # Daemon threads die with the main process (KeyboardInterrupt-friendly).
            worker.daemon = True
            workers.append(worker)
            worker.start()
        for worker in workers:
            worker.join()
    except Exception:
        logger.exception('Exception during run method')
        return False
def run(**kwargs):
    """Upload the template to each pingable virtualcenter provider.

    Prints the exception and returns False when anything goes wrong.
    """
    try:
        upload_threads = []
        providers = list_provider_keys("virtualcenter")
        if kwargs['provider_data']:
            mgmt_sys = providers = kwargs['provider_data']['management_systems']
        for provider in providers:
            if kwargs['provider_data']:
                if mgmt_sys[provider]['type'] != 'virtualcenter':
                    continue
                username = mgmt_sys[provider]['username']
                password = mgmt_sys[provider]['password']
            else:
                mgmt_sys = cfme_data['management_systems']
                creds = credentials[mgmt_sys[provider]['credentials']]
                username, password = creds['username'], creds['password']
            host_ip = mgmt_sys[provider]['ipaddress']
            hostname = mgmt_sys[provider]['hostname']
            # Build the client before the reachability check (original order).
            client = VMWareSystem(hostname, username, password)
            if not net.is_pingable(host_ip):
                continue
            upload_args = (client, hostname, username, password, provider,
                           kwargs.get('image_url'), kwargs.get('template_name'),
                           kwargs['provider_data'], kwargs['stream'])
            thread = Thread(target=upload_template, args=upload_args)
            # Daemon threads die with the main process (KeyboardInterrupt-friendly).
            thread.daemon = True
            upload_threads.append(thread)
            thread.start()
        for thread in upload_threads:
            thread.join()
    except Exception as e:
        print(e)
        return False
def run(**kwargs):
    """Upload a VHD to each scvmm provider and turn it into a template there.

    Args:
        **kwargs: image_url, template_name, upload, template, stream and
            optionally library; defaults are merged in per provider by
            make_kwargs_scvmm.
    """
    for provider in list_provider_keys("scvmm"):
        # NOTE: kwargs is rebound on every iteration with per-provider defaults.
        kwargs = make_kwargs_scvmm(cfme_data, provider,
                                   kwargs.get('image_url'),
                                   kwargs.get('template_name'))
        check_kwargs(**kwargs)
        mgmt_sys = cfme_data['management_systems'][provider]
        host_fqdn = mgmt_sys['hostname_fqdn']
        creds = credentials[mgmt_sys['credentials']]

        # For powershell to work, we need to extract the User Name from the Domain
        user = creds['username'].split('\\')
        if len(user) == 2:
            username_powershell = user[1]
        else:
            username_powershell = user[0]

        username_scvmm = creds['domain'] + "\\" + creds['username']

        scvmm_args = {
            "hostname": mgmt_sys['ipaddress'],
            "username": username_powershell,
            "password": creds['password'],
            "domain": creds['domain'],
            "provisioning": mgmt_sys['provisioning']
        }
        client = SCVMMSystem(**scvmm_args)

        url = kwargs.get('image_url')

        # Template name equals either user input of we extract the name from the url
        new_template_name = kwargs.get('template_name')
        if new_template_name is None:
            # NOTE(review): [:-4] assumes a 4-character extension such as
            # '.vhd'; os.path.splitext would be safer — confirm the url shape.
            new_template_name = os.path.basename(url)[:-4]

        # NOTE(review): '{}'-style placeholders with extra args suggest a
        # custom logger wrapper, not stdlib logging — confirm.
        logger.info("SCVMM:{} Make Template out of the VHD {}", provider, new_template_name)

        # use_library is either user input or we use the cfme_data value
        library = kwargs.get('library', mgmt_sys['template_upload'].get('vhds', None))

        logger.info("SCVMM:{} Template Library: {}", provider, library)

        # The VHD name changed, match the template_name.
        new_vhd_name = new_template_name + '.vhd'

        # Optional template settings pulled from the provider's yaml config.
        network = mgmt_sys['template_upload'].get('network', None)
        os_type = mgmt_sys['template_upload'].get('os_type', None)
        cores = mgmt_sys['template_upload'].get('cores', None)
        ram = mgmt_sys['template_upload'].get('ram', None)

        # Uses PowerShell Get-SCVMTemplate to return a list of templates and aborts if exists.
        if not client.does_template_exist(new_template_name):
            if kwargs.get('upload'):
                logger.info("SCVMM:{} Uploading VHD image to Library VHD folder.", provider)
                upload_vhd(client, url, library, new_vhd_name)
            if kwargs.get('template'):
                logger.info("SCVMM:{} Make Template out of the VHD {}", provider,
                            new_template_name)
                make_template(
                    client,
                    host_fqdn,
                    new_template_name,
                    library,
                    network,
                    os_type,
                    username_scvmm,
                    cores,
                    ram
                )
            try:
                # Poll until the template shows up, then register it in trackerbot.
                wait_for(lambda: client.does_template_exist(new_template_name),
                         fail_condition=False, delay=5)
                logger.info("SCVMM:{} template {} uploaded success",
                            provider, new_template_name)
                logger.info("SCVMM:{} Add template {} to trackerbot",
                            provider, new_template_name)
                trackerbot.trackerbot_add_provider_template(kwargs.get('stream'),
                                                            provider,
                                                            kwargs.get('template_name'))
            except Exception:
                logger.exception("SCVMM:{} Exception verifying the template {}",
                                 provider, new_template_name)
        else:
            logger.info("SCVMM: A Template with that name already exists in the SCVMMLibrary")
def generate_html_report(api, stream, filename, appliance_template):
    """Write a tester report for *stream* to *filename*.

    Renders an HTML template when the filename contains 'html'; otherwise
    appends a plain-text PASSED/FAILED/MISSING section per provider type.
    Exits early when untested templates remain or the requested template is
    not the latest one.

    NOTE(review): `map(...)` / `filter(...)` with tuple-parameter lambdas
    (`lambda (x): ...`) is Python 2-only syntax; under Python 3 these are
    SyntaxErrors and map() would be lazy — this function is py2-only as is.
    """
    status = 'PASSED'
    number_of_images_before = len(images_uploaded(stream))
    if get_untested_templates(api, stream, appliance_template):
        print('report will not be generated, proceed with the next untested provider')
        sys.exit()
    stream_data = get_latest_tested_template_on_stream(api, stream, appliance_template)

    # More images showed up while we were querying: wait for provider uploads.
    if len(images_uploaded(stream)) > number_of_images_before:
        print("new images are uploaded on latest directory, wait for upload on providers")
        wait_for_templates_on_providers(api, stream, appliance_template)
    if appliance_template and appliance_template != stream_data['template_name']:
        print("the report will be generated only for the latest templates")
        sys.exit()

    if stream_data and not get_untested_templates(api, stream_data['group_name'],
                                                  appliance_template):
        print("Found tested template for {}".format(stream))
        print("Gathering tested template data for {}".format(stream))
        print("Updating the template log")
        stream_html = [stream_data['template_name'], stream_data['passed_on_providers'],
                       stream_data['failed_on_providers'], stream_data['group_name'],
                       stream_data['datestamp']]
        if 'html' in filename:
            # HTML output: render the jinja template and overwrite the file.
            data = template_env.get_template('template_tester_report.html').render(
                upstream=stream_html)
            with open(filename, 'w') as report:
                report.write(data)
        else:
            # Plain-text output: append one section per provider type.
            with open(filename, 'a+') as report:
                # --- OpenStack ---
                if 'template_rhos' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for OpenStack in latest directory')
                    report.write('\n\nMISSING: Image for OpenStack in latest directory')
                elif provider_in_the_list(list_provider_keys('openstack'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_rhos']))
                    # py2 eager map used purely for its report.write side effect
                    map(lambda (x): report.write('\n{}: Passed'.format(x)),
                        provider_in_the_list(list_provider_keys('openstack'),
                                             stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('openstack'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_rhos']))
                    map(lambda (x): report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('openstack'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: OpenStack template is not available on any '
                          'rhos providers yet')
                    report.write('\n\nMISSING: OpenStack template is not available on any '
                                 'rhos providers yet')

                # --- RHEVM ---
                if 'template_rhevm' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for RHEVM in latest directory')
                    report.write('\n\nMISSING: Image for RHEVM in latest directory')
                elif provider_in_the_list(list_provider_keys('rhevm'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_rhevm']))
                    map(lambda(x): report.write('\n{}: Passed'.format(x)),
                        provider_in_the_list(list_provider_keys('rhevm'),
                                             stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('rhevm'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_rhevm']))
                    map(lambda(x): report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('rhevm'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: RHEVM template is not available on any '
                          'rhevm providers yet')
                    report.write('\n\nMISSING: RHEVM template is not available on any '
                                 'rhevm providers yet')

                # --- vSphere / virtualcenter ---
                if 'template_vsphere' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for VIRTUALCENTER in latest directory')
                    report.write('\n\nMISSING: Image for VIRTUALCENTER in latest directory')
                elif provider_in_the_list(list_provider_keys('virtualcenter'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_vsphere']))
                    map(lambda (x): report.write('\n{}: Passed'.format(x)),
                        provider_in_the_list(list_provider_keys('virtualcenter'),
                                             stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('virtualcenter'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_vsphere']))
                    map(lambda (x): report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('virtualcenter'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: VIRTUALCENTER template is not available on any '
                          'vmware providers yet')
                    report.write('\n\nMISSING: VIRTUALCENTER template is not available on any '
                                 'vmware providers yet')

                # --- SCVMM ---
                if 'template_scvmm' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for SCVMM in latest directory')
                    report.write('\n\nMISSING: Image for SCVMM in latest directory')
                elif provider_in_the_list(list_provider_keys('scvmm'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_scvmm']))
                    map(lambda (x): report.write('\n{}: Passed'.format(x)),
                        provider_in_the_list(list_provider_keys('scvmm'),
                                             stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('scvmm'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_scvmm']))
                    map(lambda (x): report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('scvmm'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: SCVMM template is not available on any '
                          'scvmm providers yet')
                    report.write('\n\nMISSING: SCVMM template is not available on any '
                                 'scvmm providers yet')

                # Re-read everything just written to derive the overall status.
                report.seek(0, 0)
                lines = report.readlines()
                template_missing = filter(lambda (x): "MISSING" in x, lines)
                template_passed = filter(lambda (x): "PASSED" in x, lines)
                template_failed = filter(lambda (x): "FAILED" in x, lines)
                if template_failed:
                    status = "FAILED"
                # Only MISSING entries and nothing pass/fail: abort with a message.
                if template_missing and not (template_passed or template_failed):
                    report.close()
                    sys.exit("Template is MISSING....Please verify uploads....")
        print("template_tester_results report generated:{}".format(status))
    else:
        print("No Templates tested on: {}".format(datetime.datetime.now()))
def cleanup_vms(texts, max_hours=24, providers=None, prompt=True):
    """Find VMs matching any regex in *texts* that are older than *max_hours*
    and delete them (after an optional confirmation prompt).

    Args:
        texts: regex patterns matched case-insensitively against VM names.
        max_hours: age threshold, converted to a timedelta.
        providers: provider keys to scan; defaults to all known providers.
        prompt: when True, ask on stdin before deleting anything.

    NOTE(review): relies on module-level globals `args` (CLI namespace),
    `providers_vm_list` and helpers `process_provider_vms` /
    `delete_provider_vms`; `raw_input` makes this Python 2-only.
    """
    providers = providers or list_provider_keys()
    providers_data = cfme_data.get("management_systems", {})
    delta = datetime.timedelta(hours=int(max_hours))
    vms_to_delete = defaultdict(set)
    thread_queue = []
    # precompile regexes
    matchers = [re.compile(text, re.IGNORECASE) for text in texts]

    for provider_key in providers:
        # check for cleanup boolean
        if not cfme_data['management_systems'][provider_key].get('cleanup', False):
            print('Skipping {}, cleanup set to false in yaml'.format(provider_key))
            continue
        ipaddress = cfme_data['management_systems'][provider_key].get('ipaddress', None)
        # Skip providers that are configured but unreachable.
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        provider_type = providers_data[provider_key].get('type', None)
        list_vms = args.list_vms and provider_type in args.provider_type
        thread = Thread(target=process_provider_vms,
                        args=(provider_key, provider_type, matchers,
                              delta, vms_to_delete, list_vms))
        # Mark as daemon thread for easy-mode KeyboardInterrupt handling
        thread.daemon = True
        thread_queue.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_queue:
        thread.join()

    # Dump the collected VM listing (if any) as an org-mode table.
    if providers_vm_list:
        with open(args.outfile, 'a+') as report:
            message = tabulate(providers_vm_list,
                               headers=['ProviderType', 'ProviderKey', 'InstanceName',
                                        'CreatedSince', 'InstanceType', 'InstanceStatus'],
                               tablefmt='orgtbl')
            report.write(message)
            print(message)

    # Summarize what would be deleted, per provider.
    for provider_key, vm_set in vms_to_delete.items():
        print('{}:'.format(provider_key))
        for vm_name, vm_delta in vm_set:
            days, hours = vm_delta.days, vm_delta.seconds / 3600
            print(' {} is {} days, {} hours old'.format(vm_name, days, hours))

    if vms_to_delete and prompt:
        yesno = raw_input('Delete these VMs? [y/N]: ')
        if str(yesno).lower() != 'y':
            print('Exiting.')
            return 0

    if not vms_to_delete:
        print('No VMs to delete.')

    # Second wave of daemon threads actually performs the deletions.
    thread_queue = []
    for provider_key, vm_set in vms_to_delete.items():
        thread = Thread(target=delete_provider_vms,
                        args=(provider_key, [name for name, t_delta in vm_set]))
        # Mark as daemon thread for easy-mode KeyboardInterrupt handling
        thread.daemon = True
        thread_queue.append(thread)
        thread.start()
    for thread in thread_queue:
        thread.join()
    print("Deleting finished")
def main(*providers):
    """Delete unattached volumes older than GRACE_TIME on the given providers."""
    for provider_key in providers:
        print("Cleaning up {}".format(provider_key))
        api = get_mgmt(provider_key).capi
        try:
            volumes = api.volumes.findall(attachments=[])
        except Exception as e:
            # Report and move on to the next provider.
            print("Connect to provider failed:{} {} {}".format(
                provider_key, type(e).__name__, str(e)))
            continue
        for volume in volumes:
            created = iso8601.parse_date(volume.created_at)
            if created < (datetime.now(tz=local_tz) - GRACE_TIME):
                print("Deleting {}".format(volume.id))
                try:
                    volume.delete()
                except Exception as e:
                    # A single failed delete should not stop the sweep.
                    print("Delete failed: {} {}".format(type(e).__name__, str(e)))


if __name__ == "__main__":
    cli_providers = sys.argv[1:]
    if not cli_providers:
        # No arguments: sweep every configured openstack provider.
        cli_providers = list_provider_keys("openstack")
    main(*cli_providers)
vm_name, status or NULL, creation or NULL, str(vm_type) or NULL]) continue output_queue.put(output_list) return if __name__ == "__main__": args = parse_cmd_line() # providers as a set when processing tags to ensure unique entries providers = set(args.provider) process_tags(providers, args.tag) providers = providers or list_provider_keys() queue = Queue() # for MP output proc_list = [ Process(target=list_vms, args=(provider, queue), name='list_vms:{}'.format(provider)) for provider in providers ] for proc in proc_list: proc.start() for proc in proc_list: proc.join() print('Done processing providers, assembling report...') # Now pull all the results off of the queue # Stacking the generator this way is equivalent to using list.extend instead of list.append
def get_orphaned_vmware_files(provider=None):
    """Scan datastores for orphaned files on one (or every) virtualcenter
    provider."""
    if provider:
        targets = [provider]
    else:
        targets = list_provider_keys("virtualcenter")
    for provider_key in targets:
        # we can add thread here
        get_datastores_per_host(provider_key)
def main(trackerbot_url, mark_usable=None):
    """Synchronize provider/template data with the trackerbot server.

    Scans all known providers for their templates (one thread per pingable
    provider), registers template/provider pairs trackerbot does not know
    about yet, then removes provider relationships that no longer exist
    (skipping providers that failed to respond) and finally deletes
    templates left with no providers at all.

    Args:
        trackerbot_url: base URL of the trackerbot API.
        mark_usable: when not None, newly added provider templates are
            submitted with this value as their 'usable' flag; when None the
            flag is omitted entirely.
    """
    api = trackerbot.api(trackerbot_url)

    thread_q = []
    thread_lock = Lock()
    # template name -> list of provider keys that have it (filled in by threads)
    template_providers = defaultdict(list)
    all_providers = set(list_provider_keys())
    # providers that failed during the scan; cleanup below skips these
    unresponsive_providers = set()

    # Queue up list_template calls
    for provider_key in all_providers:
        ipaddress = cfme_data['management_systems'][provider_key].get('ipaddress', None)
        # skip providers that do not answer a ping up front
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        thread = Thread(target=get_provider_templates,
            args=(provider_key, template_providers, unresponsive_providers, thread_lock))
        thread_q.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_q:
        thread.join()

    seen_templates = set()  # NOTE(review): written below but never read in this function

    if mark_usable is None:
        usable = {}
    else:
        usable = {'usable': mark_usable}

    existing_provider_templates = [
        pt['id']
        for pt in trackerbot.depaginate(api, api.providertemplate.get())['objects']]

    # Find some templates and update the API
    for template_name, providers in template_providers.items():
        template_name = str(template_name)
        group_name, datestamp, stream = trackerbot.parse_template(template_name)

        # Don't want sprout templates
        if group_name in ('sprout', 'rhevm-internal'):
            print('Ignoring {} from group {}'.format(template_name, group_name))
            continue

        seen_templates.add(template_name)
        group = trackerbot.Group(group_name, stream=stream)
        template = trackerbot.Template(template_name, group, datestamp)

        for provider_key in providers:
            provider = trackerbot.Provider(provider_key)

            # providertemplate ids appear to be '<template>_<provider>'
            # (matched against pt['id'] values collected above)
            if '{}_{}'.format(template_name, provider_key) in existing_provider_templates:
                print('Template {} already tracked for provider {}'.format(
                    template_name, provider_key))
                continue

            try:
                trackerbot.mark_provider_template(api, provider, template, **usable)
                print('Added {} template {} on provider {} (datestamp: {})'.format(
                    group_name, template_name, provider_key, datestamp))
            except SlumberHttpBaseException as ex:
                print("{}\t{}".format(ex.response.status_code, ex.content))

    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    for pt in trackerbot.depaginate(api, api.providertemplate.get())['objects']:
        provider_key, template_name = pt['provider']['key'], pt['template']['name']
        if provider_key not in template_providers[template_name] \
                and provider_key not in unresponsive_providers:
            if provider_key in all_providers:
                print("Cleaning up template {} on {}".format(template_name, provider_key))
                trackerbot.delete_provider_template(api, provider_key, template_name)
            else:
                print("Skipping template cleanup {} on unknown provider {}".format(
                    template_name, provider_key))

    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(api, api.template.get())['objects']:
        if not template['providers']:
            print("Deleting template {} (no providers)".format(template['name']))
            api.template(template['name']).delete()
def main(trackerbot_url, mark_usable=None):
    """Sync locally-visible provider templates with the trackerbot server.

    One thread per pingable provider collects templates into
    ``template_providers``; new template/provider pairs are then registered
    with trackerbot, stale provider relationships are removed (unresponsive
    providers are skipped), and templates with no remaining providers are
    deleted.

    Args:
        trackerbot_url: base URL of the trackerbot API.
        mark_usable: optional 'usable' flag to attach to newly created
            provider templates; omitted when None.
    """
    api = trackerbot.api(trackerbot_url)

    thread_q = []
    thread_lock = Lock()
    # shared result dict: template name -> provider keys (threads append)
    template_providers = defaultdict(list)
    all_providers = set(list_provider_keys())
    # filled by scanning threads; cleanup below skips these providers
    unresponsive_providers = set()

    # Queue up list_template calls
    for provider_key in all_providers:
        ipaddress = cfme_data['management_systems'][provider_key].get(
            'ipaddress', None)
        # don't bother scanning providers that don't answer a ping
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        thread = Thread(target=get_provider_templates,
            args=(provider_key, template_providers, unresponsive_providers, thread_lock))
        thread_q.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_q:
        thread.join()

    seen_templates = set()  # NOTE(review): only ever written in this function

    if mark_usable is None:
        usable = {}
    else:
        usable = {'usable': mark_usable}

    existing_provider_templates = [
        pt['id'] for pt in trackerbot.depaginate(
            api, api.providertemplate.get())['objects']
    ]

    # Find some templates and update the API
    for template_name, providers in template_providers.items():
        template_name = str(template_name)
        group_name, datestamp, stream = trackerbot.parse_template(
            template_name)

        # Don't want sprout templates
        if group_name in ('sprout', 'rhevm-internal'):
            print('Ignoring {} from group {}'.format(template_name, group_name))
            continue

        seen_templates.add(template_name)
        group = trackerbot.Group(group_name, stream=stream)
        template = trackerbot.Template(template_name, group, datestamp)

        for provider_key in providers:
            provider = trackerbot.Provider(provider_key)

            # skip pairs trackerbot already tracks; ids appear to be
            # '<template>_<provider>' (compared against pt['id'] above)
            if '{}_{}'.format(template_name, provider_key) in existing_provider_templates:
                print('Template {} already tracked for provider {}'.format(
                    template_name, provider_key))
                continue

            try:
                trackerbot.mark_provider_template(api, provider, template, **usable)
                print('Added {} template {} on provider {} (datestamp: {})'.format(
                    group_name, template_name, provider_key, datestamp))
            except SlumberHttpBaseException as ex:
                print("{}\t{}".format(ex.response.status_code, ex.content))

    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    for pt in trackerbot.depaginate(api, api.providertemplate.get())['objects']:
        provider_key, template_name = pt['provider']['key'], pt['template'][
            'name']
        if provider_key not in template_providers[template_name] \
                and provider_key not in unresponsive_providers:
            if provider_key in all_providers:
                print("Cleaning up template {} on {}".format(
                    template_name, provider_key))
                trackerbot.delete_provider_template(api, provider_key, template_name)
            else:
                print("Skipping template cleanup {} on unknown provider {}".format(
                    template_name, provider_key))

    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(api, api.template.get())['objects']:
        if not template['providers']:
            print("Deleting template {} (no providers)".format(
                template['name']))
            api.template(template['name']).delete()
def cleanup_vms(texts, max_hours=24, providers=None, prompt=True):
    """
    Main method for the cleanup process
    Generates regex match objects
    Checks providers for cleanup boolean in yaml
    Checks provider connectivity (using ping)
    Threads process_provider_vms to build list of vms to delete
    Prompts user to continue with delete
    Threads deleting of the vms

    Args:
        texts (list): List of regex strings to match with
        max_hours (int): age limit for deletion
        providers (list): List of provider keys to scan and cleanup
        prompt (bool): Whether or not to prompt the user before deleting vms

    Returns:
        int: return code, 0 on success, otherwise raises exception
    """
    logger.info('Matching VM names against the following case-insensitive strings: %s', texts)
    # Compile regex, strip leading/trailing single quotes from cli arg
    matchers = [re.compile(text.strip("'"), re.IGNORECASE) for text in texts]

    providers_to_scan = []
    for provider_key in providers or list_provider_keys():
        # check for cleanup boolean
        if not cfme_data['management_systems'][provider_key].get('cleanup', False):
            logger.info('SKIPPING %s, cleanup set to false or missing in yaml', provider_key)
            continue
        logger.info('SCANNING %s', provider_key)
        providers_to_scan.append(provider_key)

    # scan providers for vms with name matches
    # manager = Manager()
    # NOTE(review): 'manager' is module-level (not visible here); its queues
    # are shared with the pool_manager worker processes -- confirm it is a
    # multiprocessing.Manager.
    text_match_queue = manager.Queue()
    scan_fail_queue = manager.Queue()
    provider_scan_args = [
        (provider_key, matchers, text_match_queue, scan_fail_queue)
        for provider_key in providers_to_scan]
    pool_manager(scan_provider, provider_scan_args)

    # drain the queue of name matches produced by the scan workers
    text_matched = []
    while not text_match_queue.empty():
        text_matched.append(text_match_queue.get())

    # scan vms for age matches
    age_match_queue = manager.Queue()
    vm_scan_args = [
        (provider_key, vm_name, timedelta(hours=int(max_hours)), age_match_queue,
         scan_fail_queue)
        for provider_key, vm_name in text_matched]
    pool_manager(scan_vm, vm_scan_args)

    vms_to_delete = []
    while not age_match_queue.empty():
        vms_to_delete.append(age_match_queue.get())

    scan_fail_vms = []
    # add the scan failures into deleted vms for reporting sake
    while not scan_fail_queue.empty():
        scan_fail_vms.append(scan_fail_queue.get())

    if vms_to_delete and prompt:
        yesno = raw_input('Delete these VMs? [y/N]: ')
        if str(yesno).lower() != 'y':
            logger.info('Exiting.')
            return 0

    # initialize this even if we don't have anything to delete, for report consistency
    deleted_vms = []

    if vms_to_delete:
        delete_queue = manager.Queue()
        delete_vm_args = [(provider_key, vm_name, age, delete_queue)
                          for provider_key, vm_name, age in vms_to_delete]
        pool_manager(delete_vm, delete_vm_args)
        while not delete_queue.empty():
            deleted_vms.append(delete_queue.get())  # Each item is a VmReport tuple
    else:
        logger.info('No VMs to delete.')

    # append the run summary to the report file; 'args' is the module-level
    # parsed CLI namespace (not visible in this block)
    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '## text matches: {}\n'
                     '## age matches: {}\n'
                     .format(texts, max_hours))
        message = tabulate(sorted(scan_fail_vms + deleted_vms, key=attrgetter('result')),
                           headers=['Provider', 'Name', 'Age', 'Status Before', 'Delete RC'],
                           tablefmt='orgtbl')
        report.write(message + '\n')
        logger.info(message)

    return 0
def _write_template_status(report, stream, stream_data, image_key, provider_type,
                           display_name, vendor_name):
    """Write the PASSED/FAILED/MISSING section for one provider type.

    Args:
        report: open, writable report file object.
        stream: stream name, passed through to images_uploaded().
        stream_data: dict with 'passed_on_providers' / 'failed_on_providers'.
        image_key: key into images_uploaded(stream), e.g. 'template_rhos'.
        provider_type: category for list_provider_keys(), e.g. 'openstack'.
        display_name: image name used in messages, e.g. 'OpenStack'.
        vendor_name: provider wording in the 'not available' message, e.g. 'rhos'.

    NOTE(review): assumes provider_in_the_list() returns the matching
    providers as a list (or something falsy) -- confirm against its definition.
    """
    uploaded_images = images_uploaded(stream)
    if image_key not in uploaded_images:
        message = '\n\nMISSING: Image for {} in latest directory'.format(display_name)
        print(message)
        report.write(message)
        return
    passed = provider_in_the_list(list_provider_keys(provider_type),
                                  stream_data['passed_on_providers'])
    if passed:
        report.write('\n\nPASSED: {}'.format(uploaded_images[image_key]))
        for provider in passed:
            report.write('\n{}: Passed'.format(provider))
        return
    failed = provider_in_the_list(list_provider_keys(provider_type),
                                  stream_data['failed_on_providers'])
    if failed:
        report.write('\n\nFAILED: {}'.format(uploaded_images[image_key]))
        for provider in failed:
            report.write('\n{}: Failed'.format(provider))
        return
    message = ('\n\nMISSING: {} template is not available on any '
               '{} providers yet'.format(display_name, vendor_name))
    print(message)
    report.write(message)


def generate_html_report(api, stream, filename, appliance_template):
    """Generate a template-tester report for a stream.

    Exits the process when untested templates remain or when the requested
    template is not the latest tested one.  When ``filename`` contains
    'html' an HTML report is rendered from the Jinja template; otherwise a
    plain-text report is appended to ``filename``, one section per provider
    type (openstack, rhevm, virtualcenter, scvmm).

    Args:
        api: trackerbot API client.
        stream: template stream name.
        filename: output path; 'html' in the name selects HTML output.
        appliance_template: specific template name to report, or falsy for latest.
    """
    status = 'PASSED'
    number_of_images_before = len(images_uploaded(stream))
    if get_untested_templates(api, stream, appliance_template):
        print(
            'report will not be generated, proceed with the next untested provider'
        )
        sys.exit()
    stream_data = get_latest_tested_template_on_stream(api, stream, appliance_template)

    # New images appeared while we were checking -- wait for providers to catch up.
    if len(images_uploaded(stream)) > number_of_images_before:
        print(
            "new images are uploaded on latest directory, wait for upload on providers"
        )
        wait_for_templates_on_providers(api, stream, appliance_template)
    if appliance_template and appliance_template != stream_data['template_name']:
        print("the report will be generated only for the latest templates")
        sys.exit()

    if stream_data and not get_untested_templates(
            api, stream_data['group_name'], appliance_template):
        print("Found tested template for {}".format(stream))
        print("Gathering tested template data for {}".format(stream))
        print("Updating the template log")
        stream_html = [stream_data['template_name'],
                       stream_data['passed_on_providers'],
                       stream_data['failed_on_providers'],
                       stream_data['group_name'],
                       stream_data['datestamp']]
        if 'html' in filename:
            data = template_env.get_template(
                'template_tester_report.html').render(upstream=stream_html)
            with open(filename, 'w') as report:
                report.write(data)
        else:
            with open(filename, 'a+') as report:
                # The four provider sections only differ in image key,
                # provider category and message wording.
                _write_template_status(report, stream, stream_data, 'template_rhos',
                                       'openstack', 'OpenStack', 'rhos')
                _write_template_status(report, stream, stream_data, 'template_rhevm',
                                       'rhevm', 'RHEVM', 'rhevm')
                _write_template_status(report, stream, stream_data, 'template_vsphere',
                                       'virtualcenter', 'VIRTUALCENTER', 'vmware')
                _write_template_status(report, stream, stream_data, 'template_scvmm',
                                       'scvmm', 'SCVMM', 'scvmm')
                report.seek(0, 0)
                lines = report.readlines()
                # BUG FIX: the original used map()/filter() with Python-2
                # tuple-parameter lambdas ("lambda (x): ...").  That syntax is
                # a SyntaxError on Python 3, and map() there is lazy, so the
                # side-effect writes would silently never run.  Plain loops
                # (in the helper above) and list comprehensions behave
                # identically on both versions.
                template_missing = [line for line in lines if "MISSING" in line]
                template_passed = [line for line in lines if "PASSED" in line]
                template_failed = [line for line in lines if "FAILED" in line]
                if template_failed:
                    status = "FAILED"
                if template_missing and not (template_passed or template_failed):
                    report.close()
                    sys.exit("Template is MISSING....Please verify uploads....")
        print("template_tester_results report generated:{}".format(status))
    else:
        print("No Templates tested on: {}".format(datetime.datetime.now()))
def run(**kwargs):
    """Upload a VHD image and create a template on every SCVMM provider.

    For each 'scvmm' provider key: rebuilds provider-specific kwargs,
    connects to the host over PowerShell, optionally uploads the VHD to the
    library share and turns it into a template, then waits for the template
    to appear and registers it with trackerbot.

    Args:
        **kwargs: upload settings (image_url, template_name, upload,
            template, stream, library, ...).

    NOTE(review): kwargs is rebound on every loop iteration; this relies on
    make_kwargs_scvmm preserving image_url/template_name for the following
    iterations -- confirm.
    """
    for provider in list_provider_keys("scvmm"):
        kwargs = make_kwargs_scvmm(cfme_data, provider,
                                   kwargs.get('image_url'), kwargs.get('template_name'))
        check_kwargs(**kwargs)
        mgmt_sys = cfme_data['management_systems'][provider]
        host_fqdn = mgmt_sys['hostname_fqdn']
        creds = credentials[mgmt_sys['credentials']]

        # For powershell to work, we need to extract the User Name from the Domain
        user = creds['username'].split('\\')
        if len(user) == 2:
            username_powershell = user[1]
        else:
            username_powershell = user[0]

        # fully-qualified DOMAIN\user form used when creating the template
        username_scvmm = creds['domain'] + "\\" + creds['username']

        scvmm_args = {
            "hostname": mgmt_sys['ipaddress'],
            "username": username_powershell,
            "password": creds['password'],
            "domain": creds['domain'],
            "provisioning": mgmt_sys['provisioning']
        }
        client = SCVMMSystem(**scvmm_args)

        url = kwargs.get('image_url')

        # Template name equals either user input of we extract the name from the url
        new_template_name = kwargs.get('template_name')
        if new_template_name is None:
            # NOTE(review): [:-4] assumes a 4-character extension (".vhd") on
            # the url basename -- confirm upstream always provides one
            new_template_name = os.path.basename(url)[:-4]

        print("SCVMM:{} Make Template out of the VHD {}".format(provider, new_template_name))

        # use_library is either user input or we use the cfme_data value
        library = kwargs.get('library',
                             mgmt_sys['template_upload'].get('vhds', None))

        print("SCVMM:{} Template Library: {}".format(provider, library))

        # The VHD name changed, match the template_name.
        new_vhd_name = new_template_name + '.vhd'

        network = mgmt_sys['template_upload'].get('network', None)
        os_type = mgmt_sys['template_upload'].get('os_type', None)
        cores = mgmt_sys['template_upload'].get('cores', None)
        ram = mgmt_sys['template_upload'].get('ram', None)

        # Uses PowerShell Get-SCVMTemplate to return a list of templates and aborts if exists.
        if not client.does_template_exist(new_template_name):
            if kwargs.get('upload'):
                print("SCVMM:{} Uploading VHD image to Library VHD folder.".format(provider))
                upload_vhd(client, url, library, new_vhd_name)
            if kwargs.get('template'):
                print("SCVMM:{} Make Template out of the VHD {}".format(
                    provider, new_template_name))
                make_template(
                    client,
                    host_fqdn,
                    new_template_name,
                    library,
                    network,
                    os_type,
                    username_scvmm,
                    cores,
                    ram
                )
            try:
                # poll until the new template shows up in the library
                wait_for(lambda: client.does_template_exist(new_template_name),
                         fail_condition=False, delay=5)
                print("SCVMM:{} template {} uploaded successfully".format(
                    provider, new_template_name))
                print("SCVMM:{} Adding template {} to trackerbot".format(
                    provider, new_template_name))
                # NOTE(review): passes the original kwargs template_name (may
                # be None) rather than the derived new_template_name -- confirm
                trackerbot.trackerbot_add_provider_template(kwargs.get('stream'),
                                                            provider,
                                                            kwargs.get('template_name'))
            except Exception as e:
                print(e)
                print("SCVMM:{} Exception occured while verifying the template {} upload".
                      format(provider, new_template_name))
        else:
            print("SCVMM: A Template with that name already exists in the SCVMMLibrary")
def run(**kwargs):
    """Calls all the functions needed to upload new template to RHEVM.

    This is called either by template_upload_all script, or by main function.

    Args:
        **kwargs: Kwargs generated from
            cfme_data['template_upload']['template_upload_rhevm'].

    NOTE(review): kwargs['provider_data'] and kwargs['stream'] are accessed
    with [] -- assumes callers always supply those keys; confirm.
    """
    thread_queue = []
    valid_providers = []

    providers = list_provider_keys("rhevm")
    if kwargs['provider_data']:
        # provider_data overrides the environment: iterate its management
        # systems instead of the configured rhevm provider keys
        mgmt_sys = providers = kwargs['provider_data']['management_systems']
    # First pass: keep only reachable providers with a running ovirt-engine
    for provider in providers:
        if kwargs['provider_data']:
            if mgmt_sys[provider]['type'] != 'rhevm':
                continue
            sshname = mgmt_sys[provider]['sshname']
            sshpass = mgmt_sys[provider]['sshpass']
            rhevip = mgmt_sys[provider]['ipaddress']
        else:
            # NOTE: mgmt_sys bound here is reused by the second loop below
            mgmt_sys = cfme_data['management_systems']
            ssh_rhevm_creds = mgmt_sys[provider]['ssh_creds']
            sshname = credentials[ssh_rhevm_creds]['username']
            sshpass = credentials[ssh_rhevm_creds]['password']
            rhevip = mgmt_sys[provider]['ipaddress']

        if (mgmt_sys[provider].get('template_upload') and
                mgmt_sys[provider]['template_upload'].get('block_upload')):
            # Providers template_upload section indicates upload should not
            # happen on this provider
            continue

        print("RHEVM:{} verifying provider's state before template upload".format(provider))
        if not net.is_pingable(rhevip):
            continue
        elif not is_ovirt_engine_running(rhevip, sshname, sshpass):
            print('RHEVM:{} ovirt-engine service not running..'.format(provider))
            continue
        valid_providers.append(provider)

    # Second pass: spawn one upload thread per validated provider
    for provider in valid_providers:
        if kwargs['provider_data']:
            sshname = mgmt_sys[provider]['sshname']
            sshpass = mgmt_sys[provider]['sshpass']
            username = mgmt_sys[provider]['username']
            password = mgmt_sys[provider]['password']
        else:
            ssh_rhevm_creds = mgmt_sys[provider]['ssh_creds']
            sshname = credentials[ssh_rhevm_creds]['username']
            sshpass = credentials[ssh_rhevm_creds]['password']
            rhevm_credentials = mgmt_sys[provider]['credentials']
            username = credentials[rhevm_credentials]['username']
            password = credentials[rhevm_credentials]['password']

        rhevip = mgmt_sys[provider]['ipaddress']
        thread = Thread(target=upload_template,
                        args=(rhevip, sshname, sshpass, username, password, provider,
                              kwargs.get('image_url'), kwargs.get('template_name'),
                              kwargs['provider_data'], kwargs['stream']))
        # daemon threads die with the main process on KeyboardInterrupt
        thread.daemon = True
        thread_queue.append(thread)
        thread.start()

    for thread in thread_queue:
        thread.join()
def cleanup_vms(texts, max_hours=24, providers=None, prompt=True):
    """Scan providers for name-matched, aged VMs and delete them.

    Spawns one scanning thread per eligible provider to collect VMs whose
    names match ``texts`` and which are older than ``max_hours``, optionally
    asks for confirmation, then spawns one deletion thread per provider and
    writes a summary table to the report file.

    :param texts: list of strings to match against
    :param max_hours: integer maximum number of hours that the VM can exist for
    :param providers: list of provider keys
    :param prompt: boolean, whether or not to prompt the user for each delete
    :return: 0 if user declines delete when prompt is True
    """
    provider_keys = providers or list_provider_keys()
    age_limit = datetime.timedelta(hours=int(max_hours))
    vms_to_delete = defaultdict(set)
    # case-insensitive patterns, compiled once up front
    patterns = [re.compile(text, re.IGNORECASE) for text in texts]
    print(
        'Matching VM names against the following case-insensitive strings: {}'.
        format(texts))

    scan_threads = []
    for provider_key in provider_keys:
        provider_yaml = cfme_data['management_systems'][provider_key]
        # providers must opt in to cleanup via their yaml
        if not provider_yaml.get('cleanup', False):
            print('Skipping {}, cleanup map set to false or missing in yaml'.
                  format(provider_key))
            continue
        ipaddress = provider_yaml.get('ipaddress')
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        scanner = Thread(target=process_provider_vms,
                         args=(provider_key, patterns, age_limit, vms_to_delete))
        # Daemon threads die with the main process on KeyboardInterrupt
        scanner.daemon = True
        scan_threads.append(scanner)
        scanner.start()
    for scanner in scan_threads:
        scanner.join()

    if vms_to_delete and prompt:
        answer = raw_input('Delete these VMs? [y/N]: ')
        if str(answer).lower() != 'y':
            print('Exiting.')
            return 0
    if not vms_to_delete:
        print('No VMs to delete.')

    delete_threads = []
    for provider_key, matches in vms_to_delete.items():
        provider_mgmt = get_mgmt(provider_key)
        names_ages = [
            NameAge(vm_name,
                    '{} days, {} hours old'.format(vm_delta.days,
                                                   vm_delta.seconds / 3600))
            for vm_name, vm_delta in matches]
        deleter = Thread(target=delete_provider_vms,
                         args=(provider_key, provider_mgmt, names_ages))
        deleter.daemon = True
        delete_threads.append(deleter)
        deleter.start()
    for deleter in delete_threads:
        deleter.join()

    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '## text matches: {}\n'
                     '## age matches: {}\n'.format(texts, max_hours))
        message = tabulate(
            deleted_vms_list,
            headers=['Provider', 'Name', 'Age', 'Status', 'Delete RC'],
            tablefmt='orgtbl')
        report.write(message + '\n')
        print(message)

    print("Deleting finished")
    return 0
""" import sys from traceback import format_exc from utils.providers import list_provider_keys, get_mgmt def main(*providers): for provider_key in list_provider_keys('openstack'): print('Checking {}'.format(provider_key)) api = get_mgmt(provider_key).api try: fips = api.floating_ips.findall(fixed_ip=None) except Exception: print('Unable to get fips for {}:'.format(provider_key)) print(format_exc().splitlines()[-1]) continue for fip in fips: print('Deleting {} on {}'.format(fip.ip, provider_key)) fip.delete() print('{} deleted'.format(fip.ip)) if __name__ == "__main__": provs = sys.argv[1:] if provs: main(*provs) else: main(*list_provider_keys("openstack"))
def cleanup_vms(texts, max_hours=24, providers=None, prompt=True):
    """
    Main method for the cleanup process
    Generates regex match objects
    Checks providers for cleanup boolean in yaml
    Checks provider connectivity (using ping)
    Threads process_provider_vms to build list of vms to delete
    Prompts user to continue with delete
    Threads deleting of the vms

    Args:
        texts (list): List of regex strings to match with
        max_hours (int): age limit for deletion
        providers (list): List of provider keys to scan and cleanup
        prompt (bool): Whether or not to prompt the user before deleting vms

    Returns:
        int: return code, 0 on success, otherwise raises exception
    """
    logger.info(
        'Matching VM names against the following case-insensitive strings: %s',
        texts)
    # Compile regex, strip leading/trailing single quotes from cli arg
    matchers = [re.compile(text.strip("'"), re.IGNORECASE) for text in texts]

    providers_to_scan = []
    for provider_key in providers or list_provider_keys():
        # check for cleanup boolean
        if not cfme_data['management_systems'][provider_key].get(
                'cleanup', False):
            logger.info('SKIPPING %s, cleanup set to false or missing in yaml',
                        provider_key)
            continue
        logger.info('SCANNING %s', provider_key)
        providers_to_scan.append(provider_key)

    # scan providers for vms with name matches
    # manager = Manager()
    # NOTE(review): 'manager' comes from module scope (not visible in this
    # block); its queues are shared with pool_manager workers -- confirm it
    # is a multiprocessing.Manager.
    text_match_queue = manager.Queue()
    scan_fail_queue = manager.Queue()
    provider_scan_args = [(provider_key, matchers, text_match_queue,
                           scan_fail_queue)
                          for provider_key in providers_to_scan]
    pool_manager(scan_provider, provider_scan_args)

    # drain the name-match results from the worker queue
    text_matched = []
    while not text_match_queue.empty():
        text_matched.append(text_match_queue.get())

    # scan vms for age matches
    age_match_queue = manager.Queue()
    vm_scan_args = [(provider_key, vm_name, timedelta(hours=int(max_hours)),
                     age_match_queue, scan_fail_queue)
                    for provider_key, vm_name in text_matched]
    pool_manager(scan_vm, vm_scan_args)

    vms_to_delete = []
    while not age_match_queue.empty():
        vms_to_delete.append(age_match_queue.get())

    scan_fail_vms = []
    # add the scan failures into deleted vms for reporting sake
    while not scan_fail_queue.empty():
        scan_fail_vms.append(scan_fail_queue.get())

    if vms_to_delete and prompt:
        yesno = raw_input('Delete these VMs? [y/N]: ')
        if str(yesno).lower() != 'y':
            logger.info('Exiting.')
            return 0

    # initialize this even if we don't have anything to delete, for report consistency
    deleted_vms = []

    if vms_to_delete:
        delete_queue = manager.Queue()
        delete_vm_args = [(provider_key, vm_name, age, delete_queue)
                          for provider_key, vm_name, age in vms_to_delete]
        pool_manager(delete_vm, delete_vm_args)
        while not delete_queue.empty():
            deleted_vms.append(
                delete_queue.get())  # Each item is a VmReport tuple
    else:
        logger.info('No VMs to delete.')

    # write the summary table; 'args' is the module-level parsed CLI namespace
    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '## text matches: {}\n'
                     '## age matches: {}\n'.format(texts, max_hours))
        message = tabulate(
            sorted(scan_fail_vms + deleted_vms, key=attrgetter('result')),
            headers=['Provider', 'Name', 'Age', 'Status Before', 'Delete RC'],
            tablefmt='orgtbl')
        report.write(message + '\n')
        logger.info(message)

    return 0