Example #1
def pytest_configure(config):
    if store.parallelizer_role == 'master' or trackerbot.conf.get('url') is None:
        return

    # A further optimization here would be to make the calls to trackerbot per provider,
    # and perhaps only pull the providers that are needed; however, that would need to
    # ensure that tests which pick providers at random still adhere to the filters,
    # which may be too tricky right now.

    count = 0

    if not config.getoption('use_template_cache'):
        store.terminalreporter.line("Loading templates from trackerbot...", green=True)
        provider_templates = trackerbot.provider_templates(trackerbot.api())
        for provider in list_provider_keys():
            TEMPLATES[provider] = provider_templates.get(provider, [])
            config.cache.set('miq-trackerbot/{}'.format(provider), TEMPLATES[provider])
            count += len(TEMPLATES[provider])
    else:
        store.terminalreporter.line("Using templates from cache...", green=True)
        provider_templates = None
        for provider in list_provider_keys():
            templates = config.cache.get('miq-trackerbot/{}'.format(provider), None)
            if templates is None:
                store.terminalreporter.line(
                    "Loading templates for {} from source as not in cache".format(
                        provider), green=True)
                if not provider_templates:
                    provider_templates = trackerbot.provider_templates(trackerbot.api())
                templates = provider_templates.get(provider, [])
                config.cache.set('miq-trackerbot/{}'.format(provider), templates)
            count += len(templates)
            TEMPLATES[provider] = templates
    store.terminalreporter.line("  Loaded {} templates successfully!".format(count), green=True)
Example #2
def templates_uploaded_on_providers(api, stream, template):
    if get_untested_templates(api, stream, template):
        print(
            'report will not be generated, proceed with the next untested provider'
        )
        sys.exit()
    for temp in api.template.get(limit=1, tested=False,
                                 group__name=stream).get('objects', []):
        if 'template_rhevm' in images_uploaded(stream):
            if not provider_in_the_list(list_provider_keys('rhevm'),
                                        temp['providers']):
                return False
        if 'template_rhos' in images_uploaded(stream):
            if not provider_in_the_list(list_provider_keys('openstack'),
                                        temp['providers']):
                return False
        if 'template_vsphere' in images_uploaded(stream):
            if not provider_in_the_list(list_provider_keys('virtualcenter'),
                                        temp['providers']):
                return False
        if 'template_scvmm' in images_uploaded(stream):
            if not provider_in_the_list(list_provider_keys('scvmm'),
                                        temp['providers']):
                return False
    return True
Example #3
def ec2cleanup(exclude_volumes, exclude_eips, exclude_elbs, exclude_enis, exclude_stacks,
               stack_template, output):
    with open(output, 'w') as report:
        report.write('ec2cleanup.py, Address, Volume, LoadBalancer and Network Interface Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
    for provider_key in list_provider_keys('ec2'):
        provider_mgmt = get_mgmt(provider_key)
        logger.info("----- Provider: %r -----", provider_key)
        logger.info("Deleting volumes...")
        delete_unattached_volumes(provider_mgmt=provider_mgmt,
                                  excluded_volumes=exclude_volumes,
                                  output=output)
        logger.info("Deleting Elastic LoadBalancers...")
        delete_unused_loadbalancers(provider_mgmt=provider_mgmt,
                                    excluded_elbs=exclude_elbs,
                                    output=output)
        logger.info("Deleting Elastic Network Interfaces...")
        delete_unused_network_interfaces(provider_mgmt=provider_mgmt,
                                         excluded_enis=exclude_enis,
                                         output=output)
        logger.info("Deleting old stacks...")
        delete_stacks(provider_mgmt=provider_mgmt,
                      excluded_stacks=exclude_stacks,
                      stack_template=stack_template,
                      output=output)
        logger.info("Releasing addresses...")
        delete_disassociated_addresses(provider_mgmt=provider_mgmt,
                                       excluded_eips=exclude_eips,
                                       output=output)
def run(**kwargs):
    # Setup defaults for the cli tool machine
    host = kwargs.get('ssh_host') or \
        cfme_data['template_upload']['template_upload_ec2']['aws_cli_tool_client']
    user = kwargs.get('ssh_user') or credentials['host_default']['username']
    passwd = kwargs.get('ssh_pass') or credentials['host_default']['password']
    # Download file once and thread uploading to different gce regions
    with make_ssh_client(host, user, passwd) as ssh_client:
        file_name, file_path = download_image_file(kwargs.get('image_url'),
                                                   ssh_client)

    thread_queue = []
    for provider in list_provider_keys("gce"):
        # skip provider if block_upload is set
        provider_yaml = cfme_data.management_systems.get(provider)
        if (provider_yaml.get('template_upload')
                and provider_yaml.template_upload.get('block_upload')):
            logger.info(
                'Skipping upload on {} due to block_upload'.format(provider))
            continue
        template_name = kwargs.get('template_name')
        bucket_name = kwargs.get('bucket_name')
        stream = kwargs.get('stream')
        with make_ssh_client(host, user, passwd) as ssh_client:
            thread = Thread(target=upload_template,
                            args=(provider, template_name, stream, file_name,
                                  file_path, ssh_client, bucket_name))
            thread.daemon = True
            thread_queue.append(thread)
            thread.start()

    for thread in thread_queue:
        thread.join()
Example #5
def ec2cleanup(exclude_volumes, exclude_eips, exclude_elbs, exclude_enis,
               exclude_stacks, stack_template, output):
    with open(output, 'w') as report:
        report.write(
            'ec2cleanup.py, Address, Volume, LoadBalancer and Network Interface Cleanup'
        )
        report.write("\nDate: {}\n".format(datetime.now()))
    for provider_key in list_provider_keys('ec2'):
        provider_mgmt = get_mgmt(provider_key)
        logger.info("----- Provider: %r -----", provider_key)
        logger.info("Deleting volumes...")
        delete_unattached_volumes(provider_mgmt=provider_mgmt,
                                  excluded_volumes=exclude_volumes,
                                  output=output)
        logger.info("Deleting Elastic LoadBalancers...")
        delete_unused_loadbalancers(provider_mgmt=provider_mgmt,
                                    excluded_elbs=exclude_elbs,
                                    output=output)
        logger.info("Deleting Elastic Network Interfaces...")
        delete_unused_network_interfaces(provider_mgmt=provider_mgmt,
                                         excluded_enis=exclude_enis,
                                         output=output)
        logger.info("Deleting old stacks...")
        delete_stacks(provider_mgmt=provider_mgmt,
                      excluded_stacks=exclude_stacks,
                      stack_template=stack_template,
                      output=output)
        logger.info("Releasing addresses...")
        delete_disassociated_addresses(provider_mgmt=provider_mgmt,
                                       excluded_eips=exclude_eips,
                                       output=output)
def run(**kwargs):
    # Setup defaults for the cli tool machine
    host = kwargs.get('ssh_host') or \
        cfme_data['template_upload']['template_upload_ec2']['aws_cli_tool_client']
    user = kwargs.get('ssh_user') or credentials['host_default']['username']
    passwd = kwargs.get('ssh_pass') or credentials['host_default']['password']
    # Download file once and thread uploading to different gce regions
    with make_ssh_client(host, user, passwd) as ssh_client:
        file_name, file_path = download_image_file(kwargs.get('image_url'),
                                                   ssh_client)

    thread_queue = []
    for provider in list_provider_keys("gce"):
        template_name = kwargs.get('template_name')
        bucket_name = kwargs.get('bucket_name')
        stream = kwargs.get('stream')
        with make_ssh_client(host, user, passwd) as ssh_client:
            thread = Thread(target=upload_template,
                            args=(provider, template_name, stream, file_name,
                                  file_path, ssh_client, bucket_name))
            thread.daemon = True
            thread_queue.append(thread)
            thread.start()

    for thread in thread_queue:
        thread.join()
Example #7
def azure_cleanup(nic_template, pip_template, days_old):
    logger.info('azure_cleanup.py, NICs, PIPs, Disks and Stack Cleanup')
    logger.info("Date: {}".format(datetime.now()))
    errors = []
    for prov_key in list_provider_keys('azure'):
        logger.info("----- Provider: '%s' -----", prov_key)
        mgmt = get_mgmt(prov_key)
        mgmt.logger = logger
        for name, scr_id in mgmt.list_subscriptions():
            logger.info("Subscription '%s' is chosen", name)
            setattr(mgmt, 'subscription_id', scr_id)
            for resource_group in mgmt.list_resource_groups():
                mgmt.logger.info('Checking "%s" resource group:',
                                 resource_group)

                # removing stale nics
                try:
                    mgmt.remove_nics_by_search(nic_template, resource_group)
                except Exception as e:
                    logger.warning("NIC cleanup failed")
                    errors.append(e)

                # removing public ips
                try:
                    mgmt.remove_pips_by_search(pip_template, resource_group)
                except Exception as e:
                    logger.warning("Public IP cleanup failed")
                    errors.append(e)

                # removing stale stacks
                try:
                    stack_list = mgmt.list_stack(resource_group=resource_group,
                                                 days_old=days_old)
                    if stack_list:
                        removed_stacks = []
                        for stack in stack_list:
                            if mgmt.is_stack_empty(
                                    stack, resource_group=resource_group):
                                removed_stacks.append(stack)
                                mgmt.delete_stack(stack, resource_group)

                        if not removed_stacks:
                            logger.info(
                                "No empty stacks older '%s' days were found",
                                days_old)
                except Exception as e:
                    logger.warning("Removing Stacks failed")
                    errors.append(e)
                try:
                    mgmt.remove_unused_blobs(resource_group)
                except Exception as e:
                    logger.warning("Removing unused blobs failed")
                    errors.append(e)
    if errors:
        logger.error("Hit exceptions during cleanup! See logs.")
        return 1
    else:
        return 0
def run(**kwargs):
    thread_queue = []
    providers = list_provider_keys("virtualcenter")
    if kwargs['provider_data']:
        mgmt_sys = providers = kwargs['provider_data']['management_systems']
    else:
        mgmt_sys = cfme_data.management_systems

    # Store thread results, no need to use a lock
    # because threads will not be adding new keys
    results = {provider: None for provider in providers}

    for provider in providers:
        # skip provider if block_upload is set
        if (mgmt_sys[provider].get('template_upload') and
                mgmt_sys[provider]['template_upload'].get('block_upload')):
            logger.info('Skipping upload on %s due to block_upload', provider)
            continue
        if kwargs['provider_data']:
            if mgmt_sys[provider]['type'] != 'virtualcenter':
                continue
            username = mgmt_sys[provider]['username']
            password = mgmt_sys[provider]['password']
        else:
            creds = credentials[mgmt_sys[provider]['credentials']]
            username = creds['username']
            password = creds['password']
        host_ip = mgmt_sys[provider]['ipaddress']
        hostname = mgmt_sys[provider]['hostname']
        client = VMWareSystem(hostname, username, password)

        if not net.is_pingable(host_ip):
            continue
        thread = Thread(target=upload_template,
                        args=(client, hostname, username, password, provider,
                              kwargs.get('image_url'), kwargs.get('template_name'),
                              kwargs['provider_data'], kwargs['stream'], results))
        thread.daemon = True
        thread_queue.append(thread)
        thread.start()

    for thread in thread_queue:
        thread.join()

    failed_providers = [provider for provider, result in results.items() if result is False]
    skipped_providers = [provider for provider, result in results.items() if result is None]
    passed_providers = [provider for provider, result in results.items() if result]

    logger.info("providers skipped: %s", skipped_providers)
    logger.info("providers passed: %s", passed_providers)
    logger.info("providers failed: %s", failed_providers)

    if not passed_providers:
        raise Exception("Template upload failed for all providers")
    else:
        logger.info("Upload passed for at least 1 provider... success!")
Example #9
def run(**kwargs):
    thread_queue = []
    providers = list_provider_keys("virtualcenter")
    if kwargs['provider_data']:
        mgmt_sys = providers = kwargs['provider_data']['management_systems']
    else:
        mgmt_sys = cfme_data.management_systems

    # Store thread results, no need to use a lock
    # because threads will not be adding new keys
    results = {provider: None for provider in providers}

    for provider in providers:
        # skip provider if block_upload is set
        if (mgmt_sys[provider].get('template_upload') and
                mgmt_sys[provider]['template_upload'].get('block_upload')):
            logger.info('Skipping upload on %s due to block_upload', provider)
            continue
        if kwargs['provider_data']:
            if mgmt_sys[provider]['type'] != 'virtualcenter':
                continue
            username = mgmt_sys[provider]['username']
            password = mgmt_sys[provider]['password']
        else:
            creds = credentials[mgmt_sys[provider]['credentials']]
            username = creds['username']
            password = creds['password']
        host_ip = mgmt_sys[provider]['ipaddress']
        hostname = mgmt_sys[provider]['hostname']
        client = VMWareSystem(hostname, username, password)

        if not net.is_pingable(host_ip):
            continue
        thread = Thread(target=upload_template,
                        args=(client, hostname, username, password, provider,
                              kwargs.get('image_url'), kwargs.get('template_name'),
                              kwargs['provider_data'], kwargs['stream'], results))
        thread.daemon = True
        thread_queue.append(thread)
        thread.start()

    for thread in thread_queue:
        thread.join()

    failed_providers = [provider for provider, result in results.items() if result is False]
    skipped_providers = [provider for provider, result in results.items() if result is None]
    passed_providers = [provider for provider, result in results.items() if result]

    logger.info("providers skipped: %s", skipped_providers)
    logger.info("providers passed: %s", passed_providers)
    logger.info("providers failed: %s", failed_providers)

    if not passed_providers:
        raise Exception("Template upload failed for all providers")
    else:
        logger.info("Upload passed for at least 1 provider... success!")
def templates_uploaded_on_providers(api, stream, template):
    if get_untested_templates(api, stream, template):
        print('report will not be generated, proceed with the next untested provider')
        sys.exit()
    for temp in api.template.get(
            limit=1, tested=False, group__name=stream).get('objects', []):
        if 'template_rhevm' in images_uploaded(stream):
            if not provider_in_the_list(list_provider_keys('rhevm'), temp['providers']):
                return False
        if 'template_rhos' in images_uploaded(stream):
            if not provider_in_the_list(list_provider_keys('openstack'), temp['providers']):
                return False
        if 'template_vsphere' in images_uploaded(stream):
            if not provider_in_the_list(list_provider_keys('virtualcenter'), temp['providers']):
                return False
        if 'template_scvmm' in images_uploaded(stream):
            if not provider_in_the_list(list_provider_keys('scvmm'), temp['providers']):
                return False
    return True
def azure_cleanup(nic_template, pip_template, days_old):
        logger.info('azure_cleanup.py, NICs, PIPs, Disks and Stack Cleanup')
        logger.info("Date: {}".format(datetime.now()))
        errors = []
        for prov_key in list_provider_keys('azure'):
            logger.info("----- Provider: '%s' -----", prov_key)
            mgmt = get_mgmt(prov_key)
            mgmt.logger = logger
            for name, scr_id in mgmt.list_subscriptions():
                logger.info("Subscription '%s' is chosen", name)
                setattr(mgmt, 'subscription_id', scr_id)
                for resource_group in mgmt.list_resource_groups():
                    mgmt.logger.info('Checking "%s" resource group:', resource_group)

                    # removing stale nics
                    try:
                        mgmt.remove_nics_by_search(nic_template, resource_group)
                    except Exception as e:
                        logger.exception("NIC cleanup failed")
                        errors.append(e)

                    # removing public ips
                    try:
                        mgmt.remove_pips_by_search(pip_template, resource_group)
                    except Exception as e:
                        logger.exception("Public IP cleanup failed")
                        errors.append(e)

                    # removing stale stacks
                    try:
                        stack_list = mgmt.list_stack(resource_group=resource_group,
                                                     days_old=days_old)
                        if stack_list:
                            removed_stacks = []
                            for stack in stack_list:
                                if mgmt.is_stack_empty(stack, resource_group=resource_group):
                                    removed_stacks.append(stack)
                                    mgmt.delete_stack(stack, resource_group)

                            if not removed_stacks:
                                logger.info("No empty stacks older '%s' days were found", days_old)
                    except Exception as e:
                        logger.exception("Removing Stacks failed")
                        errors.append(e)
                    try:
                        mgmt.remove_unused_blobs(resource_group)
                    except Exception as e:
                        logger.exception("Removing unused blobs failed")
                        errors.append(e)
        if errors:
            logger.error("Hit exceptions during cleanup! See logs.")
            return 1
        else:
            return 0
Example #12
def run(**kwargs):

    try:
        thread_queue = []
        providers = list_provider_keys("openshift")
        if kwargs['provider_data']:
            mgmt_sys = providers = kwargs['provider_data'][
                'management_systems']
        else:
            mgmt_sys = cfme_data['management_systems']
        for provider in providers:
            # skip provider if block_upload is set
            if (mgmt_sys[provider].get('template_upload') and
                    mgmt_sys[provider]['template_upload'].get('block_upload')):
                logger.info('Skipping upload on {} due to block_upload'.format(
                    provider))
                continue
            if 'podtesting' not in mgmt_sys[provider]['tags']:
                continue
            if kwargs['provider_data']:
                username = mgmt_sys[provider]['username']
                password = mgmt_sys[provider]['password']
            else:
                ssh_creds = credentials[mgmt_sys[provider]['ssh_creds']]
                username = ssh_creds['username']
                password = ssh_creds['password']
                oc_creds = credentials[mgmt_sys[provider]['credentials']]
                oc_username = oc_creds['username']
                oc_password = oc_creds['password']
            host_ip = mgmt_sys[provider]['ipaddress']
            hostname = mgmt_sys[provider]['hostname']

            upload_parameters = cfme_data['template_upload'][
                'template_upload_openshift']
            upload_folder = kwargs.get('upload_folder',
                                       upload_parameters['upload_folder'])

            if not net.is_pingable(host_ip):
                continue
            thread = Thread(target=upload_template,
                            args=(hostname, username, password, provider,
                                  kwargs.get('image_url'),
                                  kwargs.get('template_name'),
                                  kwargs['provider_data'], kwargs['stream'],
                                  upload_folder, oc_username, oc_password))
            thread.daemon = True
            thread_queue.append(thread)
            thread.start()

        for thread in thread_queue:
            thread.join()
    except Exception:
        logger.exception('Exception during run method')
        return False
def run(**kwargs):

    thread_queue = []
    providers = list_provider_keys("openstack")
    if kwargs['provider_data']:
        provider_data = kwargs['provider_data']
        mgmt_sys = providers = provider_data['management_systems']
    else:
        mgmt_sys = cfme_data.management_systems
    for provider in providers:
        # skip provider if block_upload is set
        if (mgmt_sys[provider].get('template_upload')
                and mgmt_sys[provider]['template_upload'].get('block_upload')):
            logger.info(
                'Skipping upload on {} due to block_upload'.format(provider))
            continue

        if kwargs['provider_data']:
            if mgmt_sys[provider]['type'] != 'openstack':
                continue
            username = mgmt_sys[provider]['username']
            password = mgmt_sys[provider]['password']
            sshname = mgmt_sys[provider]['sshname']
            sshpass = mgmt_sys[provider]['sshpass']
        else:
            mgmt_sys = cfme_data['management_systems']
            rhos_credentials = credentials[mgmt_sys[provider]['credentials']]
            default_host_creds = credentials['host_default']
            username = rhos_credentials['username']
            password = rhos_credentials['password']
            sshname = default_host_creds['username']
            sshpass = default_host_creds['password']
        rhosip = mgmt_sys[provider]['ipaddress']
        auth_url = mgmt_sys[provider]['auth_url']
        if not net.is_pingable(rhosip):
            continue
        if not net.net_check(ports.SSH, rhosip):
            logger.error("SSH connection to %r:%r failed, port unavailable",
                         provider, ports.SSH)
            continue
        thread = Thread(target=upload_template,
                        args=(rhosip, sshname, sshpass, username, password,
                              auth_url, provider, kwargs.get('image_url'),
                              kwargs.get('template_name'),
                              kwargs['provider_data'], kwargs['stream']))
        thread.daemon = True
        thread_queue.append(thread)
        thread.start()

    for thread in thread_queue:
        thread.join()
Example #14
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument(
        '--name',
        default='test_snapshot_',
        help='Starting pattern of snapshot name, '
        'e.g. --name test_ deletes all snapshots starting with test_')
    parser.add_argument(
        '--providers',
        default=list_provider_keys("openstack"),
        nargs='+',
        help='List of provider keys e.g. --providers rhos13 rhos12')
    args = parser.parse_args()
    return args
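A minimal usage sketch for the parser above; the loop body below is illustrative only and not part of the original script.

if __name__ == '__main__':
    args = parse_cmd_line()
    # args.providers defaults to every openstack provider key from the yamls
    for provider_key in args.providers:
        # placeholder action; the real script would delete matching snapshots
        print('Would scan {} for snapshots starting with {}'.format(provider_key, args.name))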
def run(**kwargs):

    try:
        thread_queue = []
        providers = list_provider_keys("openshift")
        if kwargs['provider_data']:
            mgmt_sys = providers = kwargs['provider_data']['management_systems']
        else:
            mgmt_sys = cfme_data['management_systems']
        for provider in providers:
            # skip provider if block_upload is set
            if (mgmt_sys[provider].get('template_upload') and
                    mgmt_sys[provider]['template_upload'].get('block_upload')):
                logger.info('Skipping upload on {} due to block_upload'.format(provider))
                continue
            if 'podtesting' not in mgmt_sys[provider]['tags']:
                continue
            if kwargs['provider_data']:
                username = mgmt_sys[provider]['username']
                password = mgmt_sys[provider]['password']
            else:
                ssh_creds = credentials[mgmt_sys[provider]['ssh_creds']]
                username = ssh_creds['username']
                password = ssh_creds['password']
                oc_creds = credentials[mgmt_sys[provider]['credentials']]
                oc_username = oc_creds['username']
                oc_password = oc_creds['password']
            host_ip = mgmt_sys[provider]['ipaddress']
            hostname = mgmt_sys[provider]['hostname']

            upload_parameters = cfme_data['template_upload']['template_upload_openshift']
            upload_folder = kwargs.get('upload_folder', upload_parameters['upload_folder'])

            if not net.is_pingable(host_ip):
                continue
            thread = Thread(target=upload_template,
                            args=(hostname, username, password, provider,
                                  kwargs.get('image_url'), kwargs.get('template_name'),
                                  kwargs['provider_data'], kwargs['stream'], upload_folder,
                                  oc_username, oc_password))
            thread.daemon = True
            thread_queue.append(thread)
            thread.start()

        for thread in thread_queue:
            thread.join()
    except Exception:
        logger.exception('Exception during run method')
        return False
def main(*providers):
    for provider_key in list_provider_keys('openstack'):
        print('Checking {}'.format(provider_key))
        api = get_mgmt(provider_key).api
        try:
            fips = api.floating_ips.findall(fixed_ip=None)
        except Exception:
            print('Unable to get fips for {}:'.format(provider_key))
            print(format_exc().splitlines()[-1])
            continue

        for fip in fips:
            print('Deleting {} on {}'.format(fip.ip, provider_key))
            fip.delete()
            print('{} deleted'.format(fip.ip))
Example #18
def run(**kwargs):
    providers = list_provider_keys("scvmm")
    # Store result of each provider upload
    results = {provider: None for provider in providers}

    for provider in providers:
        mgmt_sys = cfme_data['management_systems'][provider]

        # skip provider if block_upload is set
        if (mgmt_sys.get('template_upload')
                and mgmt_sys['template_upload'].get('block_upload')):
            logger.info('SCVMM:%s skipping due to block_upload', provider)
            continue

        attempts = 2
        for i in range(0, attempts):
            try:
                results[provider] = False  # assume it has failed... till it has passed
                logger.info('SCVMM:%s create template attempt %d/%d', provider,
                            i + 1, attempts)
                create_template(provider, **kwargs)
                logger.info('SCVMM:%s template creation done', provider)
                results[provider] = True
                break
            except Exception:
                logger.exception('Hit exception creating template on %s',
                                 provider)
                continue

    failed_providers = [
        provider for provider, result in results.items() if result is False
    ]
    skipped_providers = [
        provider for provider, result in results.items() if result is None
    ]
    passed_providers = [
        provider for provider, result in results.items() if result
    ]

    logger.info("providers skipped: %s", skipped_providers)
    logger.info("providers passed: %s", passed_providers)
    logger.info("providers failed: %s", failed_providers)

    if not passed_providers:
        raise Exception("Template upload failed for all providers")
    else:
        logger.info("Upload passed for at least 1 provider... success!")
def run(**kwargs):

    thread_queue = []
    providers = list_provider_keys("openstack")
    if kwargs['provider_data']:
        provider_data = kwargs['provider_data']
        mgmt_sys = providers = provider_data['management_systems']
    else:
        mgmt_sys = cfme_data.management_systems
    for provider in providers:
        # skip provider if block_upload is set
        if (mgmt_sys[provider].get('template_upload') and
                mgmt_sys[provider]['template_upload'].get('block_upload')):
            logger.info('Skipping upload on {} due to block_upload'.format(provider))
            continue

        if kwargs['provider_data']:
            if mgmt_sys[provider]['type'] != 'openstack':
                continue
            username = mgmt_sys[provider]['username']
            password = mgmt_sys[provider]['password']
            sshname = mgmt_sys[provider]['sshname']
            sshpass = mgmt_sys[provider]['sshpass']
        else:
            mgmt_sys = cfme_data['management_systems']
            rhos_credentials = credentials[mgmt_sys[provider]['credentials']]
            default_host_creds = credentials['host_default']
            username = rhos_credentials['username']
            password = rhos_credentials['password']
            sshname = default_host_creds['username']
            sshpass = default_host_creds['password']
        rhosip = mgmt_sys[provider]['ipaddress']
        auth_url = mgmt_sys[provider]['auth_url']
        if not net.is_pingable(rhosip):
            continue
        if not net.net_check(ports.SSH, rhosip):
            logger.error("SSH connection to %r:%r failed, port unavailable", provider, ports.SSH)
            continue
        thread = Thread(target=upload_template,
                        args=(rhosip, sshname, sshpass, username, password, auth_url, provider,
                              kwargs.get('image_url'), kwargs.get('template_name'),
                              kwargs['provider_data'], kwargs['stream']))
        thread.daemon = True
        thread_queue.append(thread)
        thread.start()

    for thread in thread_queue:
        thread.join()
Example #20
def process_tags(provider_keys, tags=None):
    """
    Process the tags provided on the command line to build the set of matching provider keys.

    :param provider_keys: set of provider keys to add matches to (updated in place)
    :param tags: list of tags to match against cfme_data
    :return: None; matching keys are added to provider_keys
    """
    # Check for tags first, build list of provider_keys from it
    if tags:
        all_provider_keys = list_provider_keys()
        for key in all_provider_keys:
            # need to check tags list against yaml tags list for intersection of a single tag
            yaml_tags = cfme_data['management_systems'][key]['tags']
            if any(tag in tags for tag in yaml_tags):
                print('Matched tag from {} on provider {}:tags:{}'.format(tags, key, yaml_tags))
                provider_keys.add(key)
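A brief usage sketch for process_tags; the tag names below are placeholders, and note that the function updates the passed-in set in place rather than returning the matches.

provider_keys = set()
# 'extcloud' and 'upstream' are illustrative tag names only
process_tags(provider_keys, tags=['extcloud', 'upstream'])
print('providers matching tags: {}'.format(sorted(provider_keys)))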
def azure_cleanup(nic_template, pip_template, days_old, output):
    with open(output, 'w') as report:
        report.write('azure_cleanup.py, NICs, PIPs and Stack Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
        try:
            for provider_key in list_provider_keys('azure'):
                provider_mgmt = get_mgmt(provider_key)
                nic_list = provider_mgmt.list_free_nics(nic_template)
                pip_list = provider_mgmt.list_free_pip(pip_template)
                stack_list = provider_mgmt.list_stack(days_old=days_old)
                report.write("----- Provider: {} -----\n".format(provider_key))
                if nic_list:
                    report.write(
                        "Removing Nics with the name \'{}\':\n".format(
                            nic_template))
                    report.write("\n".join(str(k) for k in nic_list))
                    report.write("\n")
                    provider_mgmt.remove_nics_by_search(nic_template)
                else:
                    report.write(
                        "No \'{}\' NICs were found\n".format(nic_template))
                if pip_list:
                    report.write(
                        "Removing Public IPs with the name \'{}\':\n".format(
                            pip_template))
                    report.write("\n".join(str(k) for k in pip_list))
                    report.write("\n")
                    provider_mgmt.remove_pips_by_search(pip_template)
                else:
                    report.write("No \'{}\' Public IPs were found\n".format(
                        pip_template))
                if stack_list:
                    report.write("Removing empty Stacks:\n")
                    for stack in stack_list:
                        if provider_mgmt.is_stack_empty(stack):
                            provider_mgmt.delete_stack(stack)
                            report.write(
                                "Stack {} is empty - Removed\n".format(stack))
                else:
                    report.write(
                        "No stacks older than \'{}\' days were found\n".format(
                            days_old))
            return 0
        except Exception:
            report.write("Something bad happened during Azure cleanup\n")
            report.write(tb.format_exc())
            return 1
def run(**kwargs):

    try:
        thread_queue = []
        providers = list_provider_keys("virtualcenter")
        if kwargs['provider_data']:
            mgmt_sys = providers = kwargs['provider_data'][
                'management_systems']
        else:
            mgmt_sys = cfme_data.management_systems

        for provider in providers:
            # skip provider if block_upload is set
            if (mgmt_sys[provider].get('template_upload') and
                    mgmt_sys[provider]['template_upload'].get('block_upload')):
                logger.info('Skipping upload on {} due to block_upload'.format(
                    provider))
                continue
            if kwargs['provider_data']:
                if mgmt_sys[provider]['type'] != 'virtualcenter':
                    continue
                username = mgmt_sys[provider]['username']
                password = mgmt_sys[provider]['password']
            else:
                creds = credentials[mgmt_sys[provider]['credentials']]
                username = creds['username']
                password = creds['password']
            host_ip = mgmt_sys[provider]['ipaddress']
            hostname = mgmt_sys[provider]['hostname']
            client = VMWareSystem(hostname, username, password)

            if not net.is_pingable(host_ip):
                continue
            thread = Thread(target=upload_template,
                            args=(client, hostname, username, password,
                                  provider, kwargs.get('image_url'),
                                  kwargs.get('template_name'),
                                  kwargs['provider_data'], kwargs['stream']))
            thread.daemon = True
            thread_queue.append(thread)
            thread.start()

        for thread in thread_queue:
            thread.join()
    except Exception:
        logger.exception('Exception during run method')
        return False
def run(**kwargs):

    try:
        thread_queue = []
        providers = list_provider_keys("virtualcenter")
        if kwargs['provider_data']:
            mgmt_sys = providers = kwargs['provider_data']['management_systems']
        else:
            mgmt_sys = cfme_data.management_systems

        for provider in providers:
            # skip provider if block_upload is set
            if (mgmt_sys[provider].get('template_upload') and
                    mgmt_sys[provider]['template_upload'].get('block_upload')):
                logger.info('Skipping upload on {} due to block_upload'.format(provider))
                continue
            if kwargs['provider_data']:
                if mgmt_sys[provider]['type'] != 'virtualcenter':
                    continue
                username = mgmt_sys[provider]['username']
                password = mgmt_sys[provider]['password']
            else:
                creds = credentials[mgmt_sys[provider]['credentials']]
                username = creds['username']
                password = creds['password']
            host_ip = mgmt_sys[provider]['ipaddress']
            hostname = mgmt_sys[provider]['hostname']
            client = VMWareSystem(hostname, username, password)

            if not net.is_pingable(host_ip):
                continue
            thread = Thread(target=upload_template,
                            args=(client, hostname, username, password, provider,
                                  kwargs.get('image_url'), kwargs.get('template_name'),
                                  kwargs['provider_data'], kwargs['stream']))
            thread.daemon = True
            thread_queue.append(thread)
            thread.start()

        for thread in thread_queue:
            thread.join()
    except Exception:
        logger.exception('Exception during run method')
        return False
def azure_cleanup(nic_template, pip_template, days_old, output):
    with open(output, 'w') as report:
        report.write('azure_cleanup.py, NICs, PIPs and Stack Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
        try:
            for provider_key in list_provider_keys('azure'):
                provider_mgmt = get_mgmt(provider_key)
                nic_list = provider_mgmt.list_free_nics(nic_template)
                report.write("----- Provider: {} -----\n".format(provider_key))
                if nic_list:
                    report.write("Removing Nics with the name \'{}\':\n".format(nic_template))
                    report.write("\n".join(str(k) for k in nic_list))
                    report.write("\n")
                    provider_mgmt.remove_nics_by_search(nic_template)
                else:
                    report.write("No \'{}\' NICs were found\n".format(nic_template))
                pip_list = provider_mgmt.list_free_pip(pip_template)
                if pip_list:
                    report.write("Removing Public IPs with the name \'{}\':\n".
                                 format(pip_template))
                    report.write("\n".join(str(k) for k in pip_list))
                    report.write("\n")
                    provider_mgmt.remove_pips_by_search(pip_template)
                else:
                    report.write("No \'{}\' Public IPs were found\n".format(pip_template))
                stack_list = provider_mgmt.list_stack(days_old=days_old)
                if stack_list:
                    report.write(
                        "Removing empty Stacks:\n")
                    for stack in stack_list:
                        if provider_mgmt.is_stack_empty(stack):
                            provider_mgmt.delete_stack(stack)
                            report.write("Stack {} is empty - Removed\n".format(stack))
                else:
                    report.write("No stacks older than \'{}\' days were found\n".format(
                        days_old))
            return 0
        except Exception:
            report.write("Something bad happened during Azure cleanup\n")
            report.write(tb.format_exc())
            return 1
def run(**kwargs):
    providers = list_provider_keys("scvmm")
    # Store result of each provider upload
    results = {provider: None for provider in providers}

    for provider in providers:
        mgmt_sys = cfme_data['management_systems'][provider]

        # skip provider if block_upload is set
        if (mgmt_sys.get('template_upload') and
                mgmt_sys['template_upload'].get('block_upload')):
            logger.info('SCVMM:%s skipping due to block_upload', provider)
            continue

        attempts = 2
        for i in range(0, attempts):
            try:
                results[provider] = False  # assume it has failed... till it has passed
                logger.info(
                    'SCVMM:%s create template attempt %d/%d', provider, i + 1, attempts)
                create_template(provider, **kwargs)
                logger.info('SCVMM:%s template creation done', provider)
                results[provider] = True
                break
            except Exception:
                logger.exception('Hit exception creating template on %s', provider)
                continue

    failed_providers = [provider for provider, result in results.items() if result is False]
    skipped_providers = [provider for provider, result in results.items() if result is None]
    passed_providers = [provider for provider, result in results.items() if result]

    logger.info("providers skipped: %s", skipped_providers)
    logger.info("providers passed: %s", passed_providers)
    logger.info("providers failed: %s", failed_providers)

    if not passed_providers:
        raise Exception("Template upload failed for all providers")
    else:
        logger.info("Upload passed for at least 1 provider... success!")
def run(**kwargs):
    # Setup defaults for the cli tool machine
    host = kwargs.get('ssh_host') or \
        cfme_data['template_upload']['template_upload_ec2']['aws_cli_tool_client']
    user = kwargs.get('ssh_user') or credentials['host_default']['username']
    passwd = kwargs.get('ssh_pass') or credentials['host_default']['password']
    # Download file once and thread uploading to different gce regions
    with make_ssh_client(host, user, passwd) as ssh_client:
        file_name, file_path = download_image_file(kwargs.get('image_url'), ssh_client)

    thread_queue = []
    for provider in list_provider_keys("gce"):
        # skip provider if block_upload is set
        provider_yaml = cfme_data.management_systems.get(provider)
        if (provider_yaml.get('template_upload') and
                provider_yaml.template_upload.get('block_upload')):
            logger.info('Skipping upload on {} due to block_upload'.format(provider))
            continue
        template_name = kwargs.get('template_name')
        bucket_name = kwargs.get('bucket_name')
        stream = kwargs.get('stream')
        with make_ssh_client(host, user, passwd) as ssh_client:
            thread = Thread(target=upload_template,
                            args=(provider,
                                  template_name,
                                  stream,
                                  file_name,
                                  file_path,
                                  ssh_client,
                                  bucket_name))
            thread.daemon = True
            thread_queue.append(thread)
            thread.start()

    for thread in thread_queue:
        thread.join()
def get_orphaned_vmware_files(provider=None):
    providers = [provider] if provider else list_provider_keys("virtualcenter")

    for provider_key in providers:
        # we can add thread here
        get_datastores_per_host(provider_key)
def main(trackerbot_url, mark_usable=None):
    api = trackerbot.api(trackerbot_url)

    thread_q = []
    thread_lock = Lock()
    template_providers = defaultdict(list)
    all_providers = set(list_provider_keys())
    unresponsive_providers = set()
    # Queue up list_template calls
    for provider_key in all_providers:
        ipaddress = cfme_data['management_systems'][provider_key].get('ipaddress')
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        thread = Thread(target=get_provider_templates,
            args=(provider_key, template_providers, unresponsive_providers, thread_lock))
        thread_q.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_q:
        thread.join()

    seen_templates = set()

    if mark_usable is None:
        usable = {}
    else:
        usable = {'usable': mark_usable}

    existing_provider_templates = [
        pt['id']
        for pt
        in trackerbot.depaginate(api, api.providertemplate.get())['objects']]

    # Find some templates and update the API
    for template_name, providers in template_providers.items():
        template_name = str(template_name)

        group_name, datestamp, stream = trackerbot.parse_template(template_name)

        # Don't want sprout templates
        if group_name in ('sprout', 'rhevm-internal'):
            print('Ignoring {} from group {}'.format(template_name, group_name))
            continue

        seen_templates.add(template_name)
        group = trackerbot.Group(group_name, stream=stream)
        template = trackerbot.Template(template_name, group, datestamp)

        for provider_key in providers:
            provider = trackerbot.Provider(provider_key)

            if '{}_{}'.format(template_name, provider_key) in existing_provider_templates:
                print('Template {} already tracked for provider {}'.format(
                    template_name, provider_key))
                continue

            try:
                trackerbot.mark_provider_template(api, provider, template, **usable)
                print('Added {} template {} on provider {} (datestamp: {})'.format(
                    group_name, template_name, provider_key, datestamp))
            except SlumberHttpBaseException as ex:
                print("{}\t{}".format(ex.response.status_code, ex.content))

    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    for pt in trackerbot.depaginate(api, api.providertemplate.get())['objects']:
        provider_key, template_name = pt['provider']['key'], pt['template']['name']
        if provider_key not in template_providers[template_name] \
                and provider_key not in unresponsive_providers:
            if provider_key in all_providers:
                print("Cleaning up template {} on {}".format(template_name, provider_key))
                trackerbot.delete_provider_template(api, provider_key, template_name)
            else:
                print("Skipping template cleanup {} on unknown provider {}".format(
                    template_name, provider_key))

    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(api, api.template.get())['objects']:
        if not template['providers']:
            print("Deleting template {} (no providers)".format(template['name']))
            api.template(template['name']).delete()
Example #29
    if not provider_type or cmd_args.provider:
        provider_types = PROVIDER_TYPES
    elif provider_type in PROVIDER_TYPES:
        provider_types = [
            provider_type,
        ]
    else:
        logger.error('Template upload for %r is not implemented yet.',
                     provider_type)
        sys.exit(1)

    thread_queue = []

    # create uploader objects for each provider
    for provider_type in provider_types:
        provider_keys = list_provider_keys(provider_type)
        if cmd_args.provider:
            provider_keys = [
                x for x in cmd_args.provider if x in provider_keys
            ]

        for provider_key in provider_keys:
            if provider_key not in list_provider_keys(provider_type):
                continue

            # pulling class by provider type
            provider_template_upload = (
                cfme_data.management_systems[provider_key].get(
                    'template_upload', {}))
            uploader = CLASS_MAP[provider_type](
                provider_key=provider_key,
Example #30
                output_list.append([provider_key,
                                    vm_name,
                                    status or NULL,
                                    creation or NULL,
                                    str(vm_type) or NULL])

    output_queue.put(output_list)
    return


if __name__ == "__main__":
    args = parse_cmd_line()
    # providers as a set when processing tags to ensure unique entries
    providers = set(args.provider)
    process_tags(providers, args.tag)
    providers = providers or list_provider_keys()

    queue = Queue()  # for MP output
    proc_list = [
        Process(target=list_vms, args=(provider, queue), name='list_vms:{}'.format(provider))
        for provider in providers
    ]
    for proc in proc_list:
        proc.start()
    for proc in proc_list:
        proc.join()

    print('Done processing providers, assembling report...')

    # Now pull all the results off of the queue
    # Stacking the generator this way is equivalent to using list.extend instead of list.append
def run(template_name, image_url, stream, **kwargs):
    """
    Download file from image_url, upload it to an S3 bucket and import into ec2

    Should handle all ec2 regions and minimize uploading by copying images
    :param template_name: string name of the template
    :param image_url: string url to download template image
    :param stream: string stream name
    :param kwargs: other kwargs
    :return: none
    """
    mgmt_sys = cfme_data['management_systems']
    ami_name = template_name
    prov_to_upload = []
    valid_providers = [
        prov_key
        for prov_key in list_provider_keys("ec2")
        if ('disabled' not in mgmt_sys[prov_key]['tags'] and
            not mgmt_sys[prov_key].get('template_upload', {}).get('block_upload', False))
    ]

    if valid_providers:
        logger.info("Uploading to following enabled ec2 providers/regions: %r", valid_providers)
    else:
        logger.info('ERROR: No providers found with ec2 type and no disabled tag')
        return

    # Look for template name on all ec2 regions, in case we can just copy it
    # Also ec2 lets you upload duplicate names, so we'll skip upload if it's already there
    for prov_key in valid_providers:
        ec2 = get_mgmt(provider_key=prov_key)
        try:
            ami_id = check_for_ami(ec2, ami_name)
        except MultipleImagesError:
            logger.info('ERROR: Already multiple images with name "%r"', ami_name)
            return

        if ami_id:
            # TODO roll this into a flag that copies it to regions without it
            logger.info('EC2 %r: AMI already exists with name "%r"', prov_key, ami_name)
            continue
        else:
            # Need to upload on this region
            prov_to_upload.append(prov_key)

    # See if we actually need to upload
    if not prov_to_upload:
        logger.info('DONE: No templates to upload, all regions have the ami: "%r"', ami_name)
        return

    # download image
    logger.info("INFO: Starting image download %r ...", kwargs.get('image_url'))
    file_name, file_path = download_image_file(image_url)
    logger.info("INFO: Image downloaded %r ...", file_path)

    # TODO: thread + copy within amazon for when we have multiple regions enabled
    # create ami's in the regions
    for prov_key in prov_to_upload:
        region = mgmt_sys[prov_key].get('region', prov_key)
        bucket_name = mgmt_sys[prov_key].get('upload_bucket_name', 'cfme-template-upload')

        logger.info('EC2:%r:%r Starting S3 upload of %r', prov_key, region, file_path)
        ec2 = get_mgmt(provider_key=prov_key)
        upload_to_s3(ec2=ec2, bucket_name=bucket_name, ami_name=ami_name, file_path=file_path)

        create_image(ec2=ec2, ami_name=ami_name, bucket_name=bucket_name)

        cleanup_s3(ec2=ec2, bucket_name=bucket_name, ami_name=ami_name)

        # Track it
        logger.info("EC2:%r:%r Adding template %r to trackerbot for stream %r",
                    prov_key, region, ami_name, stream)
        trackerbot.trackerbot_add_provider_template(stream, prov_key, ami_name)
        logger.info('EC2:%r:%r Template %r creation complete', prov_key, region, ami_name)
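An illustrative invocation of the EC2 upload entry point above; the template name, image URL and stream values are placeholders, not real build artifacts.

# Placeholder values for illustration only
run(template_name='cfme-template-example',
    image_url='http://example.com/builds/cfme-template-example.vhd',
    stream='downstream-example')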
def main(trackerbot_url, mark_usable=None, selected_provider=None):
    api = trackerbot.api(trackerbot_url)

    thread_q = []
    thread_lock = Lock()
    template_providers = defaultdict(list)
    all_providers = (set(list_provider_keys())
                     if not selected_provider
                     else set(selected_provider))
    unresponsive_providers = set()
    # Queue up list_template calls
    for provider_key in all_providers:
        ipaddress = cfme_data.management_systems[provider_key].get('ipaddress')
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        thread = Thread(target=get_provider_templates,
            args=(provider_key, template_providers, unresponsive_providers, thread_lock))
        thread_q.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_q:
        thread.join()

    seen_templates = set()

    if mark_usable is None:
        usable = {}
    else:
        usable = {'usable': mark_usable}

    existing_provider_templates = [
        pt['id']
        for pt
        in trackerbot.depaginate(api, api.providertemplate.get())['objects']]

    # Find some templates and update the API
    for template_name, providers in template_providers.items():
        template_name = str(template_name)
        template_info = TemplateName.parse_template(template_name)

        # it turned out that some providers like ec2 may have templates w/o names.
        # this is easy protection against such issue.
        if not template_name.strip():
            logger.warning('Ignoring template w/o name on provider %s', provider_key)
            continue

        # Don't want sprout templates
        if template_info.group_name in ('sprout', 'rhevm-internal'):
            logger.info('Ignoring %s from group %s', template_name, template_info.group_name)
            continue

        seen_templates.add(template_name)
        group = trackerbot.Group(template_info.group_name, stream=template_info.stream)
        try:
            template = trackerbot.Template(template_name, group, template_info.datestamp)
        except ValueError:
            logger.exception('Failure parsing template %s (providers: %s)',
                             template_name, providers)
            continue

        for provider_key in providers:
            provider = trackerbot.Provider(provider_key)

            if '{}_{}'.format(template_name, provider_key) in existing_provider_templates:
                logger.info('Template %s already tracked for provider %s',
                            template_name, provider_key)
                continue

            try:
                trackerbot.mark_provider_template(api, provider, template, **usable)
                logger.info('Added %s template %s on provider %s (datestamp: %s)',
                            template_info.group_name,
                            template_name,
                            provider_key,
                            template_info.datestamp)
            except SlumberHttpBaseException:
                logger.exception('%s: exception marking template %s', provider, template)

    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    for pt in trackerbot.depaginate(api, api.providertemplate.get())['objects']:
        key, template_name = pt['provider']['key'], pt['template']['name']
        if key not in template_providers[template_name] and key not in unresponsive_providers:
            if key in all_providers:
                logger.info("Cleaning up template %s on %s", template_name, key)
                trackerbot.delete_provider_template(api, key, template_name)
            else:
                logger.info("Skipping template cleanup %s on unknown provider %s",
                            template_name, key)

    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(api, api.template.get())['objects']:
        if not template['providers'] and template['name'].strip():
            logger.info("Deleting template %s (no providers)", template['name'])
            api.template(template['name']).delete()
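
The trackerbot.depaginate(api, api.providertemplate.get()) calls above walk a paginated REST listing. As a rough illustration only (the real helper lives in the trackerbot module), a depagination loop for a TastyPie-style payload carrying meta.next and objects keys could look like the sketch below; the base URL argument is a placeholder.

import requests


def depaginate_sketch(base_url, first_page):
    """Collect 'objects' from every page by following the meta['next'] links."""
    objects = list(first_page.get('objects', []))
    next_path = first_page.get('meta', {}).get('next')
    while next_path:
        page = requests.get(base_url + next_path).json()
        objects.extend(page.get('objects', []))
        next_path = page.get('meta', {}).get('next')
    return {'objects': objects}
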
Beispiel #33
0
def azure_cleanup(nic_template, pip_template, days_old):
    logger.info('azure_cleanup.py, NICs, PIPs, Disks and Stack Cleanup')
    logger.info("Date: {}".format(datetime.now()))
    try:
        for prov_key in list_provider_keys('azure'):
            logger.info("----- Provider: {} -----".format(prov_key))
            mgmt = get_mgmt(prov_key)
            mgmt.logger = logger
            for name, scr_id in mgmt.list_subscriptions():
                logger.info("subscription {s} is chosen".format(s=name))
                setattr(mgmt, 'subscription_id', scr_id)
                # removing stale nics
                removed_nics = mgmt.remove_nics_by_search(nic_template)
                if removed_nics:
                    logger.info('following nics were removed:')
                    for nic in removed_nics:
                        logger.info(nic[0])
                else:
                    logger.info("No '{}' NICs were found".format(nic_template))

                # removing public ips
                removed_pips = mgmt.remove_pips_by_search(pip_template)
                if removed_pips:
                    logger.info('following pips were removed:')
                    for pip in removed_pips:
                        logger.info(pip[0])
                else:
                    logger.info("No '{}' Public IPs were found".format(pip_template))

            # removing stale stacks
            stack_list = mgmt.list_stack(days_old=days_old)
            if stack_list:
                logger.info("Removing empty Stacks:")
                removed_stacks = []
                for stack in stack_list:
                    if mgmt.is_stack_empty(stack):
                        removed_stacks.append(stack)
                        mgmt.delete_stack(stack)

                logger.info('following stacks were removed:')
                for stack in removed_stacks:
                    logger.info([stack])
            else:
                logger.info("No stacks older than '{}' days were found".format(
                    days_old))

            """
            Blob removal section
            """
            # TODO: update it later to use different subscriptions and resource groups
            logger.info("Removing 'bootdiagnostics-test*' containers")
            bootdiag_list = []
            for container in mgmt.container_client.list_containers():
                if container.name.startswith('bootdiagnostics-test'):
                    bootdiag_list.append(container.name)
                    mgmt.container_client.delete_container(
                        container_name=container.name)

            logger.info('following disks were removed:')
            for disk in bootdiag_list:
                logger.info([disk])

            logger.info("Removing unused blobs and disks")
            removed_disks = mgmt.remove_unused_blobs()
            if len(removed_disks['Managed']) > 0:
                logger.info('Managed disks:')
                logger.info(removed_disks['Managed'])

            if len(removed_disks['Unmanaged']) > 0:
                logger.info('Unmanaged blobs:')
                logger.info(removed_disks['Unmanaged'])
        return 0
    except Exception:
        logger.info("Something bad happened during Azure cleanup")
        logger.info(tb.format_exc())
        return 1
Beispiel #34
0
def generate_html_report(api, stream, filename, appliance_template):

    status = 'PASSED'
    number_of_images_before = len(images_uploaded(stream))
    if get_untested_templates(api, stream, appliance_template):
        print(
            'report will not be generated, proceed with the next untested provider'
        )
        sys.exit()
    stream_data = get_latest_tested_template_on_stream(api, stream,
                                                       appliance_template)

    if len(images_uploaded(stream)) > number_of_images_before:
        print(
            "new images are uploaded on latest directory, wait for upload on providers"
        )
        wait_for_templates_on_providers(api, stream, appliance_template)
    if appliance_template and appliance_template != stream_data[
            'template_name']:
        print("the report will be generated only for the latest templates")
        sys.exit()

    if stream_data and not get_untested_templates(
            api, stream_data['group_name'], appliance_template):
        print("Found tested template for {}".format(stream))
        print("Gathering tested template data for {}".format(stream))
        print("Updating the template log")
        stream_html = [
            stream_data['template_name'], stream_data['passed_on_providers'],
            stream_data['failed_on_providers'], stream_data['group_name'],
            stream_data['datestamp']
        ]
        if 'html' in filename:
            data = template_env.get_template(
                'template_tester_report.html').render(upstream=stream_html)
            with open(filename, 'w') as report:
                report.write(data)
        else:
            with open(filename, 'a+') as report:

                if 'template_rhos' not in images_uploaded(stream):
                    print(
                        '\n\nMISSING: Image for OpenStack in latest directory')
                    report.write(
                        '\n\nMISSING: Image for OpenStack in latest directory')
                elif provider_in_the_list(list_provider_keys('openstack'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_rhos']))
                    list(
                        map(
                            lambda x: report.write('\n{}: Passed'.format(x)),
                            provider_in_the_list(
                                list_provider_keys('openstack'),
                                stream_data['passed_on_providers'])))
                elif provider_in_the_list(list_provider_keys('openstack'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_rhos']))
                    list(
                        map(
                            lambda x: report.write('\n{}: Failed'.format(x)),
                            provider_in_the_list(
                                list_provider_keys('openstack'),
                                stream_data['failed_on_providers'])))
                else:
                    print(
                        '\n\nMISSING: OpenStack template is not available on any '
                        'rhos providers yet')
                    report.write(
                        '\n\nMISSING: OpenStack template is not available on any '
                        'rhos providers yet')

                if 'template_rhevm' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for RHEVM in latest directory')
                    report.write(
                        '\n\nMISSING: Image for RHEVM in latest directory')
                elif provider_in_the_list(list_provider_keys('rhevm'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_rhevm']))
                    list(
                        map(
                            lambda x: report.write('\n{}: Passed'.format(x)),
                            provider_in_the_list(
                                list_provider_keys('rhevm'),
                                stream_data['passed_on_providers'])))
                elif provider_in_the_list(list_provider_keys('rhevm'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_rhevm']))
                    list(
                        map(
                            lambda x: report.write('\n{}: Failed'.format(x)),
                            provider_in_the_list(
                                list_provider_keys('rhevm'),
                                stream_data['failed_on_providers'])))
                else:
                    print(
                        '\n\nMISSING: RHEVM template is not available on any '
                        'rhevm providers yet')
                    report.write(
                        '\n\nMISSING: RHEVM template is not available on any '
                        'rhevm providers yet')

                if 'template_vsphere' not in images_uploaded(stream):
                    print(
                        '\n\nMISSING: Image for VIRTUALCENTER in latest directory'
                    )
                    report.write(
                        '\n\nMISSING: Image for VIRTUALCENTER in latest directory'
                    )
                elif provider_in_the_list(list_provider_keys('virtualcenter'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_vsphere']))
                    list(
                        map(
                            lambda x: report.write('\n{}: Passed'.format(x)),
                            provider_in_the_list(
                                list_provider_keys('virtualcenter'),
                                stream_data['passed_on_providers'])))
                elif provider_in_the_list(list_provider_keys('virtualcenter'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_vsphere']))
                    list(
                        map(
                            lambda x: report.write('\n{}: Failed'.format(x)),
                            provider_in_the_list(
                                list_provider_keys('virtualcenter'),
                                stream_data['failed_on_providers'])))
                else:
                    print(
                        '\n\nMISSING: VIRTUALCENTER template is not available on any '
                        'vmware providers yet')
                    report.write(
                        '\n\nMISSING: VIRTUALCENTER template is not available on any '
                        'vmware providers yet')

                if 'template_scvmm' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for SCVMM in latest directory')
                    report.write(
                        '\n\nMISSING: Image for SCVMM in latest directory')
                elif provider_in_the_list(list_provider_keys('scvmm'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_scvmm']))
                    list(
                        map(
                            lambda x: report.write('\n{}: Passed'.format(x)),
                            provider_in_the_list(
                                list_provider_keys('scvmm'),
                                stream_data['passed_on_providers'])))
                elif provider_in_the_list(list_provider_keys('scvmm'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_scvmm']))
                    list(
                        map(
                            lambda x: report.write('\n{}: Failed'.format(x)),
                            provider_in_the_list(
                                list_provider_keys('scvmm'),
                                stream_data['failed_on_providers'])))
                else:
                    print(
                        '\n\nMISSING: SCVMM template is not available on any '
                        'scvmm providers yet')
                    report.write(
                        '\n\nMISSING: SCVMM template is not available on any '
                        'scvmm providers yet')
                report.seek(0, 0)
                lines = report.readlines()
                template_missing = list(filter(lambda x: "MISSING" in x, lines))
                template_passed = list(filter(lambda x: "PASSED" in x, lines))
                template_failed = list(filter(lambda x: "FAILED" in x, lines))
                if template_failed:
                    status = "FAILED"

                if template_missing and not (template_passed
                                             or template_failed):
                    report.close()
                    sys.exit(
                        "Template is MISSING....Please verify uploads....")

        print("template_tester_results report generated:{}".format(status))
    else:
        print("No Templates tested on: {}".format(datetime.datetime.now()))
Beispiel #35
0
def run(**kwargs):

    for provider in list_provider_keys("scvmm"):

        kwargs = make_kwargs_scvmm(cfme_data, provider,
                                   kwargs.get('image_url'),
                                   kwargs.get('template_name'))
        check_kwargs(**kwargs)
        mgmt_sys = cfme_data['management_systems'][provider]
        host_fqdn = mgmt_sys['hostname_fqdn']
        creds = credentials[mgmt_sys['credentials']]

        # For powershell to work, we need to extract the User Name from the Domain
        user = creds['username'].split('\\')
        if len(user) == 2:
            username_powershell = user[1]
        else:
            username_powershell = user[0]

        username_scvmm = creds['domain'] + "\\" + creds['username']

        scvmm_args = {
            "hostname": mgmt_sys['ipaddress'],
            "username": username_powershell,
            "password": creds['password'],
            "domain": creds['domain'],
            "provisioning": mgmt_sys['provisioning']
        }
        client = SCVMMSystem(**scvmm_args)

        url = kwargs.get('image_url')

        # Template name equals either user input or the name extracted from the url
        new_template_name = kwargs.get('template_name')
        if new_template_name is None:
            new_template_name = os.path.basename(url)[:-4]

        logger.info("SCVMM:%s Make Template out of the VHD %s", provider,
                    new_template_name)

        # library is either user input or the value from cfme_data
        library = kwargs.get('library',
                             mgmt_sys['template_upload'].get('vhds', None))

        logger.info("SCVMM:%s Template Library: %s", provider, library)

        # The VHD name is changed to match the template_name.
        new_vhd_name = new_template_name + '.vhd'

        network = mgmt_sys['template_upload'].get('network', None)
        os_type = mgmt_sys['template_upload'].get('os_type', None)
        cores = mgmt_sys['template_upload'].get('cores', None)
        ram = mgmt_sys['template_upload'].get('ram', None)

        # Uses PowerShell Get-SCVMTemplate to list templates and aborts if one already exists.
        if not client.does_template_exist(new_template_name):
            if kwargs.get('upload'):
                logger.info(
                    "SCVMM:%s Uploading VHD image to Library VHD folder.",
                    provider)
                upload_vhd(client, url, library, new_vhd_name)
            if kwargs.get('template'):
                logger.info("SCVMM:%s Make Template out of the VHD %s",
                            provider, new_template_name)

                make_template(client, host_fqdn, new_template_name, library,
                              network, os_type, username_scvmm, cores, ram)
            try:
                wait_for(lambda: client.does_template_exist(new_template_name),
                         fail_condition=False,
                         delay=5)
                logger.info("SCVMM:%s template %s uploaded success", provider,
                            new_template_name)
                logger.info("SCVMM:%s Add template %s to trackerbot", provider,
                            new_template_name)
                trackerbot.trackerbot_add_provider_template(
                    kwargs.get('stream'), provider, new_template_name)
            except Exception:
                logger.exception(
                    "SCVMM:%s Exception verifying the template %s", provider,
                    new_template_name)
        else:
            logger.info(
                "SCVMM: A Template with that name already exists in the SCVMMLibrary"
            )
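
The client.does_template_exist() check above is described as running PowerShell Get-SCVMTemplate. A hypothetical stand-in using pywinrm, assuming WinRM access to the SCVMM host (the real SCVMMSystem wrapper hides this), might look like:

import winrm  # pywinrm


def does_template_exist_sketch(hostname, username, password, template_name):
    # Ask SCVMM for templates matching the name; any output means it exists.
    session = winrm.Session(hostname, auth=(username, password))
    result = session.run_ps(
        "Get-SCVMTemplate | Where-Object {{ $_.Name -eq '{}' }} "
        "| Select-Object -ExpandProperty Name".format(template_name))
    return bool(result.std_out.strip())
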
Beispiel #36
0
def main(trackerbot_url, mark_usable=None, selected_provider=None):
    api = trackerbot.api(trackerbot_url)

    thread_q = []
    thread_lock = Lock()
    template_providers = defaultdict(list)
    all_providers = (set(list_provider_keys())
                     if not selected_provider else set(selected_provider))
    unresponsive_providers = set()
    # Queue up list_template calls
    for provider_key in all_providers:
        ipaddress = cfme_data.management_systems[provider_key].get('ipaddress')
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        thread = Thread(target=get_provider_templates,
                        args=(provider_key, template_providers,
                              unresponsive_providers, thread_lock))
        thread_q.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_q:
        thread.join()

    seen_templates = set()

    if mark_usable is None:
        usable = {}
    else:
        usable = {'usable': mark_usable}

    existing_provider_templates = [
        pt['id'] for pt in trackerbot.depaginate(
            api, api.providertemplate.get())['objects']
    ]

    # Find some templates and update the API
    for template_name, providers in template_providers.items():
        template_name = str(template_name)
        template_info = TemplateName.parse_template(template_name)

        # Don't want sprout templates
        if template_info.group_name in ('sprout', 'rhevm-internal'):
            logger.info('Ignoring %s from group %s', template_name,
                        template_info.group_name)
            continue

        seen_templates.add(template_name)
        group = trackerbot.Group(template_info.group_name,
                                 stream=template_info.stream)
        try:
            template = trackerbot.Template(template_name, group,
                                           template_info.datestamp)
        except ValueError:
            logger.exception('Failure parsing template %s (providers: %s)',
                             template_name, providers)
            continue

        for provider_key in providers:
            provider = trackerbot.Provider(provider_key)

            if '{}_{}'.format(template_name,
                              provider_key) in existing_provider_templates:
                logger.info('Template %s already tracked for provider %s',
                            template_name, provider_key)
                continue

            try:
                trackerbot.mark_provider_template(api, provider, template,
                                                  **usable)
                logger.info(
                    'Added %s template %s on provider %s (datestamp: %s)',
                    template_info.group_name, template_name, provider_key,
                    template_info.datestamp)
            except SlumberHttpBaseException:
                logger.exception('%s: exception marking template %s', provider,
                                 template)

    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    for pt in trackerbot.depaginate(api,
                                    api.providertemplate.get())['objects']:
        key, template_name = pt['provider']['key'], pt['template']['name']
        if key not in template_providers[
                template_name] and key not in unresponsive_providers:
            if key in all_providers:
                logger.info("Cleaning up template %s on %s", template_name,
                            key)
                trackerbot.delete_provider_template(api, key, template_name)
            else:
                logger.info(
                    "Skipping template cleanup %s on unknown provider %s",
                    template_name, key)

    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(api, api.template.get())['objects']:
        if not template['providers']:
            logger.info("Deleting template %s (no providers)",
                        template['name'])
            api.template(template['name']).delete()
def main(*providers):
    for provider_key in providers:
        print("Cleaning up {}".format(provider_key))
        api = get_mgmt(provider_key).capi
        try:
            volumes = api.volumes.findall(attachments=[])
        except Exception as e:
            print("Connect to provider failed:{} {} {}".format(
                provider_key,
                type(e).__name__, str(e)))
            continue

        for volume in volumes:
            if iso8601.parse_date(volume.created_at) < (
                    datetime.now(tz=local_tz) - GRACE_TIME):
                print("Deleting {}".format(volume.id))
                try:
                    volume.delete()
                except Exception as e:
                    print("Delete failed: {} {}".format(
                        type(e).__name__, str(e)))


if __name__ == "__main__":
    provs = sys.argv[1:]
    if provs:
        main(*provs)
    else:
        main(*list_provider_keys("openstack"))
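
This snippet references GRACE_TIME, local_tz and iso8601 without showing their setup; a minimal sketch of the module-level definitions it appears to assume (the two-hour grace period is illustrative):

import iso8601
from datetime import datetime, timedelta
from dateutil import tz

local_tz = tz.tzlocal()          # volumes' created_at is compared against local time
GRACE_TIME = timedelta(hours=2)  # assumed minimum age before a detached volume is deleted
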
Beispiel #38
0
        print(template_name)
        sys.exit(0)

    if not provider_type or cmd_args.provider:
        provider_types = PROVIDER_TYPES
    elif provider_type in PROVIDER_TYPES:
        provider_types = [provider_type, ]
    else:
        logger.error('Template upload for %r is not implemented yet.', provider_type)
        sys.exit(1)

    thread_queue = []

    # threaded loop
    for provider_type in provider_types:
        provider_keys = list_provider_keys(provider_type)
        if cmd_args.provider:
            provider_keys = [key for key in cmd_args.provider if key in provider_keys]

        for provider_key in provider_keys:
            if provider_key not in list_provider_keys(provider_type):
                continue

            # pulling class by provider type
            provider_template_upload = (cfme_data.management_systems[provider_key]
                                        .get('template_upload', {}))
            uploader = CLASS_MAP[provider_type](
                provider_key=provider_key,
                stream=stream,
                template_name=template_name,
                image_url=image_url,
def run(**kwargs):
    """Calls all the functions needed to upload new template to RHEVM.
       This is called either by template_upload_all script, or by main function.

    Args:
        **kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm'].
    """
    thread_queue = []
    valid_providers = []

    providers = list_provider_keys("rhevm")
    if kwargs['provider_data']:
        mgmt_sys = providers = kwargs['provider_data']['management_systems']
    for provider in providers:
        if kwargs['provider_data']:
            if mgmt_sys[provider]['type'] != 'rhevm':
                continue
            sshname = mgmt_sys[provider]['sshname']
            sshpass = mgmt_sys[provider]['sshpass']
            rhevip = mgmt_sys[provider]['ipaddress']
        else:
            mgmt_sys = cfme_data['management_systems']
            ssh_rhevm_creds = mgmt_sys[provider]['ssh_creds']
            sshname = credentials[ssh_rhevm_creds]['username']
            sshpass = credentials[ssh_rhevm_creds]['password']
            rhevip = mgmt_sys[provider]['ipaddress']

        if (mgmt_sys[provider].get('template_upload') and
                mgmt_sys[provider]['template_upload'].get('block_upload')):
            # Provider's template_upload section indicates upload should not happen on this provider
            continue

        logger.info("RHEVM:%r verifying provider's state before template upload", provider)
        if not net.is_pingable(rhevip):
            continue
        elif not is_ovirt_engine_running(rhevip, sshname, sshpass):
            logger.info('RHEVM:%r ovirt-engine service not running..', provider)
            continue
        valid_providers.append(provider)

    for provider in valid_providers:
        if kwargs['provider_data']:
            sshname = mgmt_sys[provider]['sshname']
            sshpass = mgmt_sys[provider]['sshpass']
            username = mgmt_sys[provider]['username']
            password = mgmt_sys[provider]['password']
        else:
            ssh_rhevm_creds = mgmt_sys[provider]['ssh_creds']
            sshname = credentials[ssh_rhevm_creds]['username']
            sshpass = credentials[ssh_rhevm_creds]['password']
            rhevm_credentials = mgmt_sys[provider]['credentials']
            username = credentials[rhevm_credentials]['username']
            password = credentials[rhevm_credentials]['password']
        rhevip = mgmt_sys[provider]['ipaddress']
        thread = Thread(target=upload_template,
                        args=(rhevip, sshname, sshpass, username, password, provider,
                              kwargs.get('image_url'), kwargs.get('template_name'),
                              kwargs['provider_data'], kwargs['stream'],
                              kwargs['glance']))
        thread.daemon = True
        thread_queue.append(thread)
        thread.start()

    for thread in thread_queue:
        thread.join()
"""
import sys
from traceback import format_exc

from cfme.utils.providers import list_provider_keys, get_mgmt


def main(*providers):
    for provider_key in providers:
        print('Checking {}'.format(provider_key))
        api = get_mgmt(provider_key).api
        try:
            fips = api.floating_ips.findall(fixed_ip=None)
        except Exception:
            print('Unable to get fips for {}:'.format(provider_key))
            print(format_exc().splitlines()[-1])
            continue

        for fip in fips:
            print('Deleting {} on {}'.format(fip.ip, provider_key))
            fip.delete()
            print('{} deleted'.format(fip.ip))


if __name__ == "__main__":
    provs = sys.argv[1:]
    if provs:
        main(*provs)
    else:
        main(*list_provider_keys("openstack"))
def get_orphaned_vmware_files(provider=None):
    providers = [provider] if provider else list_provider_keys("virtualcenter")

    for provider_key in providers:
        # threading could be added here
        get_datastores_per_host(provider_key)
def cleanup_vms(texts, max_hours=24, providers=None, prompt=True):
    """
    Main method for the cleanup process
    Generates regex match objects
    Checks providers for cleanup boolean in yaml
    Checks provider connectivity (using ping)
    Threads process_provider_vms to build list of vms to delete
    Prompts user to continue with delete
    Threads deleting of the vms

    Args:
        texts (list): List of regex strings to match with
        max_hours (int): age limit for deletion
        providers (list): List of provider keys to scan and cleanup
        prompt (bool): Whether or not to prompt the user before deleting vms
    Returns:
        int: return code, 0 on success, otherwise raises exception
    """
    logger.info(
        'Matching VM names against the following case-insensitive strings: %r',
        texts)
    # Compile regex, strip leading/trailing single quotes from cli arg
    matchers = [re.compile(text.strip("'"), re.IGNORECASE) for text in texts]

    providers_to_scan = []
    for provider_key in providers or list_provider_keys():
        # check for cleanup boolean
        if not cfme_data['management_systems'][provider_key].get(
                'cleanup', False):
            logger.info('SKIPPING %r, cleanup set to false or missing in yaml',
                        provider_key)
            continue
        logger.info('SCANNING %r', provider_key)
        providers_to_scan.append(provider_key)

    # scan providers for vms with name matches
    # 'manager' is assumed to be a module-level multiprocessing.Manager()
    text_match_queue = manager.Queue()
    scan_fail_queue = manager.Queue()
    provider_scan_args = [(provider_key, matchers, text_match_queue,
                           scan_fail_queue)
                          for provider_key in providers_to_scan]
    pool_manager(scan_provider, provider_scan_args)

    text_matched = []
    while not text_match_queue.empty():
        text_matched.append(text_match_queue.get())

    # scan vms for age matches
    age_match_queue = manager.Queue()
    vm_scan_args = [(provider_key, vm_name, timedelta(hours=int(max_hours)),
                     age_match_queue, scan_fail_queue)
                    for provider_key, vm_name in text_matched]
    pool_manager(scan_vm, vm_scan_args)

    vms_to_delete = []
    while not age_match_queue.empty():
        vms_to_delete.append(age_match_queue.get())

    scan_fail_vms = []
    # add the scan failures into deleted vms for reporting sake
    while not scan_fail_queue.empty():
        scan_fail_vms.append(scan_fail_queue.get())

    if vms_to_delete and prompt:
        yesno = raw_input('Delete these VMs? [y/N]: ')
        if str(yesno).lower() != 'y':
            logger.info('Exiting.')
            return 0

    # initialize this even if we don't have anything to delete, for report consistency
    deleted_vms = []
    if vms_to_delete:
        delete_queue = manager.Queue()
        delete_vm_args = [(provider_key, vm_name, age, delete_queue)
                          for provider_key, vm_name, age in vms_to_delete]
        pool_manager(delete_vm, delete_vm_args)

        while not delete_queue.empty():
            deleted_vms.append(
                delete_queue.get())  # Each item is a VmReport tuple

    else:
        logger.info('No VMs to delete.')

    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '##   text matches: {}\n'
                     '##   age matches: {}\n'.format(texts, max_hours))
        message = tabulate(
            sorted(scan_fail_vms + deleted_vms, key=attrgetter('result')),
            headers=['Provider', 'Name', 'Age', 'Status Before', 'Delete RC'],
            tablefmt='orgtbl')
        report.write(message + '\n')
    logger.info(message)
    return 0
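
A hypothetical invocation of cleanup_vms (the regex strings and provider keys below are placeholders):

rc = cleanup_vms(
    texts=["'^test_'", "'^sprout_'"],  # leading/trailing single quotes are stripped by the function
    max_hours=72,
    providers=['vsphere67', 'rhv44'],  # illustrative provider keys
    prompt=False)
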
def run(**kwargs):

    for provider in list_provider_keys("scvmm"):

        # skip provider if block_upload is set
        provider_cfg = cfme_data['management_systems'][provider]
        if (provider_cfg.get('template_upload') and
                provider_cfg['template_upload'].get('block_upload')):
            logger.info('Skipping upload on {} due to block_upload'.format(provider))
            continue

        kwargs = make_kwargs_scvmm(cfme_data, provider,
                                   kwargs.get('image_url'), kwargs.get('template_name'))
        check_kwargs(**kwargs)
        mgmt_sys = cfme_data['management_systems'][provider]
        host_fqdn = mgmt_sys['hostname_fqdn']
        creds = credentials[mgmt_sys['credentials']]

        # For powershell to work, we need to extract the User Name from the Domain
        user = creds['username'].split('\\')
        if len(user) == 2:
            username_powershell = user[1]
        else:
            username_powershell = user[0]

        username_scvmm = creds['domain'] + "\\" + creds['username']

        scvmm_args = {
            "hostname": mgmt_sys['ipaddress'],
            "username": username_powershell,
            "password": creds['password'],
            "domain": creds['domain'],
            "provisioning": mgmt_sys['provisioning']
        }
        client = SCVMMSystem(**scvmm_args)

        url = kwargs.get('image_url')

        # Template name equals either user input or the name extracted from the url
        new_template_name = kwargs.get('template_name')
        if new_template_name is None:
            new_template_name = os.path.basename(url)[:-4]

        logger.info("SCVMM:%s Make Template out of the VHD %s", provider, new_template_name)

        # library is either user input or the value from cfme_data
        library = kwargs.get('library', mgmt_sys['template_upload'].get('vhds', None))

        logger.info("SCVMM:%s Template Library: %s", provider, library)

        # The VHD name is changed to match the template_name.
        new_vhd_name = new_template_name + '.vhd'

        network = mgmt_sys['template_upload'].get('network', None)
        os_type = mgmt_sys['template_upload'].get('os_type', None)
        cores = mgmt_sys['template_upload'].get('cores', None)
        ram = mgmt_sys['template_upload'].get('ram', None)

        # Uses PowerShell Get-SCVMTemplate to list templates and aborts if one already exists.
        if not client.does_template_exist(new_template_name):
            if kwargs.get('upload'):
                logger.info("SCVMM:%s Uploading VHD image to Library VHD folder.", provider)
                upload_vhd(client, url, library, new_vhd_name)
            if kwargs.get('template'):
                logger.info("SCVMM:%s Make Template out of the VHD %s", provider, new_template_name)

                make_template(
                    client,
                    host_fqdn,
                    new_template_name,
                    library,
                    network,
                    os_type,
                    username_scvmm,
                    cores,
                    ram
                )
            try:
                wait_for(lambda: client.does_template_exist(new_template_name),
                         fail_condition=False, delay=5)
                logger.info("SCVMM:%s template %s uploaded success", provider, new_template_name)
                logger.info("SCVMM:%s Add template %s to trackerbot", provider, new_template_name)
                trackerbot.trackerbot_add_provider_template(kwargs.get('stream'),
                                                            provider,
                                                            new_template_name)
            except Exception:
                logger.exception("SCVMM:%s Exception verifying the template %s",
                                 provider, new_template_name)
        else:
            logger.info("SCVMM: A Template with that name already exists in the SCVMMLibrary")
def generate_html_report(api, stream, filename, appliance_template):

    status = 'PASSED'
    number_of_images_before = len(images_uploaded(stream))
    if get_untested_templates(api, stream, appliance_template):
        print('report will not be generated, proceed with the next untested provider')
        sys.exit()
    stream_data = get_latest_tested_template_on_stream(api, stream, appliance_template)

    if len(images_uploaded(stream)) > number_of_images_before:
        print("new images are uploaded on latest directory, wait for upload on providers")
        wait_for_templates_on_providers(api, stream, appliance_template)
    if appliance_template and appliance_template != stream_data['template_name']:
        print("the report will be generated only for the latest templates")
        sys.exit()

    if stream_data and not get_untested_templates(api, stream_data['group_name'],
                                                  appliance_template):
        print("Found tested template for {}".format(stream))
        print("Gathering tested template data for {}".format(stream))
        print("Updating the template log")
        stream_html = [stream_data['template_name'], stream_data['passed_on_providers'],
                       stream_data['failed_on_providers'], stream_data['group_name'],
                       stream_data['datestamp']]
        if 'html' in filename:
            data = template_env.get_template('template_tester_report.html').render(
                upstream=stream_html)
            with open(filename, 'w') as report:
                report.write(data)
        else:
            with open(filename, 'a+') as report:

                if 'template_rhos' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for OpenStack in latest directory')
                    report.write('\n\nMISSING: Image for OpenStack in latest directory')
                elif provider_in_the_list(list_provider_keys('openstack'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(images_uploaded(stream)['template_rhos']))
                    map(lambda x: report.write('\n{}: Passed'.format(x)), provider_in_the_list(
                        list_provider_keys('openstack'), stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('openstack'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(images_uploaded(stream)['template_rhos']))
                    map(lambda x: report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('openstack'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: OpenStack template is not available on any '
                          'rhos providers yet')
                    report.write('\n\nMISSING: OpenStack template is not available on any '
                                 'rhos providers yet')

                if 'template_rhevm' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for RHEVM in latest directory')
                    report.write('\n\nMISSING: Image for RHEVM in latest directory')
                elif provider_in_the_list(list_provider_keys('rhevm'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_rhevm']))
                    map(lambda x: report.write('\n{}: Passed'.format(x)), provider_in_the_list(
                        list_provider_keys('rhevm'), stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('rhevm'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_rhevm']))
                    map(lambda x: report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('rhevm'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: RHEVM template is not available on any '
                          'rhevm providers yet')
                    report.write('\n\nMISSING: RHEVM template is not available on any '
                                 'rhevm providers yet')

                if 'template_vsphere' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for VIRTUALCENTER in latest directory')
                    report.write('\n\nMISSING: Image for VIRTUALCENTER in latest directory')
                elif provider_in_the_list(list_provider_keys('virtualcenter'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_vsphere']))
                    map(lambda x: report.write('\n{}: Passed'.format(x)), provider_in_the_list(
                        list_provider_keys('virtualcenter'), stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('virtualcenter'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_vsphere']))
                    map(lambda x: report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('virtualcenter'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: VIRTUALCENTER template is not available on any '
                          'vmware providers yet')
                    report.write('\n\nMISSING: VIRTUALCENTER template is not available on any '
                                 'vmware providers yet')

                if 'template_scvmm' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for SCVMM in latest directory')
                    report.write('\n\nMISSING: Image for SCVMM in latest directory')
                elif provider_in_the_list(list_provider_keys('scvmm'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_scvmm']))
                    map(lambda x: report.write('\n{}: Passed'.format(x)), provider_in_the_list(
                        list_provider_keys('scvmm'), stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('scvmm'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_scvmm']))
                    map(lambda x: report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('scvmm'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: SCVMM template is not available on any '
                          'scvmm providers yet')
                    report.write('\n\nMISSING: SCVMM template is not available on any '
                                 'scvmm providers yet')
                report.seek(0, 0)
                lines = report.readlines()
                template_missing = filter(lambda x: "MISSING" in x, lines)
                template_passed = filter(lambda x: "PASSED" in x, lines)
                template_failed = filter(lambda x: "FAILED" in x, lines)
                if template_failed:
                    status = "FAILED"

                if template_missing and not (template_passed or template_failed):
                    report.close()
                    sys.exit("Template is MISSING....Please verify uploads....")

        print("template_tester_results report generated:{}".format(status))
    else:
        print("No Templates tested on: {}".format(datetime.datetime.now()))
def run(**kwargs):
    """Calls all the functions needed to upload new template to RHEVM.
       This is called either by template_upload_all script, or by main function.

    Args:
        **kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm'].
    """
    thread_queue = []
    valid_providers = []

    providers = list_provider_keys("rhevm")
    if kwargs.get('provider_data'):
        mgmt_sys = providers = kwargs['provider_data']['management_systems']
    for provider in providers:
        if kwargs.get('provider_data'):
            if mgmt_sys[provider]['type'] != 'rhevm':
                continue
            sshname = mgmt_sys[provider]['sshname']
            sshpass = mgmt_sys[provider]['sshpass']
            rhevip = mgmt_sys[provider]['ipaddress']
        else:
            mgmt_sys = cfme_data['management_systems']
            ssh_rhevm_creds = mgmt_sys[provider]['ssh_creds']
            sshname = credentials[ssh_rhevm_creds]['username']
            sshpass = credentials[ssh_rhevm_creds]['password']
            rhevip = mgmt_sys[provider]['ipaddress']

        if (mgmt_sys[provider].get('template_upload')
                and mgmt_sys[provider]['template_upload'].get('block_upload')):
            # Provider's template_upload section indicates upload should not happen on this provider
            continue

        logger.info(
            "RHEVM:%r verifying provider's state before template upload",
            provider)
        if not net.is_pingable(rhevip):
            continue
        elif not is_ovirt_engine_running(rhevip, sshname, sshpass):
            logger.info('RHEVM:%r ovirt-engine service not running..',
                        provider)
            continue
        valid_providers.append(provider)

    for provider in valid_providers:
        if kwargs.get('provider_data'):
            sshname = mgmt_sys[provider]['sshname']
            sshpass = mgmt_sys[provider]['sshpass']
            username = mgmt_sys[provider]['username']
            password = mgmt_sys[provider]['password']
        else:
            ssh_rhevm_creds = mgmt_sys[provider]['ssh_creds']
            sshname = credentials[ssh_rhevm_creds]['username']
            sshpass = credentials[ssh_rhevm_creds]['password']
            rhevm_credentials = mgmt_sys[provider]['credentials']
            username = credentials[rhevm_credentials]['username']
            password = credentials[rhevm_credentials]['password']
        rhevip = mgmt_sys[provider]['ipaddress']
        thread = Thread(target=upload_template,
                        args=(rhevip, sshname, sshpass, username, password,
                              provider, kwargs.get('image_url'),
                              kwargs.get('template_name'),
                              kwargs.get('provider_data'), kwargs['stream']))
        thread.daemon = True
        thread_queue.append(thread)
        thread.start()

    for thread in thread_queue:
        thread.join()
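
The is_ovirt_engine_running helper used above is not shown in this excerpt; a hypothetical version over SSH with paramiko, assuming a systemd-managed ovirt-engine service, could look like:

import paramiko


def is_ovirt_engine_running_sketch(rhevip, sshname, sshpass):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(rhevip, username=sshname, password=sshpass)
    try:
        _, stdout, _ = ssh.exec_command('systemctl is-active ovirt-engine')
        return stdout.read().strip() == b'active'
    finally:
        ssh.close()
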
Beispiel #46
0
def azure_cleanup(nic_template, pip_template, days_old, output):
    with open(output, 'w') as report:
        report.write('azure_cleanup.py, NICs, PIPs, Disks and Stack Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
        try:
            for provider_key in list_provider_keys('azure'):
                provider_mgmt = get_mgmt(provider_key)
                nic_list = provider_mgmt.list_free_nics(nic_template)
                report.write("----- Provider: {} -----\n".format(provider_key))
                if nic_list:
                    report.write(
                        "\nRemoving Nics with the name \'{}\':\n".format(
                            nic_template))
                    report.write(
                        tabulate(tabular_data=[[nic] for nic in nic_list],
                                 headers=["Name"],
                                 tablefmt='orgtbl'))
                    provider_mgmt.remove_nics_by_search(nic_template)
                else:
                    report.write(
                        "No \'{}\' NICs were found\n".format(nic_template))
                pip_list = provider_mgmt.list_free_pip(pip_template)
                if pip_list:
                    report.write(
                        "\nRemoving Public IPs with the name \'{}\':\n".format(
                            pip_template))
                    report.write(
                        tabulate(tabular_data=[[pip] for pip in pip_list],
                                 headers=["Name"],
                                 tablefmt='orgtbl'))
                    provider_mgmt.remove_pips_by_search(pip_template)
                else:
                    report.write("No \'{}\' Public IPs were found\n".format(
                        pip_template))
                stack_list = provider_mgmt.list_stack(days_old=days_old)
                if stack_list:
                    report.write("\nRemoving empty Stacks:\n")
                    removed_stacks = []
                    for stack in stack_list:
                        if provider_mgmt.is_stack_empty(stack):
                            removed_stacks.append(stack)
                            provider_mgmt.delete_stack(stack)
                    if removed_stacks:
                        report.write(
                            tabulate(tabular_data=[[st] for st in removed_stacks],
                                     headers=["Name"],
                                     tablefmt='orgtbl'))
                else:
                    report.write(
                        "\nNo stacks older than \'{}\' days were found\n".
                        format(days_old))
                """
                Blob removal section
                """
                report.write("\nRemoving 'bootdiagnostics-test*' containers\n")
                bootdiag_list = []
                for container in provider_mgmt.container_client.list_containers():
                    if container.name.startswith('bootdiagnostics-test'):
                        bootdiag_list.append(container.name)
                        provider_mgmt.container_client.delete_container(
                            container_name=container.name)
                report.write(
                    tabulate(tabular_data=[[disk] for disk in bootdiag_list],
                             headers=["Name"],
                             tablefmt='orgtbl'))
                report.write("\nRemoving unused blobs and disks\n")
                removed_disks = provider_mgmt.remove_unused_blobs()
                if len(removed_disks['Managed']) > 0:
                    report.write('Managed disks:\n')
                    report.write(
                        tabulate(tabular_data=removed_disks['Managed'],
                                 headers="keys",
                                 tablefmt='orgtbl'))
                if len(removed_disks['Unmanaged']) > 0:
                    report.write('\nUnmanaged blobs:\n')
                    report.write(
                        tabulate(tabular_data=removed_disks['Unmanaged'],
                                 headers="keys",
                                 tablefmt='orgtbl'))
            return 0
        except Exception:
            report.write("Something bad happened during Azure cleanup\n")
            report.write(tb.format_exc())
            return 1
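
A hypothetical CLI wrapper for the function above (the argument names mirror the signature; the defaults are illustrative):

import argparse
import sys

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Azure NIC/PIP/stack/blob cleanup')
    parser.add_argument('--nic-template', default='test*')
    parser.add_argument('--pip-template', default='test*')
    parser.add_argument('--days-old', type=int, default=7)
    parser.add_argument('--output', default='azure_cleanup.log')
    args = parser.parse_args()
    sys.exit(azure_cleanup(args.nic_template, args.pip_template, args.days_old, args.output))
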