Example #1
0
def vm_action(request, current_provider):
    """Dispatch a power/suspend/delete action on a VM via async tasks.

    Expects POST fields ``vm_name`` and ``action``; returns a plain-text
    HttpResponse describing the outcome (or the failure).
    """
    if not request.user.is_authenticated():
        return HttpResponse("Not authenticated", content_type="text/plain")
    # Probe the provider up front so the user gets a clear error instead of
    # a queued task that can never run.
    try:
        get_mgmt(current_provider)
    except Exception as e:
        return HttpResponse("Troubles with provider {}: {}".format(
            current_provider, str(e)),
                            content_type="text/plain")
    vm_name = request.POST["vm_name"]
    action = request.POST["action"]
    if action == "poweron":
        anyvm_power_on.delay(current_provider, vm_name)
    elif action == "poweroff":
        anyvm_power_off.delay(current_provider, vm_name)
    elif action == "suspend":
        anyvm_suspend.delay(current_provider, vm_name)
    elif action == "delete":
        anyvm_delete.delay(current_provider, vm_name)
    else:
        # BUG FIX: this response was built but never returned, so unknown
        # actions fell through and misleadingly reported "was initiated".
        return HttpResponse("No such action {}!".format(action),
                            content_type="text/plain")
    logger().info("User {} initiated {} on {}@{}".format(
        request.user.username, action, vm_name, current_provider))
    return HttpResponse("Action {} was initiated".format(action),
                        content_type="text/plain")
Example #2
0
    def get_mgmt_system(self):
        """ Returns the mgmt_system using the :py:func:`utils.providers.get_mgmt` method.

        Raises:
            ProviderHasNoKey: if neither ``self.key`` nor a truthy
                ``self.provider_data`` attribute is available.
        """
        # gotta stash this in here to prevent circular imports
        from utils.providers import get_mgmt

        if self.key:
            return get_mgmt(self.key)
        elif getattr(self, 'provider_data', None):
            return get_mgmt(self.provider_data)
        else:
            # BUG FIX: the '%s' placeholder was never substituted, so the
            # error literally read "Provider %s has no key ...".
            raise ProviderHasNoKey(
                'Provider {} has no key, so cannot get mgmt system'.format(self))
Example #3
0
    def get_mgmt_system(self):
        """ Returns the mgmt_system using the :py:func:`utils.providers.get_mgmt` method.

        Raises:
            ProviderHasNoKey: if neither ``self.key`` nor a truthy
                ``self.provider_data`` attribute is available.
        """
        # gotta stash this in here to prevent circular imports
        from utils.providers import get_mgmt

        if self.key:
            return get_mgmt(self.key)
        elif getattr(self, 'provider_data', None):
            return get_mgmt(self.provider_data)
        else:
            # BUG FIX: the '%s' placeholder was never substituted, so the
            # error literally read "Provider %s has no key ...".
            raise ProviderHasNoKey(
                'Provider {} has no key, so cannot get mgmt system'.format(self))
def process_provider_vms(provider_key, matchers, delta, vms_to_delete):
    with lock:
        print '%s processing' % provider_key
    try:
        now = datetime.datetime.now()
        provider = get_mgmt(provider_key)
        for vm_name in provider.list_vm():
            if not match(matchers, vm_name):
                continue

            try:
                vm_creation_time = provider.vm_creation_time(vm_name)
            except:
                logger.error('Failed to get creation/boot time for %s on %s' % (
                    vm_name, provider_key))
                continue

            if vm_creation_time + delta < now:
                vm_delta = now - vm_creation_time
                with lock:
                    vms_to_delete[provider_key].add((vm_name, vm_delta))
        with lock:
            print '%s finished' % provider_key
    except Exception as ex:
        with lock:
            print '%s failed' % provider_key
        logger.error('failed to process vms from provider %s', provider_key)
        logger.exception(ex)
def get_provider_templates(provider_key, template_providers,
                           unresponsive_providers, thread_lock):
    """Collect template names from one provider into `template_providers`.

    For each usable template, appends provider_key to
    template_providers[name]; on any failure records the provider in
    `unresponsive_providers`. Shared structures are mutated only while
    holding `thread_lock`.
    """
    # functionalized to make it easy to farm this out to threads
    try:
        with thread_lock:
            provider_mgmt = get_mgmt(provider_key)
        if cfme_data['management_systems'][provider_key]['type'] == 'ec2':
            # dirty hack to filter out ec2 public images, because there are literally hundreds.
            templates = provider_mgmt.api.get_all_images(
                owners=['self'], filters={'image-type': 'machine'})
            # list comprehension instead of map(): guarantees a list so the
            # len() below works on both py2 and py3
            templates = [i.name or i.id for i in templates]
        else:
            templates = provider_mgmt.list_template()
        print(provider_key, 'returned {} templates'.format(len(templates)))
        with thread_lock:
            for template in templates:
                # If it ends with 'db', skip it, it's a largedb/nodb variant
                if str(template).lower().endswith('db'):
                    continue
                template_providers[template].append(provider_key)
    except Exception:
        # BUG FIX: was a bare `except:`; catch Exception so Ctrl-C and
        # SystemExit still propagate out of the worker.
        print("{}\t{}\t{}".format(provider_key, 'failed:',
                                  traceback.format_exc().splitlines()[-1]))
        with thread_lock:
            unresponsive_providers.add(provider_key)
Example #6
0
def process_provider_vms(provider_key, matchers, delta, vms_to_delete):
    with lock:
        print '{} processing'.format(provider_key)
    try:
        now = datetime.datetime.now()
        with lock:
            # Known conf issue :)
            provider = get_mgmt(provider_key)
        for vm_name in provider.list_vm():
            if not match(matchers, vm_name):
                continue

            try:
                vm_creation_time = provider.vm_creation_time(vm_name)
            except:
                logger.error('Failed to get creation/boot time for %s on %s' % (
                    vm_name, provider_key))
                continue

            if vm_creation_time + delta < now:
                vm_delta = now - vm_creation_time
                with lock:
                    vms_to_delete[provider_key].add((vm_name, vm_delta))
        with lock:
            print '{} finished'.format(provider_key)
    except Exception as ex:
        with lock:
            # Print out the error message too because logs in the job get deleted
            print '{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex))
        logger.error('failed to process vms from provider %s', provider_key)
        logger.exception(ex)
Example #7
0
def scan_provider(provider_key, matchers, match_queue, scan_failure_queue):
    """Scan one provider's VM names against the text matchers.

    Every VM whose name matches is pushed onto `match_queue` as a
    VmProvider; if the VM list cannot be fetched at all, a FAIL VmReport is
    pushed onto `scan_failure_queue` instead. All results travel through
    the queues — nothing is returned.
    """
    logger.info('%s: Start scan for vm text matches', provider_key)
    try:
        vms = get_mgmt(provider_key).list_vm()
    except Exception:  # noqa
        scan_failure_queue.put(VmReport(provider_key, FAIL, NULL, NULL, NULL))
        logger.exception('%s: Exception listing vms', provider_key)
        return

    matched = list(filter(lambda name: match(matchers, name), vms))
    for vm in matched:
        match_queue.put(VmProvider(provider_key, vm))

    unmatched = set(vms) - set(matched)
    logger.info('%s: NOT matching text filters: %s', provider_key, unmatched)
    logger.info('%s: MATCHED text filters: %s', provider_key, matched)
Example #8
0
def cu_vm(provider, vm_name, template):
    """Deploy and configure a C&U (capacity and utilization) VM on the
    given provider, then verify it is running."""
    mgmt_conf = cfme_data['management_systems'][provider]
    # TODO this key isn't in cfme qe yamls
    allowed_ds = mgmt_conf['cap_and_util']['allowed_datastores']
    pool = mgmt_conf['cap_and_util']['resource_pool']

    # TODO methods deploy_template calls don't accept resourcepool and  allowed_datastores as kwargs
    deploy_template(provider, vm_name, template,
                    resourcepool=pool, allowed_datastores=allowed_ds)

    mgmt = get_mgmt(provider)
    vm_running(mgmt, vm_name)
    address = mgmt.get_ip_address(vm_name)

    # TODO this key isn't in cfme qe yamls
    creds_key = mgmt_conf['capandu_vm_creds']
    ssh_user = credentials[creds_key]['username']
    ssh_pass = credentials[creds_key]['password']

    # Create cron jobs to generate disk and network activity on the CU VM.
    with make_ssh_client(address, ssh_user, ssh_pass) as ssh_client:
        try:
            config_cu_vm(ssh_client)
        except CUCommandException:
            _vm_cleanup(mgmt, vm_name)
            raise

    vm_running(mgmt, vm_name)
def get_datastores_per_host(provider_key):
    """For every host on the provider, report files found in the host's
    datastores that do not belong to any registered VM. Failures are
    logged rather than raised.
    """
    print('{} processing to get datastores per host'.format(provider_key))
    try:
        mgmt = get_mgmt(provider_key)

        registered = get_registered_vm_files(provider_key)
        datastore_urls = {
            host: mgmt.list_host_datastore_url(host)
            for host in mgmt.list_host()
        }
        unregistered = []

        print("\n*********************UNREGISTERED FILES ON: {}**********************\n".format(
            provider_key))
        print('HOST_NAME\t\tFILE_PATH\t\tTEMPLATE_VM_ISO\t\tNUMBER_OF_FILES\n')
        for host, url in datastore_urls.items():
            try:
                list_orphaned_files_per_host(host, url, provider_key,
                                             registered, unregistered)
            except Exception as e:
                logger.error(e)
                continue

    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
def get_registered_vm_files(provider_key):
    """Map each VM's config-file directory to the VM name for all VMs on
    the given provider, printing the mapping and returning it.

    Returns:
        defaultdict: directory path -> vm name, or None if the provider
        could not be processed at all (errors are logged, not raised).
    """
    try:
        print("{} processing all the registered files..".format(provider_key))
        # NOTE(review): declared as defaultdict(set) but entries are assigned
        # (not .add()ed) below, so each directory maps to a single vm name and
        # the set default factory is never used — confirm intent.
        vm_registered_files = defaultdict(set)
        provider = get_mgmt(provider_key)
        for vm_name in provider.list_vm():
            try:
                vm_file_path = provider.get_vm_config_files_path(vm_name)
                vm_directory_name = re.findall(r'\s(.*)/\w*', vm_file_path)
                vm_registered_files[vm_directory_name[0]] = vm_name
            except Exception as e:
                logger.error(e)
                # BUG FIX: old message claimed a creation/boot-time lookup
                # failed, but this block fetches config file paths.
                logger.error('Failed to get config files path for {} on {}'.format(
                    vm_name, provider_key))
                continue
        print("\n**************************REGISTERED FILES ON {}***********************\n".format(
            provider_key))
        for k, v in vm_registered_files.items():
            print('FILE_NAME: {}\nVM_NAME: {}\n'.format(k, v))
        return vm_registered_files
    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
Example #11
0
    def get_mgmt_system(self):
        """Build and return the mgmt_system via
        :py:func:`utils.providers.get_mgmt`, authenticating with a fresh
        bearer token.

        Raises:
            ProviderHasNoKey: if neither ``key`` nor a ``provider_data``
                attribute is present on this provider.
        """
        # gotta stash this in here to prevent circular imports
        from utils.providers import get_mgmt

        creds = {'token': self.get_bearer_token()}

        if self.key:
            return get_mgmt(self.key, credentials=creds)
        if hasattr(self, 'provider_data'):
            return get_mgmt(self.provider_data, credentials=creds)
        raise ProviderHasNoKey(
            'Provider {} has no key, so cannot get mgmt system'.format(
                self.name))
Example #12
0
def power_state(request, current_provider):
    """Return the mapped power state of a VM as plain text (superusers only;
    everyone else is redirected home)."""
    if not (request.user.is_authenticated() and request.user.is_superuser):
        return go_home(request)
    vm_name = request.POST["vm_name"]
    mgmt = get_mgmt(current_provider)
    raw_status = mgmt.vm_status(vm_name)
    state = Appliance.POWER_STATES_MAPPING.get(raw_status, "unknown")
    return HttpResponse(state, content_type="text/plain")
Example #13
0
def cu_vm(provider, vm_name, template):
    """
    Deploys CU VM

    Deploys the template, waits for the VM to run, then installs cron jobs
    over SSH to generate disk/network activity. On configuration failure the
    VM is cleaned up and the error re-raised.
    """
    provider_dict = cfme_data["management_systems"][provider]
    datastore = provider_dict["cap_and_util"]["allowed_datastores"]
    resource_pool = provider_dict["cap_and_util"]["resource_pool"]

    deploy_template(provider, vm_name, template, resourcepool=resource_pool, allowed_datastores=datastore)

    prov_mgmt = get_mgmt(provider)
    vm_running(prov_mgmt, vm_name)
    ip = prov_mgmt.get_ip_address(vm_name)

    vm_ssh_creds = provider_dict["capandu_vm_creds"]
    sshname = credentials[vm_ssh_creds]["username"]
    sshpass = credentials[vm_ssh_creds]["password"]

    # Create cron jobs to generate disk and network activity on the CU VM.
    # BUG FIX: the SSH client was never closed (connection leak); use it as
    # a context manager so it is closed even when configuration fails.
    with make_ssh_client(ip, sshname, sshpass) as ssh_client:
        try:
            config_cu_vm(ssh_client)
        except CUCommandException:
            _vm_cleanup(prov_mgmt, vm_name)
            raise
    vm_running(prov_mgmt, vm_name)
Example #14
0
def process_provider_vms(provider_key, matchers, delta, vms_to_delete):
    """Find VMs on `provider_key` matching `matchers` older than `delta`
    and record (vm_name, age) pairs in vms_to_delete[provider_key].

    Shared state (stdout, vms_to_delete) is only mutated while holding the
    module-level `lock`; per-VM lookup failures are logged and skipped.
    """
    with lock:
        print('{} processing'.format(provider_key))
    try:
        now = datetime.datetime.now()
        with lock:
            # Known conf issue :)
            provider = get_mgmt(provider_key)
        for vm_name in provider.list_vm():
            if not match(matchers, vm_name):
                continue

            try:
                vm_creation_time = provider.vm_creation_time(vm_name)
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                logger.error('Failed to get creation/boot time for %s on %s' %
                             (vm_name, provider_key))
                continue

            if vm_creation_time + delta < now:
                vm_delta = now - vm_creation_time
                with lock:
                    vms_to_delete[provider_key].add((vm_name, vm_delta))
        with lock:
            print('{} finished'.format(provider_key))
    except Exception as ex:
        with lock:
            # Print out the error message too because logs in the job get deleted
            print('{} failed ({}: {})'.format(provider_key,
                                              type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider %s', provider_key)
        logger.exception(ex)
def run(**kwargs):
    """Calls the functions needed to cleanup templates on RHEVM providers.
       This is called either by template_upload_all script, or by main
       function.

    Args:
        **kwargs: Kwargs generated from
        cfme_data['template_upload']['template_upload_rhevm'].
    """
    providers = cfme_data["management_systems"]
    for provider in providers:
        if cfme_data["management_systems"][provider]["type"] != "rhevm":
            continue
        if args.provider:
            if args.provider != provider:
                continue
        mgmt_sys = cfme_data["management_systems"][provider]
        ssh_rhevm_creds = mgmt_sys["hosts"][0]["credentials"]
        sshname = credentials[ssh_rhevm_creds]["username"]
        sshpass = credentials[ssh_rhevm_creds]["password"]

        api = get_mgmt(provider)
        edomain = get_edomain(api)
        if args.edomain:
            edomain = args.edomain
        path, edomain_ip = get_edomain_path(api, edomain)
        try:
            print "\n--------Start of {}--------".format(provider)
            change_edomain_state(api, "maintenance", edomain)
            cleanup_templates(api, edomain, args.days_old, args.max_templates)
        finally:
            cleanup_empty_dir_on_edomain(path, edomain_ip, sshname, sshpass)
            change_edomain_state(api, "active", edomain)
            print "--------End of {}--------\n".format(provider)
Example #16
0
def cu_vm(provider, vm_name, template):
    """
    Deploys CU VM

    Deploys the template, waits for the VM to run, then installs cron jobs
    over SSH to generate disk/network activity. On configuration failure the
    VM is cleaned up and the error re-raised.
    """
    provider_dict = cfme_data['management_systems'][provider]
    datastore = provider_dict['cap_and_util']['allowed_datastores']
    resource_pool = provider_dict['cap_and_util']['resource_pool']

    deploy_template(provider, vm_name, template,
        resourcepool=resource_pool, allowed_datastores=datastore)

    prov_mgmt = get_mgmt(provider)
    vm_running(prov_mgmt, vm_name)
    ip = prov_mgmt.get_ip_address(vm_name)

    vm_ssh_creds = provider_dict['capandu_vm_creds']
    sshname = credentials[vm_ssh_creds]['username']
    sshpass = credentials[vm_ssh_creds]['password']

    # Create cron jobs to generate disk and network activity on the CU VM.
    # BUG FIX: the SSH client was never closed (connection leak); use it as
    # a context manager so it is closed even when configuration fails.
    with make_ssh_client(ip, sshname, sshpass) as ssh_client:
        try:
            config_cu_vm(ssh_client)
        except CUCommandException:
            _vm_cleanup(prov_mgmt, vm_name)
            raise
    vm_running(prov_mgmt, vm_name)
def get_registered_vm_files(provider_key):
    """Map each VM's config-file directory to the VM name for all VMs on
    the given provider, printing the mapping and returning it.

    Returns:
        defaultdict: directory path -> vm name, or None if the provider
        could not be processed at all (errors are logged, not raised).
    """
    try:
        print("{} processing all the registered files..".format(provider_key))
        # NOTE(review): declared as defaultdict(set) but entries are assigned
        # (not .add()ed) below, so each directory maps to a single vm name and
        # the set default factory is never used — confirm intent.
        vm_registered_files = defaultdict(set)
        provider = get_mgmt(provider_key)
        for vm_name in provider.list_vm():
            try:
                vm_file_path = provider.get_vm_config_files_path(vm_name)
                vm_directory_name = re.findall(r'\s(.*)/\w*', vm_file_path)
                vm_registered_files[vm_directory_name[0]] = vm_name
            except Exception as e:
                logger.error(e)
                # BUG FIX: old message claimed a creation/boot-time lookup
                # failed, but this block fetches config file paths.
                logger.error(
                    'Failed to get config files path for {} on {}'.format(
                        vm_name, provider_key))
                continue
        print(
            "\n**************************REGISTERED FILES ON {}***********************\n"
            .format(provider_key))
        for k, v in vm_registered_files.items():
            print('FILE_NAME: {}\nVM_NAME: {}\n'.format(k, v))
        return vm_registered_files
    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key,
                                          type(ex).__name__, str(ex)))
        logger.error(
            'failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
def get_datastores_per_host(provider_key):
    """For every host on the provider, report datastore files that do not
    belong to any registered VM. Failures are logged rather than raised.
    """
    print('{} processing to get datastores per host'.format(provider_key))
    try:
        mgmt = get_mgmt(provider_key)

        registered = get_registered_vm_files(provider_key)
        url_by_host = {}
        for host in mgmt.list_host():
            url_by_host[host] = mgmt.list_host_datastore_url(host)
        unregistered = []

        print(
            "\n*********************UNREGISTERED FILES ON: {}**********************\n"
            .format(provider_key))
        print('HOST_NAME\t\tFILE_PATH\t\tTEMPLATE_VM_ISO\t\tNUMBER_OF_FILES\n')
        for host in url_by_host:
            try:
                list_orphaned_files_per_host(host, url_by_host[host],
                                             provider_key, registered,
                                             unregistered)
            except Exception as e:
                logger.error(e)
                continue

    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key,
                                          type(ex).__name__, str(ex)))
        logger.error(
            'failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
def scan_provider(provider_key, matchers, match_queue, scan_failure_queue):
    """Scan one provider's VM names against the text matchers.

    Each VM whose name matches goes onto `match_queue` as a VmProvider; if
    the VM list cannot be fetched at all, a FAIL VmReport goes onto
    `scan_failure_queue` instead. Nothing is returned — results travel
    only through the queues.
    """
    logger.info('%s: Start scan for vm text matches', provider_key)
    try:
        all_vms = get_mgmt(provider_key).list_vm()
    except Exception:  # noqa
        scan_failure_queue.put(VmReport(provider_key, FAIL, NULL, NULL, NULL))
        logger.exception('%s: Exception listing vms', provider_key)
        return

    matched = []
    for candidate in all_vms:
        if match(matchers, candidate):
            matched.append(candidate)
            match_queue.put(VmProvider(provider_key, candidate))

    logger.info('%s: NOT matching text filters: %s', provider_key,
                set(all_vms) - set(matched))
    logger.info('%s: MATCHED text filters: %s', provider_key, matched)
Example #20
0
def power_state(request, current_provider):
    """Plain-text view returning the mapped power state of a VM; only
    authenticated superusers may call it, others are sent home."""
    if not (request.user.is_authenticated() and request.user.is_superuser):
        return go_home(request)
    vm_name = request.POST["vm_name"]
    status = get_mgmt(current_provider).vm_status(vm_name)
    mapped = Appliance.POWER_STATES_MAPPING.get(status, "unknown")
    return HttpResponse(mapped, content_type="text/plain")
def delete_provider_vms(provider_key, vm_names):
    """Delete the named VMs on `provider_key`, printing progress under the
    shared `lock`; per-VM failures are logged and the loop continues.
    """
    with lock:
        print('Deleting VMs from {} ...'.format(provider_key))

    try:
        with lock:
            provider = get_mgmt(provider_key)
    except Exception as e:
        with lock:
            print("Could not retrieve the provider {}'s mgmt system ({}: {})".
                  format(provider_key,
                         type(e).__name__, str(e)))
            logger.exception(e)
        # BUG FIX: without this return, `provider` was unbound below and the
        # loop crashed with NameError instead of bailing out cleanly.
        return

    for vm_name in vm_names:
        with lock:
            print("Deleting {} from {}".format(vm_name, provider_key))
        try:
            provider.delete_vm(vm_name)
        except Exception as e:
            with lock:
                print('Failed to delete {} on {}'.format(
                    vm_name, provider_key))
                logger.exception(e)
    with lock:
        print("{} is done!".format(provider_key))
def run(**kwargs):
    """Calls the functions needed to cleanup templates on RHEVM providers.
       This is called either by template_upload_all script, or by main
       function.

    Args:
        **kwargs: Kwargs generated from
        cfme_data['template_upload']['template_upload_rhevm'].
    """
    providers = cfme_data['management_systems']
    for provider in providers:
        if cfme_data['management_systems'][provider]['type'] != 'rhevm':
            continue
        if args.provider:
            if args.provider != provider:
                continue
        mgmt_sys = cfme_data['management_systems'][provider]
        ssh_rhevm_creds = mgmt_sys['hosts'][0]['credentials']
        sshname = credentials[ssh_rhevm_creds]['username']
        sshpass = credentials[ssh_rhevm_creds]['password']

        api = get_mgmt(provider)
        edomain = get_edomain(api)
        if args.edomain:
            edomain = args.edomain
        path, edomain_ip = get_edomain_path(api, edomain)
        try:
            print "\n--------Start of {}--------".format(provider)
            change_edomain_state(api, 'maintenance', edomain)
            cleanup_templates(api, edomain, args.days_old, args.max_templates)
        finally:
            cleanup_empty_dir_on_edomain(path, edomain_ip, sshname, sshpass)
            change_edomain_state(api, 'active', edomain)
            print "--------End of {}--------\n".format(provider)
Example #23
0
def ec2cleanup(exclude_volumes, exclude_eips, exclude_elbs, exclude_enis,
               output):
    """Sweep every ec2 provider: delete unattached volumes, unused ELBs and
    ENIs, and release disassociated elastic IPs. A report header is written
    to `output` first; each exclude_* argument is forwarded to the matching
    cleanup helper.
    """
    with open(output, 'w') as report:
        report.write(
            'ec2cleanup.py, Address, Volume, LoadBalancer and Network Interface Cleanup'
        )
        report.write("\nDate: {}\n".format(datetime.now()))
    for provider_key in list_provider_keys('ec2'):
        mgmt = get_mgmt(provider_key)
        print("----- Provider: {} -----".format(provider_key))
        # (announcement, cleanup callable) pairs, executed in order.
        cleanup_steps = [
            ("Deleting volumes...",
             lambda m: delete_unattached_volumes(
                 provider_mgmt=m, excluded_volumes=exclude_volumes,
                 output=output)),
            ("Deleting Elastic LoadBalancers...",
             lambda m: delete_unused_loadbalancers(
                 provider_mgmt=m, excluded_elbs=exclude_elbs,
                 output=output)),
            ("Deleting Elastic Network Interfaces...",
             lambda m: delete_unused_network_interfaces(
                 provider_mgmt=m, excluded_enis=exclude_enis,
                 output=output)),
            ("Releasing addresses...",
             lambda m: delete_disassociated_addresses(
                 provider_mgmt=m, excluded_eips=exclude_eips,
                 output=output)),
        ]
        for message, step in cleanup_steps:
            print(message)
            step(mgmt)
Example #24
0
def process_provider_vms(provider_key, matchers, delta, vms_to_delete):
    with lock:
        print "%s processing" % provider_key
    try:
        now = datetime.datetime.now()
        provider = get_mgmt(provider_key)
        for vm_name in provider.list_vm():
            if not match(matchers, vm_name):
                continue

            try:
                vm_creation_time = provider.vm_creation_time(vm_name)
            except:
                logger.error("Failed to get creation/boot time for %s on %s" % (vm_name, provider_key))
                continue

            if vm_creation_time + delta < now:
                vm_delta = now - vm_creation_time
                with lock:
                    vms_to_delete[provider_key].add((vm_name, vm_delta))
        with lock:
            print "%s finished" % provider_key
    except Exception as ex:
        with lock:
            print "%s failed" % provider_key
        logger.error("failed to process vms from provider %s", provider_key)
        logger.exception(ex)
Example #25
0
def power_state_buttons(request, current_provider):
    """Render the power-action button partial for one VM.

    The template context is built from locals(), so every local variable
    name below (manager, vm_name, power_state, can_*) is part of the
    contract with appliances/vms/_buttons.html — do not rename them.
    """
    manager = get_mgmt(current_provider)
    vm_name = request.POST["vm_name"]
    power_state = request.POST["power_state"]
    # Power on from SUSPENDED/OFF; power off only from ON; delete only when OFF.
    can_power_on = power_state in {Appliance.Power.SUSPENDED, Appliance.Power.OFF}
    can_power_off = power_state in {Appliance.Power.ON}
    # Suspend also requires the mgmt system to support suspension.
    can_suspend = power_state in {Appliance.Power.ON} and manager.can_suspend
    can_delete = power_state in {Appliance.Power.OFF}
    return render(request, 'appliances/vms/_buttons.html', locals())
Example #26
0
def vms_table(request, current_provider=None):
    """Render the VM list partial for `current_provider` (superusers only).

    Any failure (bad provider, listing error) is returned as a plain-text
    "<ExceptionType>: <message>" response instead of raising. The template
    context comes from locals(), so `manager` and `vms` are names the
    template relies on.
    """
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return go_home(request)
    try:
        manager = get_mgmt(current_provider)
        vms = sorted(manager.list_vm())
        return render(request, 'appliances/vms/_list.html', locals())
    except Exception as e:
        return HttpResponse('{}: {}'.format(type(e).__name__, str(e)), content_type="text/plain")
Example #27
0
def power_state_buttons(request, current_provider):
    """Render the power-action button partial for one VM.

    The template context is built from locals(), so every local variable
    name below (manager, vm_name, power_state, can_*) is part of the
    contract with appliances/vms/_buttons.html — do not rename them.
    """
    manager = get_mgmt(current_provider)
    vm_name = request.POST["vm_name"]
    power_state = request.POST["power_state"]
    # Power on from SUSPENDED/OFF; power off only from ON; delete only when OFF.
    can_power_on = power_state in {Appliance.Power.SUSPENDED, Appliance.Power.OFF}
    can_power_off = power_state in {Appliance.Power.ON}
    # Suspend also requires the mgmt system to support suspension.
    can_suspend = power_state in {Appliance.Power.ON} and manager.can_suspend
    can_delete = power_state in {Appliance.Power.OFF}
    return render(request, 'appliances/vms/_buttons.html', locals())
Example #28
0
def vms_table(request, current_provider=None):
    """Render the VM list partial for `current_provider` (superusers only).

    Any failure (bad provider, listing error) is returned as a plain-text
    "<ExceptionType>: <message>" response instead of raising. The template
    context comes from locals(), so `manager` and `vms` are names the
    template relies on.
    """
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return go_home(request)
    try:
        manager = get_mgmt(current_provider)
        vms = sorted(manager.list_vm())
        return render(request, 'appliances/vms/_list.html', locals())
    except Exception as e:
        return HttpResponse('{}: {}'.format(type(e).__name__, str(e)), content_type="text/plain")
def list_vms(provider_key, output_queue):
    """
    List all the vms/instances on the given provider key
    Build list of lists with basic vm info: [[provider, vm, status, age, type], [etc]]
    :param provider_key: string provider key
    :param output_queue: a multiprocessing.Queue object to add results to
    :return: list of lists of vms and basic statistics
    """
    output_list = []

    print('Listing VMS on provider {}'.format(provider_key))
    provider = get_mgmt(provider_key)
    try:
        vm_list = provider.list_vm()
    except NotImplementedError:
        print('Provider does not support list_vm: {}'.format(provider_key))
        output_list.append([provider_key, 'Not Supported', NULL, NULL, NULL])
        # BUG FIX: this early return previously skipped output_queue.put(),
        # so the 'Not Supported' row was silently dropped by the consumer.
        output_queue.put(output_list)
        return
    else:
        # TODO thread metadata collection for further speed improvements
        for vm_name in vm_list:
            # Init these meta values in case they fail to query
            status, creation, vm_type = None, None, None
            try:
                print('Collecting metadata for VM {} on provider {}'.format(vm_name, provider_key))
                # VMError raised for some vms in bad status
                # exception message contains useful information about VM status
                try:
                    status = provider.vm_status(vm_name)
                except VMError as ex:
                    status = ex.message

                creation = provider.vm_creation_time(vm_name)

                # different provider types implement different methods to get instance type info
                try:
                    vm_type = provider.vm_type(vm_name)
                except (AttributeError, NotImplementedError):
                    vm_type = provider.vm_hardware_configuration(vm_name)
                finally:
                    vm_type = vm_type or '--'
                    output_list.append([provider_key, vm_name, status, creation, str(vm_type)])

            except Exception as ex:
                # NOTE(review): ex.message is py2-only; str(ex) is the
                # py3-compatible spelling — confirm target interpreter.
                print('Exception during provider processing on {}: {}'
                      .format(provider_key, ex.message))
                # Add the VM to the list anyway, we just might not have all metadata
                output_list.append([provider_key,
                                    vm_name,
                                    status or NULL,
                                    creation or NULL,
                                    str(vm_type) or NULL])
                continue

    output_queue.put(output_list)
    return
Example #30
0
def call_provider(provider_name, action, *args, **kwargs):
    """Resolve `action` to an attribute on the named provider's mgmt object
    and invoke it, passing through any extra positional/keyword arguments.

    Raises:
        Exception: when the mgmt object has no attribute named `action`.
    """
    # Given a provider class, find the named method and call it with
    # *args. This could possibly be generalized for other CLI tools.
    mgmt = get_mgmt(provider_name)
    try:
        target = getattr(mgmt, action)
    except AttributeError:
        raise Exception('Action {} not found'.format(repr(action)))
    return target(*args, **kwargs)
def process_provider_vms(provider_key,
                         provider_type,
                         matchers,
                         delta,
                         vms_to_delete,
                         list_vms=None):
    """Scan one provider for VMs matching `matchers` that are older than
    `delta`, recording (vm_name, age) pairs in vms_to_delete[provider_key].

    For powered-off VMs on 'virtualcenter' providers the age is derived from
    the VM config file's modified time instead of vm_creation_time —
    presumably because the provider call is unreliable for them; TODO
    confirm. Shared state (stdout, vms_to_delete) is only touched while
    holding the module-level `lock`; per-VM failures are logged and skipped
    so one bad VM doesn't abort the whole scan.
    """
    with lock:
        print('{} processing'.format(provider_key))
    try:
        now = datetime.datetime.now()
        with lock:
            # Known conf issue :)
            provider = get_mgmt(provider_key)
        vm_list = provider.list_vm()
        if list_vms:
            list_provider_vms(provider_key)

        for vm_name in vm_list:
            try:
                if not match(matchers, vm_name):
                    continue

                # Special case: powered-off virtualcenter VMs get their age
                # from the config file's modification time on the host.
                if provider_type == 'virtualcenter' and provider.vm_status(
                        vm_name) == 'poweredOff':
                    hostname = provider.get_vm_host_name(vm_name)
                    vm_config_datastore = provider.get_vm_config_files_path(
                        vm_name)
                    datastore_url = provider.get_vm_datastore_path(
                        vm_name, vm_config_datastore)
                    vm_creation_time = get_vm_config_modified_time(
                        hostname, vm_name, datastore_url, provider_key)
                else:
                    vm_creation_time = provider.vm_creation_time(vm_name)

                if vm_creation_time + delta < now:
                    vm_delta = now - vm_creation_time
                    with lock:
                        vms_to_delete[provider_key].add((vm_name, vm_delta))
            except Exception as e:
                logger.error(e)
                logger.error(
                    'Failed to get creation/boot time for {} on {}'.format(
                        vm_name, provider_key))
                continue

        with lock:
            print('{} finished'.format(provider_key))
    except Exception as ex:
        with lock:
            # Print out the error message too because logs in the job get deleted
            print('{} failed ({}: {})'.format(provider_key,
                                              type(ex).__name__, str(ex)))
        logger.error(
            'failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
Example #32
0
def power_state_buttons(request, current_provider):
    """Render the power-control button partial for a VM.

    Superuser-only; anyone else is redirected home via ``go_home``.
    Derives ``can_power_on`` / ``can_power_off`` / ``can_suspend`` /
    ``can_delete`` flags from the POSTed ``power_state`` and renders
    the ``_buttons.html`` fragment.

    NOTE: the template context is built from ``locals()``, so every local
    name defined here is exposed to the template -- do not rename locals
    without checking ``appliances/vms/_buttons.html``.
    """
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return go_home(request)
    manager = get_mgmt(current_provider)
    vm_name = request.POST["vm_name"]
    power_state = request.POST["power_state"]
    # Power on from SUSPENDED/OFF; power off and suspend only from ON
    # (suspend additionally requires provider support); delete only when OFF.
    can_power_on = power_state in {Appliance.Power.SUSPENDED, Appliance.Power.OFF}
    can_power_off = power_state in {Appliance.Power.ON}
    can_suspend = power_state in {Appliance.Power.ON} and manager.can_suspend
    can_delete = power_state in {Appliance.Power.OFF}
    return render(request, 'appliances/vms/_buttons.html', locals())
Example #33
0
def power_state_buttons(request, current_provider):
    """Render the power-control button partial for a VM (superuser only).

    Computes which power actions are valid for the POSTed ``power_state``
    and renders the ``_buttons.html`` fragment.

    NOTE: the template context is ``locals()`` -- all local names defined
    here are visible to the template, so do not rename them without
    checking ``appliances/vms/_buttons.html``.
    """
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return go_home(request)
    manager = get_mgmt(current_provider)
    vm_name = request.POST["vm_name"]
    power_state = request.POST["power_state"]
    # Valid transitions: on from SUSPENDED/OFF, off/suspend from ON
    # (suspend needs provider support), delete only when OFF.
    can_power_on = power_state in {Appliance.Power.SUSPENDED, Appliance.Power.OFF}
    can_power_off = power_state in {Appliance.Power.ON}
    can_suspend = power_state in {Appliance.Power.ON} and manager.can_suspend
    can_delete = power_state in {Appliance.Power.OFF}
    return render(request, 'appliances/vms/_buttons.html', locals())
def main():
    """CLI entry point: attach (or detach) a direct LUN disk on a VM.

    Command-line arguments:
        --provider  provider name as it appears in cfme_data
        --vm_name   name of the VM on which to act
        --remove    detach the disk instead of attaching it
    """
    parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--provider', dest='provider_name',
                        help='provider name in cfme_data')
    parser.add_argument('--vm_name',
                        help='the name of the VM on which to act')
    parser.add_argument('--remove', action="store_true",
                        help='remove disk from vm')
    args = parser.parse_args()

    # connect_direct_lun_to_appliance handles both attach and detach,
    # selected by the boolean --remove flag.
    get_mgmt(args.provider_name).connect_direct_lun_to_appliance(
        args.vm_name, args.remove)
def upload_template(rhevip, sshname, sshpass, username, password,
                    provider, image_url, template_name):
    """Download an appliance .ova image and turn it into a RHEVM template.

    Args:
        rhevip: RHEVM host IP used for the SSH session.
        sshname: SSH username for the RHEVM host.
        sshpass: SSH password for the RHEVM host.
        username: RHEVM API username used during templatization.
        password: RHEVM API password used during templatization.
        provider: provider key (used for kwargs lookup and log prefixes).
        image_url: URL of the .ova image to download.
        template_name: target template name; falls back to
            cfme_data['basic_info']['appliance_template'] when None.

    Returns:
        False on any failure (the exception is printed, not raised);
        None on success.
    """
    try:
        print("RHEVM:{} Template {} upload started".format(provider, template_name))
        # BUG FIX: resolve the default template name *before* stashing it
        # into kwargs -- previously kwargs['template_name'] could remain
        # None (only the local variable received the default), so
        # update_params_api/check_kwargs saw no template name.
        if template_name is None:
            template_name = cfme_data['basic_info']['appliance_template']
        kwargs = make_kwargs_rhevm(cfme_data, provider)
        kwargs['image_url'] = image_url
        kwargs['template_name'] = template_name
        ovaname = get_ova_name(image_url)
        ssh_client = make_ssh_client(rhevip, sshname, sshpass)
        api = get_mgmt(kwargs.get('provider')).api

        path, edomain_ip = get_edomain_path(api, kwargs.get('edomain'))

        kwargs = update_params_api(api, **kwargs)

        check_kwargs(**kwargs)

        if api.templates.get(template_name) is not None:
            # Template already exists -- nothing to do.
            print("RHEVM:{} Found finished template with name {}.".format(provider, template_name))
            print("RHEVM:{} The script will now end.".format(provider))
        else:
            print("RHEVM:{} Downloading .ova file...".format(provider))
            download_ova(ssh_client, kwargs.get('image_url'))
            try:
                print("RHEVM:{} Templatizing .ova file...".format(provider))
                template_from_ova(api, username, password, rhevip, kwargs.get('edomain'),
                                  ovaname, ssh_client)
                print("RHEVM:{} Importing new template...".format(provider))
                import_template(api, kwargs.get('edomain'), kwargs.get('sdomain'),
                                kwargs.get('cluster'))
                print("RHEVM:{} Making a temporary VM from new template...".format(provider))
                make_vm_from_template(api, kwargs.get('cluster'))
                print("RHEVM:{} Adding disk to created VM...".format(provider))
                add_disk_to_vm(api, kwargs.get('sdomain'), kwargs.get('disk_size'),
                               kwargs.get('disk_format'), kwargs.get('disk_interface'))
                print("RHEVM:{} Templatizing VM...".format(provider))
                templatize_vm(api, template_name, kwargs.get('cluster'))
            finally:
                # Best-effort cleanup: always remove the downloaded .ova,
                # cycle the export domain, and close connections, even
                # when templatization raised.
                cleanup(api, kwargs.get('edomain'), ssh_client, ovaname, provider)
                change_edomain_state(api, 'maintenance', kwargs.get('edomain'), provider)
                cleanup_empty_dir_on_edomain(path, edomain_ip,
                                sshname, sshpass, provider)
                change_edomain_state(api, 'active', kwargs.get('edomain'), provider)
                ssh_client.close()
                api.disconnect()
                print("RHEVM:{} Template {} upload Ended".format(provider, template_name))
        print("RHEVM: Done.")
    except Exception as e:
        print(e)
        return False
Example #36
0
def run(**kwargs):
    """Calls the functions needed to cleanup templates on RHEVM providers.
       This is called either by template_upload_all script, or by main
       function.

    Args:
        **kwargs: Kwargs generated from
        cfme_data['template_upload']['template_upload_rhevm'].

    NOTE(review): despite the **kwargs signature, this version reads the
    module-level ``args`` namespace (args.provider / args.edomain /
    args.days_old / args.max_templates), so it only works after argparse
    has populated ``args`` -- confirm before reusing from
    template_upload_all.
    """
    providers = cfme_data['management_systems']
    for provider in providers:

        # Only RHEVM providers are relevant here.
        if cfme_data['management_systems'][provider]['type'] != 'rhevm':
            continue
        if args.provider:
            if args.provider != provider:
                continue

        mgmt_sys = cfme_data['management_systems'][provider]
        # SSH credentials of the first configured host, used both to check
        # the ovirt-engine service and to clean the export domain dir.
        ssh_rhevm_creds = mgmt_sys['hosts'][0]['credentials']
        sshname = credentials[ssh_rhevm_creds]['username']
        sshpass = credentials[ssh_rhevm_creds]['password']

        # Skip providers that are unreachable or whose engine is down.
        if not net.is_pingable(
                cfme_data['management_systems'][provider]['ipaddress']):
            continue
        elif not is_ovirt_engine_running(
                cfme_data['management_systems'][provider]['ipaddress'],
                sshname, sshpass):
            print('ovirt-engine service not running..')
            continue

        try:
            print('connecting to provider, to establish api handler')
            api = get_mgmt(provider).api
            edomain = get_edomain(api)
            if args.edomain:
                edomain = args.edomain
            path, edomain_ip = get_edomain_path(api, edomain)
        except Exception as e:
            logger.exception(e)
            continue

        try:
            print("\n--------Start of {}--------".format(provider))
            cleanup_templates(api, edomain, args.days_old, args.max_templates)
        finally:
            # Always cycle the export domain back to 'active', even when
            # template cleanup raised.
            change_edomain_state(api, 'maintenance', edomain)
            cleanup_empty_dir_on_edomain(path, edomain_ip, sshname, sshpass)
            change_edomain_state(api, 'active', edomain)
            print("--------End of {}--------\n".format(provider))

    print("Provider Execution completed")
def ec2cleanup(max_hours, exclude_instances, exclude_volumes, exclude_eips):
    """Sweep every EC2 provider: delete aged instances, then release
    disassociated elastic IPs and remove unattached volumes.

    Args:
        max_hours: maximum allowed instance age in hours.
        exclude_instances: instances to keep regardless of age.
        exclude_volumes: volumes exempt from deletion.
        exclude_eips: elastic IPs exempt from release.
    """
    for provider in list_providers("ec2"):
        mgmt = get_mgmt(provider)
        logger.info("\n" + provider + ":\n")
        logger.info("Deleted instances:")
        delete_old_instances(ec2provider=mgmt,
                             date=datetime.now(),
                             maxhours=max_hours,
                             excluded_instances=exclude_instances)
        # Give AWS time to settle terminations before touching
        # addresses and volumes.
        sleep(120)
        logger.info("\nReleased addresses:")
        delete_disassociated_addresses(ec2provider=mgmt,
                                       excluded_ips=exclude_eips)
        logger.info("\nDeleted volumes:")
        delete_unattached_volumes(ec2provider=mgmt,
                                  excluded_volumes=exclude_volumes)
Example #38
0
def ec2cleanup(texts, max_hours, exclude_instances, exclude_volumes, exclude_eips, output):
    """Sweep every EC2 provider: delete aged instances, then release
    disassociated addresses and remove unattached volumes, logging and
    reporting (to *output*) along the way.
    """
    for provider_key in list_providers('ec2'):
        mgmt = get_mgmt(provider_key)
        logger.info("\n" + provider_key + ":\n")
        logger.info("Deleted instances:")
        delete_old_instances(texts=texts,
                             ec2provider=mgmt,
                             provider_key=provider_key,
                             date=datetime.datetime.now(),
                             maxhours=max_hours,
                             excluded_instances=exclude_instances,
                             output=output)
        # Let instance terminations settle before address/volume cleanup.
        time.sleep(120)
        logger.info("\nReleased addresses:")
        delete_disassociated_addresses(ec2provider=mgmt,
                                       excluded_eips=exclude_eips)
        logger.info("\nDeleted volumes:")
        delete_unattached_volumes(ec2provider=mgmt,
                                  excluded_volumes=exclude_volumes)
Example #39
0
def test_provider_refresh(request, setup_a_provider, rest_api):
    """Test checking that refresh invoked from the REST API works.

    It provisions a VM when the Provider inventory functionality is disabled, then the functionality
    is enabled and we wait for refresh to finish by checking the field in provider and then we check
    whether the VM appeared in the provider.

    Prerequisities:
        * A provider that is set up, with templates suitable for provisioning.

    Steps:
        * Disable the ``ems_inventory`` and ``ems_operations`` roles
        * Provision a VM
        * Store old refresh date from the provider
        * Initiate refresh
        * Wait until the refresh date updates
        * The VM should appear soon.

    Metadata:
        test_flag: rest
    """
    if "refresh" not in rest_api.collections.providers.action.all:
        pytest.skip("Refresh action is not implemented in this version")
    provider_mgmt = get_mgmt(setup_a_provider.key)
    provider = rest_api.collections.providers.find_by(
        name=setup_a_provider.name)[0]
    # Provision while inventory/operations roles are disabled, so the VM
    # cannot be discovered until the refresh we trigger below.
    with server_roles_disabled("ems_inventory", "ems_operations"):
        vm_name = deploy_template(
            setup_a_provider.key, "test_rest_prov_refresh_{}".format(
                fauxfactory.gen_alphanumeric(length=4)))
        request.addfinalizer(lambda: provider_mgmt.delete_vm(vm_name))
    provider.reload()
    old_refresh_dt = provider.last_refresh_date
    assert provider.action.refresh()["success"], "Refresh was unsuccessful"
    # Refresh has completed once last_refresh_date moves past the stored
    # value.
    wait_for(
        lambda: provider.last_refresh_date,
        fail_func=provider.reload,
        fail_condition=lambda refresh_date: refresh_date == old_refresh_dt,
        num_sec=720,
        delay=5,
    )
    # We suppose that thanks to the random string, there will be only one such VM
    wait_for(
        lambda: len(rest_api.collections.vms.find_by(name=vm_name)),
        fail_condition=lambda l: l == 0,
        num_sec=180,
        delay=10,
    )
    vms = rest_api.collections.vms.find_by(name=vm_name)
    if "delete" in vms[0].action.all:
        vms[0].action.delete()
def run(**kwargs):
    """Calls the functions needed to cleanup templates on RHEVM providers.
       This is called either by template_upload_all script, or by main
       function.

    Args:
        **kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm'].
    """
    providers = cfme_data['management_systems']
    # If a provider was passed, only cleanup on it, otherwise all rhevm providers
    requested = kwargs.get('provider', None)
    rhevm_providers = [
        name for name in providers if providers[name]['type'] == 'rhevm']

    for provider in rhevm_providers:
        if requested and requested != provider:
            continue

        provider_mgmt = get_mgmt(provider)

        # Skip providers that are unreachable or whose engine is down.
        if not net.is_pingable(provider_mgmt.kwargs.get('ipaddress', None)):
            continue
        if not is_ovirt_engine_running(provider_mgmt):
            print('ovirt-engine service not running..')
            continue

        try:
            print('connecting to provider, to establish api handler')
            # Export domain: CLI/kwargs override, else provider yaml.
            edomain = (kwargs.get('edomain', None) or
                       provider_mgmt.kwargs['template_upload']['edomain'])
        except Exception as e:
            logger.exception(e)
            continue

        try:
            print("\n--------Start of {}--------".format(provider))
            cleanup_templates(provider_mgmt.api, edomain,
                              kwargs.get('days_old'),
                              kwargs.get('max_templates'))
        finally:
            # Always cycle the export domain back to 'active', even when
            # cleanup raised.
            change_edomain_state(provider_mgmt, 'maintenance', edomain)
            cleanup_empty_dir_on_edomain(provider_mgmt, edomain)
            change_edomain_state(provider_mgmt, 'active', edomain)
            print("--------End of {}--------\n".format(provider))

    print("Provider Execution completed")
def run(**kwargs):
    """Calls the functions needed to cleanup templates on RHEVM providers.
       This is called either by template_upload_all script, or by main
       function.

    Args:
        **kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm'].
    """
    providers = cfme_data['management_systems']
    for provider in [prov for prov in providers if providers[prov]['type'] == 'rhevm']:

        # If a provider was passed, only cleanup on it, otherwise all rhevm providers
        cli_provider = kwargs.get('provider', None)
        if cli_provider and cli_provider != provider:
            continue

        provider_mgmt = get_mgmt(provider)

        # Skip providers that are unreachable or whose engine is down.
        if not net.is_pingable(provider_mgmt.kwargs.get('ipaddress', None)):
            continue
        elif not is_ovirt_engine_running(provider_mgmt):
            print('ovirt-engine service not running..')
            continue

        try:
            print('connecting to provider, to establish api handler')
            # Export domain: kwargs override first, else the provider yaml.
            edomain = kwargs.get('edomain', None)
            if not edomain:
                edomain = provider_mgmt.kwargs['template_upload']['edomain']
        except Exception as e:
            logger.exception(e)
            continue

        try:
            print("\n--------Start of {}--------".format(provider))
            cleanup_templates(provider_mgmt.api,
                              edomain,
                              kwargs.get('days_old'),
                              kwargs.get('max_templates'))
        finally:
            # Always return the export domain to 'active', even when
            # template cleanup raised.
            change_edomain_state(provider_mgmt,
                                 'maintenance',
                                 edomain)
            cleanup_empty_dir_on_edomain(provider_mgmt, edomain)

            change_edomain_state(provider_mgmt,
                                 'active',
                                 edomain)
            print("--------End of {}--------\n".format(provider))

    print("Provider Execution completed")
def run(**kwargs):
    """Calls the functions needed to cleanup templates on RHEVM providers.
       This is called either by template_upload_all script, or by main
       function.

    Args:
        **kwargs: Kwargs generated from
        cfme_data['template_upload']['template_upload_rhevm'].

    NOTE(review): this version ignores **kwargs and reads the module-level
    ``args`` namespace (args.provider / args.edomain / args.days_old /
    args.max_templates) -- it only works after argparse has populated
    ``args``; confirm before reusing from template_upload_all.
    """
    providers = cfme_data['management_systems']
    for provider in providers:

        # Only RHEVM providers are relevant here.
        if cfme_data['management_systems'][provider]['type'] != 'rhevm':
            continue
        if args.provider:
            if args.provider != provider:
                continue

        mgmt_sys = cfme_data['management_systems'][provider]
        # SSH credentials of the first configured host, used both to check
        # the ovirt-engine service and to clean the export domain dir.
        ssh_rhevm_creds = mgmt_sys['hosts'][0]['credentials']
        sshname = credentials[ssh_rhevm_creds]['username']
        sshpass = credentials[ssh_rhevm_creds]['password']

        # Skip providers that are unreachable or whose engine is down.
        if not net.is_pingable(cfme_data['management_systems'][provider]['ipaddress']):
            continue
        elif not is_ovirt_engine_running(cfme_data['management_systems'][provider]['ipaddress'],
                                         sshname, sshpass):
            print('ovirt-engine service not running..')
            continue

        try:
            print('connecting to provider, to establish api handler')
            api = get_mgmt(provider).api
            edomain = get_edomain(api)
            if args.edomain:
                edomain = args.edomain
            path, edomain_ip = get_edomain_path(api, edomain)
        except Exception as e:
            logger.exception(e)
            continue

        try:
            print("\n--------Start of {}--------".format(provider))
            cleanup_templates(api, edomain, args.days_old, args.max_templates)
        finally:
            # Always return the export domain to 'active', even when
            # template cleanup raised.
            change_edomain_state(api, 'maintenance', edomain)
            cleanup_empty_dir_on_edomain(path, edomain_ip, sshname, sshpass)
            change_edomain_state(api, 'active', edomain)
            print("--------End of {}--------\n".format(provider))

    print("Provider Execution completed")
Example #43
0
def vm(request, setup_a_provider, rest_api):
    """Provision a throwaway VM via deploy_template and return its name.

    Skips when the REST "refresh" action is unavailable.  The VM is
    removed from the provider by a request finalizer, and the function
    waits until the VM shows up in the REST collection before returning.
    """
    if "refresh" not in rest_api.collections.providers.action.all:
        pytest.skip("Refresh action is not implemented in this version")
    mgmt = get_mgmt(setup_a_provider.key)
    provider = rest_api.collections.providers.find_by(name=setup_a_provider.name)[0]
    requested_name = "test_rest_vm_{}".format(fauxfactory.gen_alphanumeric(length=4))
    vm_name = deploy_template(setup_a_provider.key, requested_name)
    request.addfinalizer(lambda: mgmt.delete_vm(vm_name))
    provider.action.refresh()
    # Block until the refreshed inventory contains the new VM.
    wait_for(
        lambda: len(rest_api.collections.vms.find_by(name=vm_name)) > 0,
        num_sec=600, delay=5)
    return vm_name
def scan_vm(provider_key, vm_name, delta, match_queue, scan_failure_queue):
    """Scan an individual VM for age

    Args:
        provider_key (string): the provider key from yaml
        vm_name (string): name of the VM to scan
        delta (datetime.timedelta) The timedelta to compare age against for matches
        match_queue (Queue.Queue): MP queue to hold VMs matching age requirement
        scan_failure_queue (Queue.Queue): MP queue to hold vms that we could not compare age

    Returns:
        None: Uses the Queues to 'return' data
    """
    provider_mgmt = get_mgmt(provider_key)
    now = datetime.datetime.now(tz=pytz.UTC)
    # Nested exceptions to try and be safe about the scanned values and to get complete results
    failure = False
    status = NULL
    logger.info('%s: Scan VM %s...', provider_key, vm_name)
    try:
        # Localize to UTC
        vm_creation_time = provider_mgmt.vm_creation_time(vm_name)
    except Exception:  # noqa
        failure = True
        logger.exception('%s: Exception getting creation time for %s',
                         provider_key, vm_name)
        # This VM must have some problem, include in report even though we can't delete
        try:
            status = provider_mgmt.vm_status(vm_name)
        except Exception:  # noqa
            failure = True
            logger.exception('%s: Exception getting status for %s',
                             provider_key, vm_name)
            status = NULL
    finally:
        # Any failure above goes to the failure queue and short-circuits;
        # the age comparison below only runs when vm_creation_time was set.
        if failure:
            scan_failure_queue.put(
                VmReport(provider_key, vm_name, FAIL, status, NULL))
            return

    vm_delta = now - vm_creation_time
    logger.info('%s: VM %s age: %s', provider_key, vm_name, vm_delta)
    data = VmData(provider_key, vm_name, str(vm_delta))

    # test age to determine which queue it goes in
    if delta < vm_delta:
        match_queue.put(data)
    else:
        logger.info('%s: VM %s did not match age requirement', provider_key,
                    vm_name)
def list_provider_vms(provider_key):
    """Append one row per VM on *provider_key* to the module-level
    ``providers_vm_list``: [type, key, name, age, vm_type, status].

    Errors are logged and swallowed so one bad provider does not abort
    a multi-provider sweep.
    """
    try:
        mgmt = get_mgmt(provider_key)
        vm_names = mgmt.list_vm()
        provider_type = cfme_data.get("management_systems", {})[provider_key].get('type', None)
        now = datetime.datetime.now()
        for name in vm_names:
            created = mgmt.vm_creation_time(name)
            row = [provider_type, provider_key, name,
                   (now - created), mgmt.vm_type(name), mgmt.vm_status(name)]
            providers_vm_list.append(row)
    except Exception as e:
        logger.error('failed to list vms from provider {}'.format(provider_key))
        logger.exception(e)
Example #46
0
def test_provider_refresh(request, setup_a_provider, rest_api):
    """Test checking that refresh invoked from the REST API works.

    It provisions a VM when the Provider inventory functionality is disabled, then the functionality
    is enabled and we wait for refresh to finish by checking the field in provider and then we check
    whether the VM appeared in the provider.

    Prerequisities:
        * A provider that is set up, with templates suitable for provisioning.

    Steps:
        * Disable the ``ems_inventory`` and ``ems_operations`` roles
        * Provision a VM
        * Store old refresh date from the provider
        * Initiate refresh
        * Wait until the refresh date updates
        * The VM should appear soon.

    Metadata:
        test_flag: rest
    """
    if "refresh" not in rest_api.collections.providers.action.all:
        pytest.skip("Refresh action is not implemented in this version")
    provider_mgmt = get_mgmt(setup_a_provider.key)
    provider = rest_api.collections.providers.find_by(name=setup_a_provider.name)[0]
    # Provision while inventory/operations roles are disabled, so the VM
    # cannot be discovered until the refresh we trigger below.
    with server_roles_disabled("ems_inventory", "ems_operations"):
        vm_name = deploy_template(
            setup_a_provider.key,
            "test_rest_prov_refresh_{}".format(fauxfactory.gen_alphanumeric(length=4)))
        request.addfinalizer(lambda: provider_mgmt.delete_vm(vm_name))
    provider.reload()
    old_refresh_dt = provider.last_refresh_date
    assert provider.action.refresh()["success"], "Refresh was unsuccessful"
    # Refresh has completed once last_refresh_date moves past the stored
    # value.
    wait_for(
        lambda: provider.last_refresh_date,
        fail_func=provider.reload,
        fail_condition=lambda refresh_date: refresh_date == old_refresh_dt,
        num_sec=720,
        delay=5,
    )
    # We suppose that thanks to the random string, there will be only one such VM
    wait_for(
        lambda: len(rest_api.collections.vms.find_by(name=vm_name)),
        fail_condition=lambda l: l == 0,
        num_sec=180,
        delay=10,
    )
    vms = rest_api.collections.vms.find_by(name=vm_name)
    if "delete" in vms[0].action.all:
        vms[0].action.delete()
Example #47
0
def vm_action(request, current_provider):
    """Queue an asynchronous power/delete action on a VM via celery tasks.

    Superuser-only.  Expects POST params ``vm_name`` and ``action`` (one
    of poweron / poweroff / suspend / delete).  Returns a plain-text
    response in every case; provider lookup failures are reported in the
    response body rather than raised.
    """
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return HttpResponse("Not authenticated", content_type="text/plain")
    try:
        # Validate the provider key before queueing anything.
        get_mgmt(current_provider)
    except Exception as e:
        return HttpResponse(
            "Troubles with provider {}: {}".format(current_provider, str(e)),
            content_type="text/plain")
    vm_name = request.POST["vm_name"]
    action = request.POST["action"]
    if action == "poweron":
        anyvm_power_on.delay(current_provider, vm_name)
    elif action == "poweroff":
        anyvm_power_off.delay(current_provider, vm_name)
    elif action == "suspend":
        anyvm_suspend.delay(current_provider, vm_name)
    elif action == "delete":
        anyvm_delete.delay(current_provider, vm_name)
    else:
        # BUG FIX: the error response was built but never returned, so an
        # unknown action fell through, was logged as initiated, and
        # reported success to the caller.
        return HttpResponse("No such action {}!".format(action), content_type="text/plain")
    logger().info("User {} initiated {} on {}@{}".format(
        request.user.username, action, vm_name, current_provider))
    return HttpResponse("Action {} was initiated".format(action), content_type="text/plain")
Example #48
0
def ec2cleanup(exclude_volumes, exclude_eips, output):
    """Release disassociated elastic IPs and delete unattached volumes on
    every EC2 provider, writing a report to *output*.

    NOTE: Python 2 syntax (print statements) -- this snippet predates a
    py3 port.

    Args:
        exclude_volumes: volumes exempt from deletion.
        exclude_eips: elastic IPs exempt from release.
        output: path of the report file (truncated, then appended to by
            the delete_* helpers).
    """
    # Start a fresh report file with a header; helpers append to it later.
    with open(output, 'w') as report:
        report.write('ec2cleanup.py, Address and Volume Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
    for provider_key in list_provider_keys('ec2'):
        provider_mgmt = get_mgmt(provider_key)
        print "----- Provider: {} -----".format(provider_key)  # noqa
        print "Releasing addresses..."  # noqa
        delete_disassociated_addresses(provider_mgmt=provider_mgmt,
                                       excluded_eips=exclude_eips,
                                       output=output)
        print "Deleting volumes..."  # noqa
        delete_unattached_volumes(provider_mgmt=provider_mgmt,
                                  excluded_volumes=exclude_volumes,
                                  output=output)
def main(*providers):
    """Delete unassociated floating IPs on every configured OpenStack provider.

    NOTE: the *providers* varargs are currently ignored; the provider list
    always comes from ``list_providers('openstack')``.
    """
    for provider_key in list_providers('openstack'):
        print('Checking {}'.format(provider_key))
        api = get_mgmt(provider_key).api
        try:
            # Floating IPs with no fixed IP are unassociated -> orphans.
            orphans = api.floating_ips.findall(fixed_ip=None)
        except Exception:
            print('Unable to get fips for {}:'.format(provider_key))
            print(format_exc().splitlines()[-1])
            continue

        for orphan in orphans:
            print('Deleting {} on {}'.format(orphan.ip, provider_key))
            orphan.delete()
            print('{} deleted'.format(orphan.ip))
Example #50
0
def vm(request, setup_a_provider, rest_api):
    """Provision a throwaway VM via deploy_template and return its name.

    Skips when the REST "refresh" action is unavailable.  The VM is
    removed from the provider by a request finalizer; the fixture waits
    until the VM shows up in the REST vms collection before returning.
    """
    if "refresh" not in rest_api.collections.providers.action.all:
        pytest.skip("Refresh action is not implemented in this version")
    provider_mgmt = get_mgmt(setup_a_provider.key)
    provider = rest_api.collections.providers.find_by(
        name=setup_a_provider.name)[0]
    vm_name = deploy_template(
        setup_a_provider.key,
        "test_rest_vm_{}".format(fauxfactory.gen_alphanumeric(length=4)))
    request.addfinalizer(lambda: provider_mgmt.delete_vm(vm_name))
    provider.action.refresh()
    # Block until the refreshed inventory contains the new VM.
    wait_for(lambda: len(rest_api.collections.vms.find_by(name=vm_name)) > 0,
             num_sec=600,
             delay=5)
    return vm_name
def depot_machine_ip():
    """ Deploy vm for depot test

    Deploys the log-depot template configured in cfme_data, yields the
    new VM's IP address, and deletes the VM from the provider on teardown.
    """
    machine_name = "test_long_log_depot_{}".format(fauxfactory.gen_alphanumeric())
    depot_conf = conf.cfme_data.get("log_db_operations", {})["log_db_depot_template"]
    provider_key = depot_conf["provider"]
    template = depot_conf["template_name"]
    mgmt = get_mgmt(provider_key)
    deploy_template(provider_key,
                    machine_name,
                    template_name=template)
    yield mgmt.get_ip_address(machine_name)
    # Teardown: remove the depot VM after the test is done with it.
    mgmt.delete_vm(machine_name)
Example #52
0
def depot_machine_ip():
    """ Deploy vm for depot test

    This fixture uses for deploy vm on provider from yaml and then receive it's ip
    After test run vm deletes from provider
    """
    depot_machine_name = "test_long_log_depot_{}".format(fauxfactory.gen_alphanumeric())
    data = conf.cfme_data.get("log_db_operations", {})
    depot_provider_key = data["log_db_depot_template"]["provider"]
    depot_template_name = data["log_db_depot_template"]["template_name"]
    prov = get_mgmt(depot_provider_key)
    deploy_template(depot_provider_key,
                    depot_machine_name,
                    template_name=depot_template_name)
    yield prov.get_ip_address(depot_machine_name)
    prov.delete_vm(depot_machine_name)
def azure_cleanup(nic_template, pip_template, days_old, output):
    """Remove orphaned NICs, public IPs and empty stacks on all Azure
    providers, writing a human-readable report to *output*.

    Args:
        nic_template: name pattern used to find free (orphaned) NICs.
        pip_template: name pattern used to find free public IPs.
        days_old: only stacks at least this old are considered.
        output: path of the report file.

    Returns:
        0 on success, 1 when any exception occurred (the traceback is
        written to the report instead of being raised).
    """
    with open(output, 'w') as report:
        report.write('azure_cleanup.py, NICs, PIPs and Stack Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
        try:
            for provider_key in list_provider_keys('azure'):
                provider_mgmt = get_mgmt(provider_key)
                # Gather candidates first so the report can show what was
                # found even when a section has nothing to remove.
                nic_list = provider_mgmt.list_free_nics(nic_template)
                pip_list = provider_mgmt.list_free_pip(pip_template)
                stack_list = provider_mgmt.list_stack(days_old=days_old)
                report.write("----- Provider: {} -----\n".format(provider_key))
                if nic_list:
                    report.write(
                        "Removing Nics with the name \'{}\':\n".format(
                            nic_template))
                    report.write("\n".join(str(k) for k in nic_list))
                    report.write("\n")
                    provider_mgmt.remove_nics_by_search(nic_template)
                else:
                    report.write(
                        "No \'{}\' NICs were found\n".format(nic_template))
                if pip_list:
                    report.write(
                        "Removing Public IPs with the name \'{}\':\n".format(
                            pip_template))
                    report.write("\n".join(str(k) for k in pip_list))
                    report.write("\n")
                    provider_mgmt.remove_pips_by_search(pip_template)
                else:
                    report.write("No \'{}\' Public IPs were found\n".format(
                        pip_template))
                if stack_list:
                    # Only stacks with no remaining resources are deleted.
                    report.write("Removing empty Stacks:\n")
                    for stack in stack_list:
                        if provider_mgmt.is_stack_empty(stack):
                            provider_mgmt.delete_stack(stack)
                            report.write(
                                "Stack {} is empty - Removed\n".format(stack))
                else:
                    report.write(
                        "No stacks older than \'{}\' days were found\n".format(
                            days_old))
            return 0
        except Exception:
            # Deliberate catch-all: record the traceback in the report and
            # signal failure via the return code instead of raising.
            report.write("Something bad happened during Azure cleanup\n")
            report.write(tb.format_exc())
            return 1
Example #54
0
def scan_vm(provider_key, vm_name, delta, match_queue, scan_failure_queue):
    """Scan an individual VM for age

    Args:
        provider_key (string): the provider key from yaml
        vm_name (string): name of the VM to scan
        delta (datetime.timedelta) The timedelta to compare age against for matches
        match_queue (Queue.Queue): MP queue to hold VMs matching age requirement
        scan_failure_queue (Queue.Queue): MP queue to hold vms that we could not compare age

    Returns:
        None: Uses the Queues to 'return' data
    """
    provider_mgmt = get_mgmt(provider_key)
    now = datetime.datetime.now(tz=pytz.UTC)
    # Nested exceptions to try and be safe about the scanned values and to get complete results
    failure = False
    status = NULL
    logger.info('%s: Scan VM %s...', provider_key, vm_name)
    try:
        # Localize to UTC
        vm_creation_time = provider_mgmt.vm_creation_time(vm_name)
    except Exception:  # noqa
        failure = True
        logger.exception('%s: Exception getting creation time for %s', provider_key, vm_name)
        # This VM must have some problem, include in report even though we can't delete
        try:
            status = provider_mgmt.vm_status(vm_name)
        except Exception:  # noqa
            failure = True
            logger.exception('%s: Exception getting status for %s', provider_key, vm_name)
            status = NULL
    finally:
        # Any failure above goes to the failure queue and short-circuits;
        # the age comparison below only runs when vm_creation_time was set.
        if failure:
            scan_failure_queue.put(VmReport(provider_key, vm_name, FAIL, status, NULL))
            return

    vm_delta = now - vm_creation_time
    logger.info('%s: VM %s age: %s', provider_key, vm_name, vm_delta)
    data = VmData(provider_key, vm_name, str(vm_delta))

    # test age to determine which queue it goes in
    if delta < vm_delta:
        match_queue.put(data)
    else:
        logger.info('%s: VM %s did not match age requirement', provider_key, vm_name)
def main(*providers):
    """Delete unattached Cinder volumes older than GRACE_TIME on each of
    the given providers.

    NOTE: Python 2 syntax (print statements) -- this snippet predates a
    py3 port.

    Args:
        *providers: provider keys to sweep.
    """
    for provider_key in providers:
        print "Cleaning up", provider_key
        api = get_mgmt(provider_key).capi
        try:
            # Volumes with an empty attachments list are unattached.
            volumes = api.volumes.findall(attachments=[])
        except Exception as e:
            print "Connect to provider failed:", provider_key, type(e).__name__, str(e)
            continue

        for volume in volumes:
            # Only delete volumes past the grace period.
            if iso8601.parse_date(volume.created_at) < (datetime.now(tz=local_tz) - GRACE_TIME):
                print "Deleting", volume.id
                try:
                    volume.delete()
                except Exception as e:
                    # Best-effort: report the failure and keep going.
                    print "Delete failed:", type(e).__name__, str(e)
def process_provider_vms(provider_key, provider_type, matchers,
                         delta, vms_to_delete, list_vms=None):
    """Collect VMs on one provider whose age exceeds ``delta``.

    Every VM name that matches ``matchers`` and whose age is greater than
    ``delta`` is recorded, together with its age, in the shared
    ``vms_to_delete`` mapping under ``provider_key``.  Intended to run as a
    worker (one per provider); the module-level ``lock`` guards both the
    shared mapping and stdout.
    """
    with lock:
        print('{} processing'.format(provider_key))
    try:
        reference_time = datetime.datetime.now()
        with lock:
            # Known conf issue :)
            provider = get_mgmt(provider_key)
        names = provider.list_vm()
        if list_vms:
            list_provider_vms(provider_key)

        for name in names:
            try:
                if not match(matchers, name):
                    continue

                # For powered-off vSphere VMs the boot time is unavailable,
                # so fall back to the modification time of the VM's config
                # files on the datastore.
                if provider_type == 'virtualcenter' and provider.vm_status(name) == 'poweredOff':
                    host = provider.get_vm_host_name(name)
                    config_path = provider.get_vm_config_files_path(name)
                    datastore_url = provider.get_vm_datastore_path(name, config_path)
                    created = get_vm_config_modified_time(host, name,
                                                          datastore_url, provider_key)
                else:
                    created = provider.vm_creation_time(name)

                # "created + delta < now" rewritten as an age comparison.
                age = reference_time - created
                if age > delta:
                    with lock:
                        vms_to_delete[provider_key].add((name, age))
            except Exception as e:
                logger.error(e)
                logger.error('Failed to get creation/boot time for {} on {}'.format(
                    name, provider_key))
                continue

        with lock:
            print('{} finished'.format(provider_key))
    except Exception as ex:
        with lock:
            # Print out the error message too because logs in the job get deleted
            print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)