Beispiel #1
0
def vm_action(request, current_provider):
    """Dispatch an async power/delete action on a VM.

    Requires an authenticated superuser. Validates the provider, then
    queues the matching celery task and answers with plain text.
    """
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return HttpResponse("Not authenticated", content_type="text/plain")
    try:
        # sanity check that the provider is reachable before queuing work
        get_mgmt(current_provider)
    except Exception as e:
        return HttpResponse("Troubles with provider {}: {}".format(
            current_provider, str(e)),
                            content_type="text/plain")
    vm_name = request.POST["vm_name"]
    action = request.POST["action"]
    if action == "poweron":
        anyvm_power_on.delay(current_provider, vm_name)
    elif action == "poweroff":
        anyvm_power_off.delay(current_provider, vm_name)
    elif action == "suspend":
        anyvm_suspend.delay(current_provider, vm_name)
    elif action == "delete":
        anyvm_delete.delay(current_provider, vm_name)
    else:
        # BUG FIX: the error response was constructed but never returned,
        # so unknown actions fell through and reported success.
        return HttpResponse("No such action {}!".format(action),
                            content_type="text/plain")
    logger().info("User {} initiated {} on {}@{}".format(
        request.user.username, action, vm_name, current_provider))
    return HttpResponse("Action {} was initiated".format(action),
                        content_type="text/plain")
Beispiel #2
0
    def mgmt(self):
        """Return the wrapanapi management system instance.

        ``provider_data`` takes precedence over the provider name.
        """
        target = self.provider_data or self.provider
        if not target:
            logger.error("Please specify provider or provider_data to retrieve it's mgmt.")
            raise TemplateUploadException("Cannot get_mgmt due to empty data.")
        return get_mgmt(target)
Beispiel #3
0
    def get_mgmt_system(self):
        """Return the mgmt_system via :py:func:`utils.providers.get_mgmt`.

        Prefers ``self.key``; falls back to ``provider_data`` if present.
        """
        # gotta stash this in here to prevent circular imports
        from cfme.utils.providers import get_mgmt

        if self.key:
            return get_mgmt(self.key)
        provider_data = getattr(self, 'provider_data', None)
        if provider_data:
            return get_mgmt(provider_data)
        raise ProviderHasNoKey(
            'Provider {} has no key, so cannot get mgmt system'.format(self.name))
Beispiel #4
0
    def mgmt(self):
        """Return the mgmt_system via :py:func:`utils.providers.get_mgmt`.

        ``self.key`` wins over ``provider_data``; raises when neither exists.
        """
        # local import to break a circular dependency at module load time
        from cfme.utils.providers import get_mgmt

        source = self.key or getattr(self, 'provider_data', None)
        if source:
            return get_mgmt(source)
        raise ProviderHasNoKey(
            'Provider {} has no key, so cannot get mgmt system'.format(self.name))
Beispiel #5
0
    def mgmt(self):
        """Wrapanapi management system accessor.

        ``provider_data`` has higher priority than the provider name.
        """
        for candidate in (self.provider_data, self.provider):
            if candidate:
                return get_mgmt(candidate)
        logger.error(
            "Please specify provider or provider_data to retrieve it's mgmt."
        )
        raise TemplateUploadException("Cannot get_mgmt due to empty data.")
Beispiel #6
0
def main(args):
    """ Cleanup all snapshots name starting with test_snapshot and created by >= 2 hours before

    :param providers: Lists provider keys
    :return:
    """
    for provider_key in args.providers:
        logger.info("Cleaning up {}".format(provider_key))
        mgmt = get_mgmt(provider_key)

        try:
            templates = mgmt.list_templates()
        except Exception:
            logger.exception("Unable to get the list of templates")
            continue

        for template in templates:
            # only prefix-matched images older than the cutoff are removed
            if not template.name.startswith(args.name):
                continue
            if template.creation_time >= TIME_LIMIT:
                continue
            logger.info("Deleting {}".format(template.name))
            try:
                template.delete()
            except Exception:
                logger.exception("Snapshot {} Deletion failed".format(
                    template.name))
Beispiel #7
0
def get_provider_templates(provider_key, template_providers,
                           unresponsive_providers, thread_lock):
    """Record one provider's template names into ``template_providers``.

    Written to be farmed out to threads; shared structures are guarded
    by ``thread_lock``.
    """
    try:
        with thread_lock:
            mgmt = get_mgmt(provider_key)
        # TODO: change after openshift wrapanapi refactor
        if isinstance(mgmt, Openshift):
            names = mgmt.list_template()
        else:
            # By default, wrapanapi list_templates() will only list private images on gce/ec2
            names = [tpl.name for tpl in mgmt.list_templates()]
        print(provider_key, 'returned {} templates'.format(len(names)))
        with thread_lock:
            for name in names:
                # If it ends with 'db', skip it, it's a largedb/nodb variant
                if name.lower().endswith('db'):
                    continue
                template_providers[name].append(provider_key)
    except Exception:
        logger.exception('%s\t%s', provider_key, 'exception getting templates')
        with thread_lock:
            unresponsive_providers.add(provider_key)
Beispiel #8
0
def power_state(request, current_provider):
    """Return the VM's mapped power state as a plain-text response."""
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return go_home(request)
    vm_name = request.POST["vm_name"]
    manager = get_mgmt(current_provider)
    raw_status = manager.vm_status(vm_name)
    state = Appliance.POWER_STATES_MAPPING.get(raw_status, "unknown")
    return HttpResponse(state, content_type="text/plain")
def get_datastores_per_host(provider_key):
    """Print unregistered (orphaned) files per host for one provider."""
    print('{} processing to get datastores per host'.format(provider_key))
    try:
        mgmt = get_mgmt(provider_key)

        registered = get_registered_vm_files(provider_key)
        datastore_urls = {
            host: mgmt.list_host_datastore_url(host)
            for host in mgmt.list_host()
        }
        orphans = []

        print("\n*********************UNREGISTERED FILES ON: {}**********************\n".format(
            provider_key))
        print('HOST_NAME\t\tFILE_PATH\t\tTEMPLATE_VM_ISO\t\tNUMBER_OF_FILES\n')
        for host, datastore_url in datastore_urls.items():
            try:
                list_orphaned_files_per_host(host, datastore_url,
                                             provider_key, registered,
                                             orphans)
            except Exception as e:
                logger.error(e)
                continue

    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
def get_registered_vm_files(provider_key):
    """Map VM config-file directories to VM names for all registered VMs.

    Prints the mapping and returns it; on total failure logs and returns
    None (best-effort, job logs get deleted so errors are also printed).
    """
    try:
        print("{} processing all the registered files..".format(provider_key))
        registered = defaultdict(set)
        mgmt = get_mgmt(provider_key)
        for vm in mgmt.list_vms():
            try:
                config_path = vm.get_config_files_path()
                directories = re.findall(r'\s(.*)/\w*', config_path)
                registered[directories[0]] = vm.name
            except Exception as e:
                logger.error(e)
                logger.error('Failed to get creation/boot time for {} on {}'.format(
                    vm.name, provider_key))
                continue
        print("\n**************************REGISTERED FILES ON {}***********************\n".format(
            provider_key))
        for directory, vm_name in registered.items():
            print('FILE_NAME: {}\nVM_NAME: {}\n'.format(directory, vm_name))
        return registered
    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
def get_provider_templates(provider_key, template_providers,
                           unresponsive_providers, thread_lock):
    """Record one provider's template names into ``template_providers``.

    functionalized to make it easy to farm this out to threads; shared
    structures are guarded by ``thread_lock``.
    """
    try:
        with thread_lock:
            provider_mgmt = get_mgmt(provider_key)
        if cfme_data['management_systems'][provider_key]['type'] == 'ec2':
            # dirty hack to filter out ec2 public images, because there are literally hundreds.
            images = provider_mgmt.api.get_all_images(
                owners=['self'], filters={'image-type': 'machine'})
            # BUG FIX: was `map(...)` — on Python 3 a map object has no len()
            # and can only be consumed once; materialize into a list.
            templates = [i.name or i.id for i in images]
        else:
            templates = provider_mgmt.list_template()
        print(provider_key, 'returned {} templates'.format(len(templates)))
        with thread_lock:
            for template in templates:
                # If it ends with 'db', skip it, it's a largedb/nodb variant
                if str(template).lower().endswith('db'):
                    continue
                template_providers[template].append(provider_key)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate
        print("{}\t{}\t{}".format(provider_key, 'failed:',
                                  traceback.format_exc().splitlines()[-1]))
        with thread_lock:
            unresponsive_providers.add(provider_key)
Beispiel #12
0
def get_provider_templates(provider_key, template_providers,
                           unresponsive_providers, thread_lock):
    """Record one provider's template names into ``template_providers``.

    functionalized to make it easy to farm this out to threads; shared
    structures are guarded by ``thread_lock``.
    """
    try:
        with thread_lock:
            provider_mgmt = get_mgmt(provider_key)
        provider_type = cfme_data['management_systems'][provider_key]['type']
        if provider_type == 'ec2':
            # dirty hack to filter out ec2 public images, because there are literally hundreds.
            images = provider_mgmt.api.get_all_images(
                owners=['self'], filters={'image-type': 'machine'})
            # BUG FIX: was `map(...)` — a py3 map object has no len() and is
            # single-use; materialize into a list.
            templates = [i.name or i.id for i in images]
        elif provider_type == 'gce':
            # BUG FIX: this was a second `if`, so its `else` branch clobbered
            # the ec2 result computed above; `elif` keeps branches exclusive.
            # get_private_images returns a dictionary with items list that has tuple with list of
            #  template dictionaries in the 1st spot; wrap in list() because
            # dict_items is not subscriptable on Python 3.
            templates = [
                t.get('name')
                for t in list(provider_mgmt.get_private_images().items())[0][1]
            ]
        else:
            templates = provider_mgmt.list_template()
        logger.info('%s: returned %s templates', provider_key, len(templates))
        with thread_lock:
            for template in templates:
                # If it ends with 'db', skip it, it's a largedb/nodb variant
                if str(template).lower().endswith('db'):
                    continue
                template_providers[template].append(provider_key)
    except Exception:
        logger.exception('%s\t%s', provider_key, 'exception getting templates')
        with thread_lock:
            unresponsive_providers.add(provider_key)
def get_provider_templates(provider_key, template_providers, unresponsive_providers, thread_lock):
    """Record one provider's template names into ``template_providers``.

    functionalized to make it easy to farm this out to threads; shared
    structures are guarded by ``thread_lock``.
    """
    try:
        with thread_lock:
            provider_mgmt = get_mgmt(provider_key)
        provider_type = cfme_data['management_systems'][provider_key]['type']
        if provider_type == 'ec2':
            # dirty hack to filter out ec2 public images, because there are literally hundreds.
            images = provider_mgmt.api.get_all_images(owners=['self'],
                filters={'image-type': 'machine'})
            # BUG FIX: was `map(...)` — a py3 map object has no len() and is
            # single-use; materialize into a list.
            templates = [i.name or i.id for i in images]
        elif provider_type == 'gce':
            # BUG FIX: this was a second `if`, so its `else` branch clobbered
            # the ec2 result computed above; `elif` keeps branches exclusive.
            # get_private_images returns a dictionary with items list that has tuple with list of
            #  template dictionaries in the 1st spot; wrap in list() because
            # dict_items is not subscriptable on Python 3.
            templates = [t.get('name')
                         for t in list(provider_mgmt.get_private_images().items())[0][1]]
        else:
            templates = provider_mgmt.list_template()
        print(provider_key, 'returned {} templates'.format(len(templates)))
        with thread_lock:
            for template in templates:
                # If it ends with 'db', skip it, it's a largedb/nodb variant
                if str(template).lower().endswith('db'):
                    continue
                template_providers[template].append(provider_key)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate
        print("{}\t{}\t{}".format(provider_key, 'failed:', traceback.format_exc().splitlines()[-1]))
        with thread_lock:
            unresponsive_providers.add(provider_key)
def templates_on_provider(provider_key, bad_providers):
    """List templates on specific provider"""
    try:
        provider_mgmt = get_mgmt(provider_key)
        # TODO: change after openshift wrapanapi refactor
        if isinstance(provider_mgmt, Openshift):
            templates = provider_mgmt.list_template()
        else:
            extra = {'executable_by_me': False} if isinstance(provider_mgmt, EC2System) else {}
            templates = [tpl.name for tpl in provider_mgmt.list_templates(**extra)]

        logger.info('%s returned %s templates', provider_key, len(templates))

        # If it ends with 'db', skip it, it's a largedb/nodb variant
        kept = [
            name for name in templates
            if not (name.lower().endswith('db') and not name.lower().endswith('extdb'))
        ]
        return {provider_key: kept}

    except Exception:
        logger.exception('%s\t%s', provider_key, 'exception getting templates')
        bad_providers.put(provider_key)
def scan_provider(provider_key, matchers, match_queue, scan_failure_queue):
    """
    Process the VMs on a given provider, comparing name and creation time.
    Append vms meeting criteria to vms_to_delete

    Args:
        provider_key (string): the provider key from yaml
        matchers (list): A list of regex objects with match() method
        match_queue (Queue.Queue): MP queue to hold VMs matching age requirement
        scan_failure_queue (Queue.Queue): MP queue to hold vms that we could not compare age
    Returns:
        None: Uses the Queues to 'return' data
    """
    logger.info('%r: Start scan for vm text matches', provider_key)
    try:
        all_vm_names = get_mgmt(provider_key).list_vm()
    except Exception:  # noqa
        scan_failure_queue.put(VmReport(provider_key, FAIL, NULL, NULL, NULL))
        logger.exception('%r: Exception listing vms', provider_key)
        return

    matched = [vm for vm in all_vm_names if match(matchers, vm)]
    for vm in matched:
        match_queue.put(VmProvider(provider_key, vm))

    unmatched = set(all_vm_names) - set(matched)
    logger.info('%r: NOT matching text filters: %r', provider_key,
                unmatched)
    logger.info('%r: MATCHED text filters: %r', provider_key, matched)
def templates_on_provider(provider_key, bad_providers):
    """List templates on specific provider"""
    try:
        provider_mgmt = get_mgmt(provider_key)
        # TODO: change after openshift wrapanapi refactor
        if isinstance(provider_mgmt, Openshift):
            templates = provider_mgmt.list_template()
        else:
            list_kwargs = {}
            if isinstance(provider_mgmt, EC2System):
                list_kwargs['executable_by_me'] = False
            templates = [t.name for t in provider_mgmt.list_templates(**list_kwargs)]

        logger.info('%s returned %s templates', provider_key, len(templates))

        def _is_db_variant(name):
            # a 'db' suffix marks a largedb/nodb variant, but 'extdb' is kept
            lowered = name.lower()
            return lowered.endswith('db') and not lowered.endswith('extdb')

        return {provider_key: [t for t in templates if not _is_db_variant(t)]}

    except Exception:
        logger.exception('%s\t%s', provider_key, 'exception getting templates')
        bad_providers.put(provider_key)
Beispiel #17
0
def power_state(request, current_provider):
    """Plain-text power state of a VM, mapped via POWER_STATES_MAPPING."""
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return go_home(request)
    vm_name = request.POST["vm_name"]
    status = get_mgmt(current_provider).vm_status(vm_name)
    return HttpResponse(
        Appliance.POWER_STATES_MAPPING.get(status, "unknown"),
        content_type="text/plain")
Beispiel #18
0
 def mgmt(self):
     """Build a wrapanapi mgmt instance from this provider's data.

     NOTE(review): this writes the endpoint credentials into ``self.data``
     in place (no copy) — confirm that mutation is intended.
     """
     from cfme.utils.providers import get_mgmt
     endpoint = self.default_endpoint
     payload = self.data
     payload['hostname'] = endpoint.hostname
     payload['username'] = endpoint.credentials.principal
     payload['password'] = endpoint.credentials.secret
     return get_mgmt(payload)
def cu_vm(provider, vm_name, template):
    """
    Deploys CU VM
    """
    provider_dict = cfme_data['management_systems'][provider]
    # TODO this key isn't in cfme qe yamls
    cap_util = provider_dict['cap_and_util']

    # TODO methods deploy_template calls don't accept resourcepool and  allowed_datastores as kwargs
    deploy_template(provider, vm_name, template,
                    resourcepool=cap_util['resource_pool'],
                    allowed_datastores=cap_util['allowed_datastores'])

    mgmt = get_mgmt(provider)
    vm_running(mgmt, vm_name)
    ip = mgmt.get_ip_address(vm_name)

    # TODO this key isn't in cfme qe yamls
    ssh_creds = credentials[provider_dict['capandu_vm_creds']]

    # Create cron jobs to generate disk and network activity on the CU VM.
    with make_ssh_client(ip, ssh_creds['username'], ssh_creds['password']) as ssh_client:
        try:
            config_cu_vm(ssh_client)
        except CUCommandException:
            # undo the deployment if configuration fails, then re-raise
            _vm_cleanup(mgmt, vm_name)
            raise

    vm_running(mgmt, vm_name)
Beispiel #20
0
 def mgmt(self):
     """Return a wrapanapi mgmt object built from this provider's data.

     NOTE(review): mutates ``self.data`` in place with endpoint
     credentials — confirm this is intended (no copy is taken).
     """
     from cfme.utils.providers import get_mgmt
     ep = self.default_endpoint
     conf = self.data
     conf['hostname'] = ep.hostname
     conf['username'] = ep.credentials.principal
     conf['password'] = ep.credentials.secret
     return get_mgmt(conf)
def get_registered_vm_files(provider_key):
    """Collect and print registered VM config-file directories for a provider.

    Returns the directory->vm_name mapping, or None on total failure
    (errors are printed as well, since job logs get deleted).
    """
    try:
        print("{} processing all the registered files..".format(provider_key))
        registered = defaultdict(set)
        mgmt = get_mgmt(provider_key)
        for vm_name in mgmt.list_vm():
            try:
                config_path = mgmt.get_vm_config_files_path(vm_name)
                dirs = re.findall(r'\s(.*)/\w*', config_path)
                registered[dirs[0]] = vm_name
            except Exception as e:
                logger.error(e)
                logger.error('Failed to get creation/boot time for {} on {}'.format(
                    vm_name, provider_key))
                continue
        print("\n**************************REGISTERED FILES ON {}***********************\n".format(
            provider_key))
        for file_name, name in registered.items():
            print('FILE_NAME: {}\nVM_NAME: {}\n'.format(file_name, name))
        return registered
    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
def scan_provider(provider_key, matchers, match_queue, scan_failure_queue):
    """
    Process the VMs on a given provider, comparing name and creation time.
    Append vms meeting criteria to vms_to_delete

    Args:
        provider_key (string): the provider key from yaml
        matchers (list): A list of regex objects with match() method
        match_queue (Queue.Queue): MP queue to hold VMs matching age requirement
        scan_failure_queue (Queue.Queue): MP queue to hold vms that we could not compare age
    Returns:
        None: Uses the Queues to 'return' data
    """
    logger.info('%r: Start scan for vm text matches', provider_key)
    try:
        names = get_mgmt(provider_key).list_vm()
    except Exception:  # noqa
        scan_failure_queue.put(VmReport(provider_key, FAIL, NULL, NULL, NULL))
        logger.exception('%r: Exception listing vms', provider_key)
        return

    hits = [vm for vm in names if match(matchers, vm)]
    for vm in hits:
        match_queue.put(VmProvider(provider_key, vm))

    misses = set(names) - set(hits)
    logger.info('%r: NOT matching text filters: %r', provider_key, misses)
    logger.info('%r: MATCHED text filters: %r', provider_key, hits)
def get_datastores_per_host(provider_key):
    """Log unregistered (orphaned) files per host for one provider."""
    logger.info('%s processing to get datastores per host', provider_key)
    try:
        mgmt = get_mgmt(provider_key)

        registered = get_registered_vm_files(provider_key)
        urls_by_host = {
            h: mgmt.list_host_datastore_url(h) for h in mgmt.list_host()
        }
        orphans = []

        logger.info(
            "\n*********************UNREGISTERED FILES ON: %s**********************\n",
            provider_key)
        logger.info(
            'HOST_NAME\t\tFILE_PATH\t\tTEMPLATE_VM_ISO\t\tNUMBER_OF_FILES\n')
        for host, datastore_url in urls_by_host.items():
            try:
                list_orphaned_files_per_host(host, datastore_url,
                                             provider_key, registered,
                                             orphans)
            except Exception:
                logger.exception(
                    'Exception calling list_orphaned_files_per_host')
                continue

    except Exception:
        logger.exception('failed to process vms from provider %s',
                         provider_key)
def get_datastores_per_host(provider_key):
    """Print unregistered (orphaned) files found per host on a provider."""
    print('{} processing to get datastores per host'.format(provider_key))
    try:
        provider = get_mgmt(provider_key)

        vm_registered_files = get_registered_vm_files(provider_key)
        unregistered_files = []
        host_datastore_url = {}
        for host in provider.list_host():
            host_datastore_url[host] = provider.list_host_datastore_url(host)

        print("\n*********************UNREGISTERED FILES ON: {}**********************\n".format(
            provider_key))
        print('HOST_NAME\t\tFILE_PATH\t\tTEMPLATE_VM_ISO\t\tNUMBER_OF_FILES\n')
        for host in host_datastore_url:
            try:
                list_orphaned_files_per_host(host, host_datastore_url[host],
                                             provider_key, vm_registered_files,
                                             unregistered_files)
            except Exception as e:
                logger.error(e)
                continue

    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
Beispiel #25
0
def ec2cleanup(exclude_volumes, exclude_eips, exclude_elbs, exclude_enis,
               exclude_stacks, stack_template, output):
    """Run the full EC2 cleanup pass: volumes, ELBs, ENIs, stacks, EIPs.

    Writes a plain-text report header to ``output`` first, then sweeps
    every configured ec2 provider.
    """
    with open(output, 'w') as report:
        report.write(
            'ec2cleanup.py, Address, Volume, LoadBalancer and Network Interface Cleanup'
        )
        report.write("\nDate: {}\n".format(datetime.now()))
    for provider_key in list_provider_keys('ec2'):
        mgmt = get_mgmt(provider_key)
        logger.info("----- Provider: %r -----", provider_key)
        logger.info("Deleting volumes...")
        delete_unattached_volumes(
            provider_mgmt=mgmt, excluded_volumes=exclude_volumes, output=output)
        logger.info("Deleting Elastic LoadBalancers...")
        delete_unused_loadbalancers(
            provider_mgmt=mgmt, excluded_elbs=exclude_elbs, output=output)
        logger.info("Deleting Elastic Network Interfaces...")
        delete_unused_network_interfaces(
            provider_mgmt=mgmt, excluded_enis=exclude_enis, output=output)
        logger.info("Deleting old stacks...")
        delete_stacks(
            provider_mgmt=mgmt, excluded_stacks=exclude_stacks,
            stack_template=stack_template, output=output)
        logger.info("Releasing addresses...")
        delete_disassociated_addresses(
            provider_mgmt=mgmt, excluded_eips=exclude_eips, output=output)
def cu_vm(provider, vm_name, template):
    """
    Deploys CU VM
    """
    provider_dict = cfme_data['management_systems'][provider]
    # TODO this key isn't in cfme qe yamls
    allowed = provider_dict['cap_and_util']['allowed_datastores']
    pool = provider_dict['cap_and_util']['resource_pool']

    # TODO methods deploy_template calls don't accept resourcepool and  allowed_datastores as kwargs
    deploy_template(
        provider, vm_name, template,
        resourcepool=pool, allowed_datastores=allowed)

    mgmt = get_mgmt(provider)
    vm_running(mgmt, vm_name)
    address = mgmt.get_ip_address(vm_name)

    # TODO this key isn't in cfme qe yamls
    creds = credentials[provider_dict['capandu_vm_creds']]

    # Create cron jobs to generate disk and network activity on the CU VM.
    with make_ssh_client(address, creds['username'], creds['password']) as ssh_client:
        try:
            config_cu_vm(ssh_client)
        except CUCommandException:
            # roll back the deployed VM on configuration failure
            _vm_cleanup(mgmt, vm_name)
            raise

    vm_running(mgmt, vm_name)
Beispiel #27
0
def ec2cleanup(exclude_volumes, exclude_eips, exclude_elbs, exclude_enis, exclude_stacks,
               stack_template, output):
    """EC2 cleanup driver: volumes, load balancers, NICs, stacks, addresses."""
    with open(output, 'w') as report:
        report.write('ec2cleanup.py, Address, Volume, LoadBalancer and Network Interface Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
    for provider_key in list_provider_keys('ec2'):
        provider_mgmt = get_mgmt(provider_key)
        logger.info("----- Provider: %r -----", provider_key)
        # (message, cleanup function, extra kwargs) executed in order
        steps = (
            ("Deleting volumes...", delete_unattached_volumes,
             {'excluded_volumes': exclude_volumes}),
            ("Deleting Elastic LoadBalancers...", delete_unused_loadbalancers,
             {'excluded_elbs': exclude_elbs}),
            ("Deleting Elastic Network Interfaces...", delete_unused_network_interfaces,
             {'excluded_enis': exclude_enis}),
            ("Deleting old stacks...", delete_stacks,
             {'excluded_stacks': exclude_stacks, 'stack_template': stack_template}),
            ("Releasing addresses...", delete_disassociated_addresses,
             {'excluded_eips': exclude_eips}),
        )
        for message, step, extra in steps:
            logger.info(message)
            step(provider_mgmt=provider_mgmt, output=output, **extra)
Beispiel #28
0
def azure_cleanup(nic_template, pip_template, days_old):
    """Sweep stale NICs, public IPs, empty stacks and unused blobs across
    every Azure provider, subscription and resource group.

    Args:
        nic_template: search pattern passed to remove_nics_by_search
        pip_template: search pattern passed to remove_pips_by_search
        days_old: minimum stack age (days) considered for removal
    Returns:
        int: shell-style exit code — 1 if any step raised, else 0
    """
    logger.info('azure_cleanup.py, NICs, PIPs, Disks and Stack Cleanup')
    logger.info("Date: {}".format(datetime.now()))
    # Failures are accumulated instead of aborting, so every
    # subscription/resource group still gets a cleanup attempt.
    errors = []
    for prov_key in list_provider_keys('azure'):
        logger.info("----- Provider: '%s' -----", prov_key)
        mgmt = get_mgmt(prov_key)
        mgmt.logger = logger
        for name, scr_id in mgmt.list_subscriptions():
            logger.info("Subscription '%s' is chosen", name)
            # switch the mgmt client to the current subscription
            setattr(mgmt, 'subscription_id', scr_id)
            for resource_group in mgmt.list_resource_groups():
                mgmt.logger.info('Checking "%s" resource group:',
                                 resource_group)

                # removing stale nics
                try:
                    mgmt.remove_nics_by_search(nic_template, resource_group)
                except Exception as e:
                    logger.warning("NIC cleanup failed")
                    errors.append(e)

                # removing public ips
                try:
                    mgmt.remove_pips_by_search(pip_template, resource_group)
                except Exception as e:
                    logger.warning("Public IP cleanup failed")
                    errors.append(e)

                # removing stale stacks
                try:
                    stack_list = mgmt.list_stack(resource_group=resource_group,
                                                 days_old=days_old)
                    if stack_list:
                        removed_stacks = []
                        for stack in stack_list:
                            # only stacks with no remaining resources are deleted
                            if mgmt.is_stack_empty(
                                    stack, resource_group=resource_group):
                                removed_stacks.append(stack)
                                mgmt.delete_stack(stack, resource_group)

                        if not removed_stacks:
                            logger.info(
                                "No empty stacks older '%s' days were found",
                                days_old)
                except Exception as e:
                    logger.warning("Removing Stacks failed")
                    errors.append(e)
                try:
                    mgmt.remove_unused_blobs(resource_group)
                except Exception as e:
                    logger.warning("Removing unused blobs failed")
                    errors.append(e)
    if errors:
        # NOTE(review): tb.format_exc() return value is discarded here —
        # presumably it was meant to be logged; confirm and log explicitly.
        tb.format_exc()
        return 1
    else:
        return 0
Beispiel #29
0
def vms_table(request, current_provider=None):
    """Render the VM list partial for a provider; superusers only."""
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return go_home(request)
    try:
        # locals() feeds the template context, so the names `manager`
        # and `vms` are part of the behavior and kept unchanged.
        manager = get_mgmt(current_provider)
        vms = sorted(vm.name for vm in manager.list_vms())
        return render(request, 'appliances/vms/_list.html', locals())
    except Exception as e:
        return HttpResponse('{}: {}'.format(type(e).__name__, str(e)), content_type="text/plain")
Beispiel #30
0
 def mgmt(self):
     """Build a wrapanapi mgmt object from a deep copy of ``self.data``.

     Unlike in-place variants, this copies first so the provider's stored
     data is never mutated.
     """
     from cfme.utils.providers import get_mgmt
     ep = self.default_endpoint
     conf = copy.deepcopy(self.data)
     conf['type'] = self.type_name
     conf['hostname'] = ep.hostname
     conf['api_port'] = ep.api_port
     conf['security_protocol'] = ep.security_protocol
     conf['credentials'] = ep.credentials
     return get_mgmt(conf)
Beispiel #31
0
def vms_table(request, current_provider=None):
    # Superuser-only AJAX view: render the VM list partial for a provider.
    if not request.user.is_authenticated or not request.user.is_superuser:
        return go_home(request)
    try:
        # 'manager' and 'vms' are handed to the template through locals(),
        # so their names are part of the view's contract.
        manager = get_mgmt(current_provider)
        vms = sorted([vm.name for vm in manager.list_vms()])
        return render(request, 'appliances/vms/_list.html', locals())
    except Exception as e:
        # Report any provider error as plain text for the caller to display.
        return HttpResponse('{}: {}'.format(type(e).__name__, str(e)), content_type="text/plain")
def run(**kwargs):
    """Calls the functions needed to cleanup templates on RHEVM providers.
       This is called either by template_upload_all script, or by main
       function.

    Args:
        **kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm'].
    """
    providers = cfme_data['management_systems']
    for provider in [prov for prov in providers if providers[prov]['type'] == 'rhevm']:

        # If a provider was passed, only cleanup on it, otherwise all rhevm providers
        cli_provider = kwargs.get('provider')
        if cli_provider and cli_provider != provider:
            continue

        print("\n--------Start of {}--------".format(provider))

        provider_mgmt = get_mgmt(provider)

        try:
            # Raise exceptions here to log the end of provider section
            if not net.is_pingable(provider_mgmt.kwargs.get('ipaddress')):
                raise ValueError('Failed to ping provider.')
            elif not is_ovirt_engine_running(provider_mgmt):
                print('ovirt-engine service not running..')
                raise ValueError('ovirt-engine not running on provider')

            print('connecting to provider, to establish api handler')
            # Export-domain name: CLI/kwargs value wins, otherwise fall back
            # to the provider's template_upload.edomain setting.
            edomain = kwargs.get('edomain')
            if not edomain:
                edomain = provider_mgmt.kwargs['template_upload']['edomain']
            # Test API connection to provider, raises RequestError
            provider_mgmt.api  # noqa
        except Exception as e:
            # Any connectivity failure skips this provider but continues the run.
            print('Failed connecting to provider: {}'.format(e))
            logger.exception(e)
            print("--------FAILURE End of {}--------\n".format(provider))
            continue

        try:
            cleanup_templates(provider_mgmt.api,
                              edomain,
                              kwargs.get('days_old'),
                              kwargs.get('max_templates'))
        finally:
            # Always restore the export domain, even if cleanup failed:
            # maintenance -> remove empty dirs -> back to active.
            change_edomain_state(provider_mgmt,
                                 'maintenance',
                                 edomain)
            cleanup_empty_dir_on_edomain(provider_mgmt, edomain)

            change_edomain_state(provider_mgmt,
                                 'active',
                                 edomain)
            print("--------End of {}--------\n".format(provider))

    print("Provider Execution completed")
Beispiel #33
0
 def mgmt(self):
     """Return a wrapanapi management object built from this provider's data.

     Deep-copies ``self.data`` so adding the endpoint/connection keys below
     does not mutate the stored provider definition.
     """
     from cfme.utils.providers import get_mgmt
     d = copy.deepcopy(self.data)
     d['type'] = self.type_name
     d['hostname'] = self.default_endpoint.hostname
     d['api_port'] = self.default_endpoint.api_port
     d['security_protocol'] = self.default_endpoint.security_protocol
     d['credentials'] = self.default_endpoint.credentials
     return get_mgmt(d)
Beispiel #34
0
 def mgmt(self):
     """Return a wrapanapi management object built from this provider's data.

     Bug fix: the original assigned ``d = self.data`` and then mutated it,
     silently injecting connection keys into the stored provider definition.
     Work on a copy instead (the sibling implementation deep-copies; a
     shallow copy suffices here because only top-level keys are added).
     """
     from cfme.utils.providers import get_mgmt
     d = dict(self.data)
     d['hostname'] = self.default_endpoint.hostname
     d['api_port'] = self.default_endpoint.api_port
     d['security_protocol'] = self.default_endpoint.security_protocol
     d['username'] = self.default_endpoint.credentials.principal
     d['password'] = self.default_endpoint.credentials.secret
     return get_mgmt(d)
Beispiel #35
0
def list_vms(provider_key, output_queue):
    """
    List all the vms/instances on the given provider key
    Build list of lists with basic vm info: [[provider, vm, status, age, type], [etc]]
    :param provider_key: string provider key
    :param output_queue: a multiprocessing.Queue object to add results to
    :return: list of lists of vms and basic statistics
    """
    output_list = []

    print('Listing VMS on provider {}'.format(provider_key))
    provider = get_mgmt(provider_key)
    try:
        vm_list = provider.list_vm()
    except NotImplementedError:
        print('Provider does not support list_vm: {}'.format(provider_key))
        output_list.append([provider_key, 'Not Supported', NULL, NULL, NULL])
        # Bug fix: the 'Not Supported' row was built but never queued, so
        # unsupported providers vanished from the report.
        output_queue.put(output_list)
        return
    else:
        # TODO thread metadata collection for further speed improvements
        for vm_name in vm_list:
            # Init these meta values in case they fail to query
            status, creation, vm_type = None, None, None
            try:
                print('Collecting metadata for VM {} on provider {}'.format(vm_name, provider_key))
                # VMError raised for some vms in bad status
                # exception message contains useful information about VM status
                try:
                    status = provider.vm_status(vm_name)
                except VMError as ex:
                    # Bug fix: exceptions have no ``.message`` attribute on
                    # Python 3 (removed with PEP 352); use str(ex).
                    status = str(ex)

                creation = provider.vm_creation_time(vm_name)

                # different provider types implement different methods to get instance type info
                try:
                    vm_type = provider.vm_type(vm_name)
                except (AttributeError, NotImplementedError):
                    vm_type = provider.vm_hardware_configuration(vm_name)
                finally:
                    vm_type = vm_type or '--'

            except Exception as ex:
                # Bug fix: same ``.message`` removal as above.
                print('Exception during provider processing on {}: {}'
                      .format(provider_key, str(ex)))
                continue
            finally:
                # Add the VM to the list anyway, we just might not have all metadata
                output_list.append([provider_key,
                                    vm_name,
                                    status or NULL,
                                    creation or NULL,
                                    str(vm_type) or NULL])

    output_queue.put(output_list)
    return
Beispiel #36
0
def call_provider(provider_name, action, *args, **kwargs):
    """Invoke ``action`` on the named provider's mgmt object.

    Generic CLI dispatch helper: looks up the method named ``action`` on the
    wrapanapi management object and calls it with the remaining arguments.

    Raises:
        Exception: if the mgmt object has no attribute ``action``.
    """
    mgmt = get_mgmt(provider_name)
    if not hasattr(mgmt, action):
        raise Exception('Action {} not found'.format(repr(action)))
    return getattr(mgmt, action)(*args, **kwargs)
Beispiel #37
0
def call_provider(provider_name, action, *args, **kwargs):
    """Invoke the method named ``action`` on the provider's mgmt object.

    Raises:
        Exception: if the mgmt object has no attribute ``action``.
    """
    # Given a provider class, find the named method and call it with
    # *args. This could possibly be generalized for other CLI tools.
    provider = get_mgmt(provider_name)

    try:
        call = getattr(provider, action)
    except AttributeError:
        raise Exception('Action {} not found'.format(repr(action)))
    return call(*args, **kwargs)
def run(**kwargs):
    """Calls the functions needed to cleanup templates on RHEVM providers.
       This is called either by template_upload_all script, or by main
       function.

    Args:
        **kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm'].
    """
    providers = cfme_data['management_systems']
    for provider in [
            prov for prov in providers if providers[prov]['type'] == 'rhevm'
    ]:

        # If a provider was passed, only cleanup on it, otherwise all rhevm providers
        cli_provider = kwargs.get('provider')
        if cli_provider and cli_provider != provider:
            continue

        print("\n--------Start of {}--------".format(provider))

        provider_mgmt = get_mgmt(provider)

        try:
            # Raise exceptions here to log the end of provider section
            if not net.is_pingable(provider_mgmt.kwargs.get('ipaddress')):
                raise ValueError('Failed to ping provider.')
            elif not is_ovirt_engine_running(provider_mgmt):
                print('ovirt-engine service not running..')
                raise ValueError('ovirt-engine not running on provider')

            print('connecting to provider, to establish api handler')
            # Export-domain name: kwargs value wins, otherwise fall back to
            # the provider's template_upload.edomain setting.
            edomain = kwargs.get('edomain')
            if not edomain:
                edomain = provider_mgmt.kwargs['template_upload']['edomain']
            # Test API connection to provider, raises RequestError
            provider_mgmt.api  # noqa
        except Exception as e:
            # Any connectivity failure skips this provider but continues the run.
            print('Failed connecting to provider: {}'.format(e))
            logger.exception(e)
            print("--------FAILURE End of {}--------\n".format(provider))
            continue

        try:
            cleanup_templates(provider_mgmt.api, edomain,
                              kwargs.get('days_old'),
                              kwargs.get('max_templates'))
        finally:
            # Always restore the export domain, even if cleanup failed:
            # maintenance -> remove empty dirs -> back to active.
            change_edomain_state(provider_mgmt, 'maintenance', edomain)
            cleanup_empty_dir_on_edomain(provider_mgmt, edomain)

            change_edomain_state(provider_mgmt, 'active', edomain)
            print("--------End of {}--------\n".format(provider))

    print("Provider Execution completed")
Beispiel #39
0
def power_state_buttons(request, current_provider):
    # Superuser-only AJAX view: render the power-action buttons for a VM.
    if not request.user.is_authenticated or not request.user.is_superuser:
        return go_home(request)
    manager = get_mgmt(current_provider)
    vm_name = request.POST["vm_name"]
    power_state = request.POST["power_state"]
    # Allowed transitions derived from the reported state; these local names
    # are consumed by the template through locals(), so do not rename them.
    can_power_on = power_state in {Appliance.Power.SUSPENDED, Appliance.Power.OFF}
    can_power_off = power_state in {Appliance.Power.ON}
    # Suspend additionally requires provider support.
    can_suspend = power_state in {Appliance.Power.ON} and manager.can_suspend
    can_delete = power_state in {Appliance.Power.OFF}
    return render(request, 'appliances/vms/_buttons.html', locals())
def run(domain, provider_key, days_old, max_templates):
    """Calls the functions needed to cleanup templates on RHV providers.
       This is called either by template_upload_all script, or by main
       function.

    Args:
        domain: see argparse
        provider_key: see argparse
        days_old: see argparse
        max_templates: see argparse
    """
    providers = cfme_data['management_systems']
    for provider in [prov for prov in providers if providers[prov]['type'] == 'rhevm']:

        # If a provider was passed, only cleanup on it, otherwise all RHV providers
        if provider_key and provider_key != provider:
            logger.info('Skipping provider [%s], does not match passed key', provider)
            continue

        # CLI domain wins; otherwise fall back to the provider's configured
        # template_upload.edomain value.
        domain_to_clean = (domain or
                           providers[provider].get('template_upload', {}).get('edomain'))
        if domain_to_clean is None:
            logger.info('Skipping provider [%s], no domain under template_upload', provider)
            continue

        logger.info("\n--------Start of %s--------", provider)
        provider_mgmt = get_mgmt(provider)

        try:
            # Test API connection to provider, raises RequestError
            prov_test = provider_mgmt.api.test()  # noqa
        except Exception:
            prov_test = False

        if prov_test is False:
            # NOTE(review): logger.exception here runs outside the except
            # block, so no traceback is attached -- consider logging inside
            # the handler instead.
            logger.exception('Failed connecting to provider')
            logger.info("-------- FAILURE End of %s --------\n", provider)
            continue

        cleanup = False
        try:
            cleanup = cleanup_templates(provider_mgmt, domain_to_clean, days_old, max_templates)
        finally:
            # Only cycle the storage domain through maintenance/active when
            # templates were actually removed.
            if cleanup:
                logger.info('Changing domain to maintenance mode: %s', domain_to_clean)
                provider_mgmt.change_storage_domain_state('maintenance', domain_to_clean)

                cleanup_empty_dir_on_domain(provider_mgmt, domain_to_clean)

                logger.info('Changing domain to active mode: %s', domain_to_clean)
                provider_mgmt.change_storage_domain_state('active', domain_to_clean)
            logger.info("-------- End of %s --------\n", provider)

    logger.info("Provider cleanup completed")
def run(domain, provider_key, days_old, max_templates):
    """Clean up templates on every matching RHV provider.

    Invoked by the template_upload_all script or by main().

    Args:
        domain: storage domain to clean; when falsy, the provider's
            ``template_upload.edomain`` setting from cfme_data is used
        provider_key: when given, restrict cleanup to this provider only
        days_old: see argparse
        max_templates: see argparse
    """
    providers = cfme_data['management_systems']
    rhv_keys = [key for key in providers if providers[key]['type'] == 'rhevm']
    for prov_key in rhv_keys:
        # If a provider was passed, only cleanup on it, otherwise all RHV providers
        if provider_key and provider_key != prov_key:
            logger.info('Skipping provider [%s], does not match passed key', prov_key)
            continue

        target_domain = (domain or
                         providers[prov_key].get('template_upload', {}).get('edomain'))
        if target_domain is None:
            logger.info('Skipping provider [%s], no domain under template_upload', prov_key)
            continue

        logger.info("\n--------Start of %s--------", prov_key)
        mgmt = get_mgmt(prov_key)

        try:
            # Test API connection to provider, raises RequestError
            api_ok = mgmt.api.test()  # noqa
        except Exception:
            api_ok = False

        if api_ok is False:
            logger.exception('Failed connecting to provider')
            logger.info("-------- FAILURE End of %s --------\n", prov_key)
            continue

        did_cleanup = False
        try:
            did_cleanup = cleanup_templates(mgmt, target_domain, days_old, max_templates)
        finally:
            # Cycle the storage domain only when something was removed.
            if did_cleanup:
                logger.info('Changing domain to maintenance mode: %s', target_domain)
                mgmt.change_storage_domain_state('maintenance', target_domain)

                cleanup_empty_dir_on_domain(mgmt, target_domain)

                logger.info('Changing domain to active mode: %s', target_domain)
                mgmt.change_storage_domain_state('active', target_domain)
            logger.info("-------- End of %s --------\n", prov_key)

    logger.info("Provider cleanup completed")
Beispiel #42
0
def power_state_buttons(request, current_provider):
    # Superuser-only AJAX view: render the power-action buttons for a VM.
    # NOTE(review): calls is_authenticated() as a method; sibling views use
    # the property form -- confirm which Django version this runs against.
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return go_home(request)
    manager = get_mgmt(current_provider)
    vm_name = request.POST["vm_name"]
    power_state = request.POST["power_state"]
    # Allowed transitions derived from the reported state; these local names
    # are consumed by the template through locals(), so do not rename them.
    can_power_on = power_state in {Appliance.Power.SUSPENDED, Appliance.Power.OFF}
    can_power_off = power_state in {Appliance.Power.ON}
    can_suspend = power_state in {Appliance.Power.ON} and manager.can_suspend
    can_delete = power_state in {Appliance.Power.OFF}
    return render(request, 'appliances/vms/_buttons.html', locals())
def azure_cleanup(nic_template, pip_template, days_old):
    """Remove stale NICs, public IPs, empty stacks and unused blobs on Azure.

    Walks every Azure provider, every subscription and every resource group,
    collecting exceptions instead of aborting so one failure does not stop
    the sweep.

    Returns:
        int: 1 if any cleanup step raised, 0 otherwise (shell exit code).
    """
    logger.info('azure_cleanup.py, NICs, PIPs, Disks and Stack Cleanup')
    logger.info("Date: {}".format(datetime.now()))
    errors = []
    for prov_key in list_provider_keys('azure'):
        logger.info("----- Provider: '%s' -----", prov_key)
        client = get_mgmt(prov_key)
        client.logger = logger
        for sub_name, sub_id in client.list_subscriptions():
            logger.info("Subscription '%s' is chosen", sub_name)
            client.subscription_id = sub_id
            for group in client.list_resource_groups():
                client.logger.info('Checking "%s" resource group:', group)

                # stale NICs
                try:
                    client.remove_nics_by_search(nic_template, group)
                except Exception as e:
                    logger.exception("NIC cleanup failed")
                    errors.append(e)

                # stale public IPs
                try:
                    client.remove_pips_by_search(pip_template, group)
                except Exception as e:
                    logger.exception("Public IP cleanup failed")
                    errors.append(e)

                # stale deployment stacks -- only empty ones are removed
                try:
                    stacks = client.list_stack(resource_group=group,
                                               days_old=days_old)
                    deleted = []
                    for stack in stacks:
                        if client.is_stack_empty(stack, resource_group=group):
                            deleted.append(stack)
                            client.delete_stack(stack, group)
                    if stacks and not deleted:
                        logger.info("No empty stacks older '%s' days were found", days_old)
                except Exception as e:
                    logger.exception("Removing Stacks failed")
                    errors.append(e)

                # orphaned storage blobs
                try:
                    client.remove_unused_blobs(group)
                except Exception as e:
                    logger.exception("Removing unused blobs failed")
                    errors.append(e)
    if errors:
        logger.error("Hit exceptions during cleanup! See logs.")
        return 1
    else:
        return 0
def main():
    """CLI entry point: connect or disconnect a direct LUN on an appliance VM."""
    parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--provider', dest='provider_name', help='provider name in cfme_data')
    parser.add_argument('--vm_name', help='the name of the VM on which to act')
    parser.add_argument('--remove', help='remove disk from vm', action="store_true")
    args = parser.parse_args()

    # --remove toggles detach instead of attach.
    get_mgmt(args.provider_name).connect_direct_lun_to_appliance(args.vm_name, args.remove)
Beispiel #45
0
def power_state(request, current_provider):
    """Return a VM's mapped power state as plain text (superuser only)."""
    if not (request.user.is_authenticated() and request.user.is_superuser):
        return go_home(request)
    vm_name = request.POST["vm_name"]
    manager = get_mgmt(current_provider)
    # TODO: change after openshift wrapanapi refactor
    if isinstance(manager, Openshift):
        raw_state = manager.vm_status(vm_name)
    else:
        raw_state = manager.get_vm(vm_name).state
    # Normalize provider-specific states; anything unrecognized -> "unknown".
    mapped_state = Appliance.POWER_STATES_MAPPING.get(raw_state, "unknown")
    return HttpResponse(mapped_state, content_type="text/plain")
Beispiel #46
0
def power_state(request, current_provider):
    # Superuser-only AJAX view returning the VM's mapped power state as text.
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return go_home(request)
    vm_name = request.POST["vm_name"]
    manager = get_mgmt(current_provider)
    # TODO: change after openshift wrapanapi refactor
    if isinstance(manager, Openshift):
        state = manager.vm_status(vm_name)
    else:
        state = manager.get_vm(vm_name).state
    # Normalize provider-specific states; anything unrecognized -> "unknown".
    state = Appliance.POWER_STATES_MAPPING.get(state, "unknown")
    return HttpResponse(state, content_type="text/plain")
Beispiel #47
0
def scan_vm(provider_key, vm_name, delta, match_queue, scan_failure_queue):
    """Scan an individual VM for age

    Args:
        vm_name (str) Name of the VM to scan
        delta (datetime.timedelta) The timedelta to compare age against for matches
        match_queue (Queue.Queue): MP queue to hold VMs matching age requirement
        scan_failure_queue (Queue.Queue): MP queue to hold vms that we could not compare age

    Returns:
        None: Uses the Queues to 'return' data
    """
    now = datetime.datetime.now(tz=pytz.UTC)
    # Nested exceptions to try and be safe about the scanned values and to get complete results
    failure = False
    status = NULL
    logger.info('%r: Scan VM %r...', provider_key, vm_name)
    try:
        vm = get_mgmt(provider_key).get_vm(vm_name)
        # Localize to UTC
        vm_creation_time = vm.creation_time
    except VMInstanceNotFound:
        logger.exception('%r: could not locate VM %s', provider_key, vm_name)
        failure = True
    except Exception:  # noqa
        failure = True
        logger.exception('%r: Exception getting creation time for %r',
                         provider_key, vm_name)
        # This VM must have some problem, include in report even though we can't delete
        # NOTE(review): if get_vm() itself raised, ``vm`` is unbound here and
        # this line raises NameError -- which the inner except silently
        # absorbs as a status failure.  Works, but fragile; consider a guard.
        try:
            status = vm.state
        except Exception:  # noqa
            failure = True
            logger.exception('%r: Exception getting status for %r',
                             provider_key, vm_name)
            status = NULL
    finally:
        # Failed scans are reported on the failure queue and end the scan.
        if failure:
            scan_failure_queue.put(
                VmReport(provider_key, vm_name, FAIL, status, NULL))
            return

    vm_delta = now - vm_creation_time
    logger.info('%r: VM %r age: %s', provider_key, vm_name, vm_delta)
    data = VmData(provider_key, vm_name, str(vm_delta))

    # test age to determine which queue it goes in
    if delta < vm_delta:
        match_queue.put(data)
    else:
        logger.info('%r: VM %r did not match age requirement', provider_key,
                    vm.name)
def main(*providers):
    """Delete unassigned floating IPs from every OpenStack provider.

    NOTE(review): the ``*providers`` argument is accepted but never used --
    all 'openstack' provider keys are always scanned.  Confirm intent.
    """
    for provider_key in list_provider_keys('openstack'):
        print('Checking {}'.format(provider_key))
        api = get_mgmt(provider_key).api
        try:
            # A floating IP with no fixed IP is unassigned.
            orphans = api.floating_ips.findall(fixed_ip=None)
        except Exception:
            print('Unable to get fips for {}:'.format(provider_key))
            print(format_exc().splitlines()[-1])
            continue

        for orphan in orphans:
            print('Deleting {} on {}'.format(orphan.ip, provider_key))
            orphan.delete()
            print('{} deleted'.format(orphan.ip))
Beispiel #49
0
def vm_action(request, current_provider):
    """Dispatch an asynchronous power action on a VM (superuser only).

    Valid actions: poweron, poweroff, suspend, delete.  The work itself is
    queued as a Celery task; the response only confirms the dispatch.
    """
    if not request.user.is_authenticated() or not request.user.is_superuser:
        return HttpResponse("Not authenticated", content_type="text/plain")
    try:
        # Validate the provider key before queuing anything.
        get_mgmt(current_provider)
    except Exception as e:
        return HttpResponse(
            "Troubles with provider {}: {}".format(current_provider, str(e)),
            content_type="text/plain")
    vm_name = request.POST["vm_name"]
    action = request.POST["action"]
    if action == "poweron":
        anyvm_power_on.delay(current_provider, vm_name)
    elif action == "poweroff":
        anyvm_power_off.delay(current_provider, vm_name)
    elif action == "suspend":
        anyvm_suspend.delay(current_provider, vm_name)
    elif action == "delete":
        anyvm_delete.delay(current_provider, vm_name)
    else:
        # Bug fix: this response was built but never returned, so an unknown
        # action fell through and falsely reported "was initiated".
        return HttpResponse("No such action {}!".format(action), content_type="text/plain")
    logger().info("User {} initiated {} on {}@{}".format(
        request.user.username, action, vm_name, current_provider))
    return HttpResponse("Action {} was initiated".format(action), content_type="text/plain")
def main(*providers):
    # Delete unassigned floating IPs from every OpenStack provider.
    # NOTE(review): ``*providers`` is accepted but unused -- every
    # 'openstack' provider key is scanned regardless.  Confirm intent.
    for provider_key in list_provider_keys('openstack'):
        print('Checking {}'.format(provider_key))
        api = get_mgmt(provider_key).api
        try:
            # A floating IP with no fixed IP is unassigned.
            fips = api.floating_ips.findall(fixed_ip=None)
        except Exception:
            print('Unable to get fips for {}:'.format(provider_key))
            print(format_exc().splitlines()[-1])
            continue

        for fip in fips:
            print('Deleting {} on {}'.format(fip.ip, provider_key))
            fip.delete()
            print('{} deleted'.format(fip.ip))
Beispiel #51
0
def scan_vm(provider_key, vm_name, delta, match_queue, scan_failure_queue):
    """Scan an individual VM for age

    Args:
        vm_name (str) Name of the VM to scan
        delta (datetime.timedelta) The timedelta to compare age against for matches
        match_queue (Queue.Queue): MP queue to hold VMs matching age requirement
        scan_failure_queue (Queue.Queue): MP queue to hold vms that we could not compare age

    Returns:
        None: Uses the Queues to 'return' data
    """
    now = datetime.datetime.now(tz=pytz.UTC)
    # Nested exceptions to try and be safe about the scanned values and to get complete results
    failure = False
    status = NULL
    logger.info('%r: Scan VM %r...', provider_key, vm_name)
    try:
        vm = get_mgmt(provider_key).get_vm(vm_name)
        # Localize to UTC
        vm_creation_time = vm.creation_time
    except VMInstanceNotFound:
        logger.exception('%r: could not locate VM %s', provider_key, vm_name)
        failure = True
    except Exception:  # noqa
        failure = True
        logger.exception('%r: Exception getting creation time for %r', provider_key, vm_name)
        # This VM must have some problem, include in report even though we can't delete
        # NOTE(review): if get_vm() itself raised, ``vm`` is unbound and this
        # line raises NameError, silently absorbed by the inner except.
        try:
            status = vm.state
        except Exception:  # noqa
            failure = True
            logger.exception('%r: Exception getting status for %r', provider_key, vm_name)
            status = NULL
    finally:
        # Failed scans are reported on the failure queue and end the scan.
        if failure:
            scan_failure_queue.put(VmReport(provider_key, vm_name, FAIL, status, NULL))
            return

    vm_delta = now - vm_creation_time
    logger.info('%r: VM %r age: %s', provider_key, vm_name, vm_delta)
    data = VmData(provider_key, vm_name, str(vm_delta))

    # test age to determine which queue it goes in
    if delta < vm_delta:
        match_queue.put(data)
    else:
        logger.info('%r: VM %r did not match age requirement', provider_key, vm.name)
Beispiel #52
0
def delete_vm(provider_key, vm_name, age, result_queue):
    """ Delete the given vm_name from the provider via REST interface

    Args:
        provider_key (string): name of the provider from yaml
        vm_name (string): name of the vm to delete
        age (string): age of the VM to delete
        result_queue (Queue.Queue): MP Queue to store the VmReport tuple on delete result
    Returns:
        None: Uses the Queues to 'return' data
    """
    # Bug fix: the original fetched the VM and its status in one try block;
    # a generic failure in get_vm() left ``vm`` unbound, so the later
    # ``vm.name`` accesses raised NameError.  Fetch the VM first and only
    # continue once we actually hold it.
    try:
        vm = get_mgmt(provider_key).get_vm(vm_name)
    except VMInstanceNotFound:
        logger.exception('%r: could not locate VM %s', provider_key, vm_name)
        # no reason to continue after this, nothing to try and delete
        result_queue.put(VmReport(provider_key, vm_name, None, None, FAIL))
        return
    except Exception:  # noqa
        logger.exception('%r: Exception getting VM %r', provider_key, vm_name)
        result_queue.put(VmReport(provider_key, vm_name, None, None, FAIL))
        return

    try:
        status = vm.state
    except VMInstanceNotFound:
        logger.exception('%r: could not locate VM %s', provider_key, vm_name)
        result_queue.put(VmReport(provider_key, vm_name, None, None, FAIL))
        return
    except Exception:  # noqa
        status = FAIL
        logger.exception('%r: Exception getting status for %r', provider_key,
                         vm_name)
        # keep going, try to delete anyway

    logger.info("%r: Deleting %r, age: %r, status: %r", provider_key, vm.name,
                age, status)
    try:
        # delete vm returns boolean based on success
        if vm.cleanup():
            result = PASS
            logger.info('%r: Delete success: %r', provider_key, vm.name)
        else:
            result = FAIL
            logger.error('%r: Delete failed: %r', provider_key, vm.name)
    except Exception:  # noqa
        # TODO vsphere delete failures, workaround for wrapanapi issue #154
        if not vm.exists:
            # The VM actually has been deleted
            result = PASS
        else:
            result = FAIL  # set this here to cover anywhere the exception could happen
        logger.exception(
            '%r: Exception during delete: %r, double check result: %r',
            provider_key, vm.name, result)
    finally:
        result_queue.put(VmReport(provider_key, vm.name, age, status, result))
def depot_machine_ip():
    """Deploy a log-depot VM and yield its IP address.

    Generator fixture: deploys a VM from the template configured under
    ``log_db_operations.log_db_depot_template`` and deletes it on teardown.
    """
    vm_name = "test_long_log_depot_{}".format(fauxfactory.gen_alphanumeric())
    depot_conf = conf.cfme_data.get("log_db_operations", {})
    template_conf = depot_conf["log_db_depot_template"]
    provider_key = template_conf["provider"]
    mgmt = get_mgmt(provider_key)
    deploy_template(provider_key,
                    vm_name,
                    template_name=template_conf["template_name"])
    yield mgmt.get_ip_address(vm_name)
    # Teardown: remove the VM from the provider.
    mgmt.delete_vm(vm_name)
def depot_machine_ip():
    """Deploy a log-depot VM and yield its IP address.

    Generator fixture: deploys a VM from the template configured under
    ``log_db_operations.log_db_depot_template`` and deletes it on teardown.
    """
    depot_machine_name = "test_long_log_depot_{}".format(fauxfactory.gen_alphanumeric())
    data = conf.cfme_data.get("log_db_operations", {})
    depot_provider_key = data["log_db_depot_template"]["provider"]
    depot_template_name = data["log_db_depot_template"]["template_name"]
    prov = get_mgmt(depot_provider_key)
    deploy_template(depot_provider_key,
                    depot_machine_name,
                    template_name=depot_template_name)
    yield prov.get_ip_address(depot_machine_name)
    # Teardown: remove the VM from the provider.
    prov.delete_vm(depot_machine_name)
Beispiel #55
0
def proxy_machine():
    """Deploy a proxy VM and yield ``(ip_address, proxy_port)``.

    Generator fixture: deploys a VM from the ``proxy_template`` entry in
    cfme_data and deletes it on teardown.
    """
    proxy_vm_name = random_vm_name('proxy')
    proxy_conf = conf.cfme_data.get("proxy_template")
    provider_key = proxy_conf["provider"]
    template_name = proxy_conf["template_name"]
    proxy_port = proxy_conf['port']
    mgmt = get_mgmt(provider_key)
    deploy_template(provider_key,
                    proxy_vm_name,
                    template_name=template_name)
    yield mgmt.get_ip_address(proxy_vm_name), proxy_port
    # Teardown: remove the VM from the provider.
    mgmt.delete_vm(proxy_vm_name)
Beispiel #56
0
def delete_vm(provider_key, vm_name, age, result_queue):
    """ Delete the given vm_name from the provider via REST interface

    Args:
        provider_key (string): name of the provider from yaml
        vm_name (string): name of the vm to delete
        age (string): age of the VM to delete
        result_queue (Queue.Queue): MP Queue to store the VmReport tuple on delete result
    Returns:
        None: Uses the Queues to 'return' data
    """
    # Bug fix: the original fetched the VM and its status in one try block;
    # a generic failure in get_vm() left ``vm`` unbound, so the later
    # ``vm.name`` accesses raised NameError.  Fetch the VM first and only
    # continue once we actually hold it.
    try:
        vm = get_mgmt(provider_key).get_vm(vm_name)
    except VMInstanceNotFound:
        logger.exception('%r: could not locate VM %s', provider_key, vm_name)
        # no reason to continue after this, nothing to try and delete
        result_queue.put(VmReport(provider_key, vm_name, None, None, FAIL))
        return
    except Exception:  # noqa
        logger.exception('%r: Exception getting VM %r', provider_key, vm_name)
        result_queue.put(VmReport(provider_key, vm_name, None, None, FAIL))
        return

    try:
        status = vm.state
    except VMInstanceNotFound:
        logger.exception('%r: could not locate VM %s', provider_key, vm_name)
        result_queue.put(VmReport(provider_key, vm_name, None, None, FAIL))
        return
    except Exception:  # noqa
        status = FAIL
        logger.exception('%r: Exception getting status for %r', provider_key, vm_name)
        # keep going, try to delete anyway

    logger.info("%r: Deleting %r, age: %r, status: %r", provider_key, vm.name, age, status)
    try:
        # delete vm returns boolean based on success
        if vm.cleanup():
            result = PASS
            logger.info('%r: Delete success: %r', provider_key, vm.name)
        else:
            result = FAIL
            logger.error('%r: Delete failed: %r', provider_key, vm.name)
    except Exception:  # noqa
        # TODO vsphere delete failures, workaround for wrapanapi issue #154
        if not vm.exists:
            # The VM actually has been deleted
            result = PASS
        else:
            result = FAIL  # set this here to cover anywhere the exception could happen
        logger.exception('%r: Exception during delete: %r, double check result: %r',
                         provider_key, vm.name, result)
    finally:
        result_queue.put(VmReport(provider_key, vm.name, age, status, result))
def main(*providers):
    """Purge unattached volumes older than GRACE_TIME from each given provider.

    Args:
        *providers: provider keys (strings) to clean up.
    """
    for provider_key in providers:
        print("Cleaning up {}".format(provider_key))
        api = get_mgmt(provider_key).capi

        # Volumes with an empty attachment list are orphans eligible for cleanup.
        try:
            unattached = api.volumes.findall(attachments=[])
        except Exception as e:
            print("Connect to provider failed:{} {} {}".format(
                provider_key, type(e).__name__, str(e)))
            continue

        for vol in unattached:
            # Skip anything still inside the grace window.
            cutoff = datetime.now(tz=local_tz) - GRACE_TIME
            if not iso8601.parse_date(vol.created_at) < cutoff:
                continue
            print("Deleting {}".format(vol.id))
            try:
                vol.delete()
            except Exception as e:
                # Best-effort: report the failure and move on to the next volume.
                print("Delete failed: {} {}".format(type(e).__name__, str(e)))
def list_vms(provider_key, output_queue):
    """
    List all the vms/instances on the given provider key
    Build list of lists with basic vm info: [[provider, vm, status, age, type], [etc]]
    :param provider_key: string provider key
    :param output_queue: a multiprocessing.Queue object to add results to
    :return: list of lists of vms and basic statistics
    """
    output_list = []

    print('Listing VMS on provider {}'.format(provider_key))
    provider = get_mgmt(provider_key)
    # TODO thread metadata collection for further speed improvements
    for vm in provider.list_vms():
        # Init these meta values in case they fail to query
        status, creation, vm_type = None, None, None
        try:
            print('Collecting metadata for VM {} on provider {}'.format(vm.name, provider_key))
            status = vm.state
            creation = vm.creation_time

            # different provider types implement different methods to get instance type info
            if hasattr(vm, 'type'):
                vm_type = vm.type
            else:
                try:
                    vm_type = vm.get_hardware_configuration()
                except (AttributeError, NotImplementedError):
                    vm_type = '--'

        except Exception as ex:
            # Use the exception object directly: `.message` was removed in Python 3
            # and raised AttributeError here, masking the original error.
            print('Exception during provider processing on {}: {}'
                  .format(provider_key, ex))
            continue
        finally:
            # Add the VM to the list anyway, we just might not have all metadata.
            # Only stringify vm_type when it was actually collected -- str(None)
            # is truthy, so `str(vm_type) or NULL` could never fall back to NULL.
            output_list.append([provider_key,
                                vm.name,
                                status or NULL,
                                creation or NULL,
                                str(vm_type) if vm_type else NULL])

    output_queue.put(output_list)
    return