Example #1
    def pytest_sessionfinish(self, exitstatus):
        # everyone but the master (slaves and standalone runs) moves their reports
        # to an appliance for the source report
        if store.parallelizer_role != 'master':
            manager().collect()

        # for slaves, everything is done at this point
        if store.parallelizer_role == 'slave':
            return

        # on master/standalone, merge all the collected reports and bring them back
        manager().merge()

        try:
            global ui_coverage_percent
            last_run = json.load(log_path.join('coverage', 'merged', '.last_run.json').open())
            ui_coverage_percent = last_run['result']['covered_percent']
            style = {'bold': True}
            if ui_coverage_percent > 40:
                style['green'] = True
            else:
                style['red'] = True
            store.write_line('UI Coverage Result: {}%'.format(ui_coverage_percent),
                **style)
        except Exception:
            logger.exception('Error printing coverage report to terminal')
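
The coverage hook above depends only on the nested result -> covered_percent key of the merged .last_run.json summary; a minimal sketch of that dependency (the surrounding JSON structure is an assumption) is:

import json

# Sketch only: just 'result' -> 'covered_percent' is read by pytest_sessionfinish
# above; any other keys present in .last_run.json are assumed.
sample = '{"result": {"covered_percent": 42.7}}'
last_run = json.loads(sample)
assert last_run['result']['covered_percent'] > 40  # would be printed in green
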
def deploy_template(provider_key, vm_name, template_name=None, timeout=900, **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: how long to wait for the template deployment to finish
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = tuple(allow_skip.keys())  # except requires a tuple
        callable_mapping = allow_skip
    elif isinstance(allow_skip, six.string_types) and allow_skip.lower() == "default":
        skip_exceptions = DEFAULT_SKIP
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=provider_crud.data['templates']['small_template']['name'])
        except KeyError:
            raise KeyError('small_template not defined for Provider {} in cfme_data.yaml'
                .format(provider_key))
    else:
        deploy_args.update(template=template_name)

    deploy_args.update(provider_crud.deployment_helper(deploy_args))

    logger.info("Getting ready to deploy VM/instance %s from template %s on provider %s",
        vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            vm_name = provider_crud.mgmt.deploy_template(timeout=timeout, **deploy_args)
            logger.info("Provisioned VM/instance %s", vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.exception('Could not provision VM/instance %s (%s: %s)',
                vm_name, type(e).__name__, str(e))
            try:
                provider_crud.mgmt.delete_vm(vm_name)
            except Exception:
                logger.exception("Unable to clean up vm:", vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line(
            "Skipping due to a provider error: {}: {}\n".format(e_c.__name__, str(e)), purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
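
DEFAULT_SKIP is referenced above but not defined in this variant; judging from the inline tuples in the older variants further down, it is presumably something like the tuple below. The call sites are hypothetical and only illustrate the three accepted forms of allow_skip ("default", a tuple of exception classes, or a dict mapping exception classes to predicates):

# Presumed definition, inferred from the older variants below; the exact
# membership is an assumption.
DEFAULT_SKIP = (OSOverLimit, RHEVRequestError, exceptions.VMInstanceNotCloned, SSLError)

# Hypothetical call sites -- provider key and template name are placeholders.
vm = deploy_template('rhv-01', 'test-vm-1', template_name='cfme-template',
                     allow_skip='default')

# With a dict, the test is skipped only when the predicate returns True for the
# caught exception; otherwise the exception is re-raised.
vm = deploy_template('rhv-01', 'test-vm-2',
                     allow_skip={OSOverLimit: lambda e: 'quota' in str(e).lower()})
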
def appliance_police(appliance):
    if not store.slave_manager:
        return
    try:
        available_ports = {
            'ssh': (appliance.hostname, appliance.ssh_port),
            'https': (appliance.hostname, appliance.ui_port),
            'postgres': (appliance.db_host or appliance.hostname, appliance.db_port)}
        port_results = {pn: net_check(addr=p_addr, port=p_port, force=True)
                        for pn, (p_addr, p_port) in available_ports.items()}
        for port, result in port_results.items():
            if port == 'ssh' and appliance.is_pod:
                # ssh is not available for podified appliance
                continue
            if not result:
                raise AppliancePoliceException('Unable to connect', available_ports[port][1])

        try:
            status_code = requests.get(appliance.url, verify=False,
                                       timeout=120).status_code
        except Exception:
            raise AppliancePoliceException('Getting status code failed',
                                           available_ports['https'][1])

        if status_code != 200:
            raise AppliancePoliceException('Status code was {}, should be 200'.format(
                status_code), available_ports['https'][1])
        return
    except AppliancePoliceException as e:
        # special handling for known failure conditions
        if e.port == 443:
            # Lots of rdbs lately where evm seems to have entirely crashed
            # and (sadly) the only fix is a rude restart
            appliance.restart_evm_service(rude=True)
            try:
                appliance.wait_for_web_ui(900)
                store.write_line('EVM was frozen and had to be restarted.', purple=True)
                return
            except TimedOutError:
                pass
        e_message = str(e)
    except Exception as e:
        e_message = str(e)

    # Regardless of the exception raised, we didn't return anywhere above
    # time to call a human
    msg = 'Help! My appliance {} crashed with: {}'.format(appliance.url, e_message)
    store.slave_manager.message(msg)
    if 'appliance_police_recipients' in rdb:
        rdb_kwargs = {
            'subject': 'RDB Breakpoint: Appliance failure',
            'recipients': rdb.appliance_police_recipients,
        }
    else:
        rdb_kwargs = {}
    Rdb(msg).set_trace(**rdb_kwargs)
    store.slave_manager.message('Resuming testing following remote debugging')
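
AppliancePoliceException is not shown in these examples; a minimal sketch that is consistent with how it is constructed (message plus port) and inspected (e.port, str(e)) above would be:

class AppliancePoliceException(Exception):
    """Sketch only: carries the failing port so the handler can special-case 443."""

    def __init__(self, message, port):
        super(AppliancePoliceException, self).__init__(message, port)
        self.message = message
        self.port = port

    def __str__(self):
        return '{} (port {})'.format(self.message, self.port)
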
Example #4
def appliance_police():
    if not store.slave_manager:
        return
    try:
        port_numbers = {
            'ssh': ports.SSH,
            'https': store.current_appliance.ui_port,
            'postgres': ports.DB}
        port_results = {pn: net_check(pp, force=True) for pn, pp in port_numbers.items()}
        for port, result in port_results.items():
            if not result:
                raise _AppliancePoliceException('Port {} was not contactable'.format(port),
                    port_numbers[port])

        try:
            status_code = requests.get(store.current_appliance.url, verify=False,
                                       timeout=120).status_code
        except Exception:
            raise _AppliancePoliceException('Getting status code failed', port_numbers['https'])

        if status_code != 200:
            raise _AppliancePoliceException('Status code was {}, should be 200'.format(
                status_code), port_numbers['https'])
        return
    except _AppliancePoliceException as e:
        # special handling for known failure conditions
        if e.port == 443:
            # Lots of rdbs lately where evm seems to have entirely crashed
            # and (sadly) the only fix is a rude restart
            store.current_appliance.restart_evm_service(rude=True)
            try:
                store.current_appliance.wait_for_web_ui(900)
                store.write_line('EVM was frozen and had to be restarted.', purple=True)
                return
            except TimedOutError:
                pass
        e_message = str(e)
    except Exception as e:
        e_message = str(e)

    # Regardless of the exception raised, we didn't return anywhere above
    # time to call a human
    msg = 'Help! My appliance {} crashed with: {}'.format(store.current_appliance.url, e_message)
    store.slave_manager.message(msg)
    if 'appliance_police_recipients' in rdb:
        rdb_kwargs = {
            'subject': 'RDB Breakpoint: Appliance failure',
            'recipients': rdb.appliance_police_recipients,
        }
    else:
        rdb_kwargs = {}
    Rdb(msg).set_trace(**rdb_kwargs)
    store.slave_manager.message('Resuming testing following remote debugging')
Example #5
def appliance_police():
    if not store.slave_manager:
        return
    try:
        port_numbers = {
            'ssh': ports.SSH,
            'https': store.current_appliance.ui_port,
            'postgres': ports.DB}
        port_results = {pn: net_check(pp, force=True) for pn, pp in port_numbers.items()}
        for port, result in port_results.items():
            if not result:
                raise _AppliancePoliceException('Port {} was not contactable'.format(port),
                    port_numbers[port])

        try:
            status_code = requests.get(store.current_appliance.url, verify=False,
                                       timeout=120).status_code
        except Exception:
            raise _AppliancePoliceException('Getting status code failed', port_numbers['https'])

        if status_code != 200:
            raise _AppliancePoliceException('Status code was {}, should be 200'.format(
                status_code), port_numbers['https'])
        return
    except _AppliancePoliceException as e:
        # special handling for known failure conditions
        if e.port == 443:
            # If we had an error on 443, about 101% of the time it means the UI worker is frozen
            store.current_appliance.ssh_client().run_rails_command('MiqUiWorker.first.kill')
            try:
                store.current_appliance.wait_for_web_ui(900)
                store.write_line('UI worker was frozen and had to be restarted.', purple=True)
                return
            except TimedOutError:
                pass
        e_message = str(e)
    except Exception as e:
        e_message = str(e)

    # Regardless of the exception raised, we didn't return anywhere above
    # time to call a human
    msg = 'Help! My appliance {} crashed with: {}'.format(store.current_appliance.url, e_message)
    store.slave_manager.message(msg)
    Rdb(msg).set_trace(**{
        'subject': 'RDB Breakpoint: Appliance failure',
        'recipients': ['*****@*****.**', '*****@*****.**'],
    })
    store.slave_manager.message('Resuming testing following remote debugging')
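
net_check is also external to these examples; a minimal sketch of a TCP reachability check that is compatible with both call styles used above (net_check(port, force=True) and net_check(addr=..., port=..., force=True)) could look like the following, where treating force as a cache bypass and defaulting addr to localhost are assumptions:

import socket

def net_check(port, addr='localhost', force=False):
    """Sketch only: return True if a TCP connection to addr:port succeeds.
    The real helper presumably defaults addr to the current appliance and
    caches results unless force=True."""
    try:
        conn = socket.create_connection((addr, port), timeout=10)
        conn.close()
        return True
    except (socket.error, socket.timeout):
        return False
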
Example #6
def deploy_template(provider_key,
                    vm_name,
                    template_name=None,
                    timeout=900,
                    **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: how long to wait for the template deployment to finish
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = tuple(allow_skip.keys())  # except requires a tuple
        callable_mapping = allow_skip
    elif isinstance(allow_skip,
                    basestring) and allow_skip.lower() == "default":
        skip_exceptions = (OSOverLimit, RHEVRequestError,
                           exceptions.VMInstanceNotCloned, SSLError)
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=provider_crud.data['templates']
                               ['small_template']['name'])
        except KeyError:
            raise KeyError(
                'small_template not defined for Provider {} in cfme_data.yaml'.
                format(provider_key))
    else:
        deploy_args.update(template=template_name)

    deploy_args.update(provider_crud.deployment_helper(deploy_args))

    logger.info(
        "Getting ready to deploy VM/instance %s from template %s on provider %s",
        vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            vm_name = provider_crud.mgmt.deploy_template(timeout=timeout,
                                                         **deploy_args)
            logger.info("Provisioned VM/instance %s",
                        vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.exception('Could not provision VM/instance %s (%s: %s)',
                             vm_name,
                             type(e).__name__, str(e))
            try:
                provider_crud.mgmt.delete_vm(vm_name)
            except Exception:
                logger.exception("Unable to clean up vm:", vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line("Skipping due to a provider error: {}: {}\n".format(
            e_c.__name__, str(e)),
                         purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
Example #7
def deploy_template(provider_key,
                    vm_name,
                    template_name=None,
                    timeout=900,
                    **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = tuple(allow_skip.keys())  # except requires a tuple
        callable_mapping = allow_skip
    elif isinstance(allow_skip,
                    basestring) and allow_skip.lower() == "default":
        skip_exceptions = (OSOverLimit, RHEVRequestError,
                           exceptions.VMInstanceNotCloned, SSLError)
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    mgmt = provider_crud.get_mgmt_system()
    data = provider_crud.get_yaml_data()

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=data['small_template'])
        except KeyError:
            raise ValueError(
                'small_template not defined for Provider {} in cfme_data.yaml'
                .format(provider_key))
    else:
        deploy_args.update(template=template_name)

    if isinstance(mgmt, RHEVMSystem):
        if 'default_cluster' not in deploy_args:
            deploy_args.update(cluster=data['default_cluster'])
    elif isinstance(mgmt, VMWareSystem):
        if "allowed_datastores" not in deploy_args and "allowed_datastores" in data:
            deploy_args.update(allowed_datastores=data['allowed_datastores'])
    elif isinstance(mgmt, SCVMMSystem):
        if 'host_group' not in deploy_args:
            deploy_args.update(host_group=data.get("host_group", "All Hosts"))
    elif isinstance(mgmt, EC2System):
        pass
    elif isinstance(mgmt, OpenstackSystem):
        if ('network_name' not in deploy_args) and data.get('network'):
            deploy_args.update(network_name=data['network'])
    else:
        raise Exception("Unsupported provider type: %s" %
                        mgmt.__class__.__name__)

    logger.info(
        "Getting ready to deploy VM/instance %s from template %s on provider %s"
        % (vm_name, deploy_args['template'], data['name']))
    try:
        try:
            logger.debug("Deploy args: {}".format(deploy_args))
            vm_name = mgmt.deploy_template(timeout=timeout, **deploy_args)
            logger.info("Provisioned VM/instance %s" %
                        vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.error(
                'Could not provision VM/instance {} ({}: {})'.format(
                    vm_name,
                    type(e).__name__, str(e)))
            _vm_cleanup(mgmt, vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line("Skipping due to a provider error: {}: {}\n".format(
            e_c.__name__, str(e)),
                         purple=True)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
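
_vm_cleanup is called above but not shown; based on the inline cleanup done in the other deploy_template variants, a best-effort sketch would be:

def _vm_cleanup(mgmt, vm_name):
    """Sketch only: best-effort teardown of a partially provisioned VM,
    mirroring the inline cleanup in the other deploy_template variants."""
    try:
        logger.info('Attempting cleanup on VM/instance %s', vm_name)
        if mgmt.does_vm_exist(vm_name):
            logger.warning('Destroying VM/instance %s', vm_name)
            if not mgmt.delete_vm(vm_name):
                logger.error('Error destroying VM/instance %s', vm_name)
    except Exception:
        logger.exception('Unable to clean up VM/instance %s', vm_name)
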
Example #8
def deploy_template(provider_key,
                    vm_name,
                    template_name=None,
                    timeout=900,
                    **deploy_args):

    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = tuple(allow_skip.keys())  # except requires a tuple
        callable_mapping = allow_skip
    elif isinstance(allow_skip,
                    basestring) and allow_skip.lower() == "default":
        skip_exceptions = (OSOverLimit, RHEVRequestError, VMInstanceNotCloned,
                           SSLError)
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    mgmt = provider_crud.get_mgmt_system()
    data = provider_crud.get_yaml_data()

    deploy_args.update(vm_name=vm_name)
    if isinstance(mgmt, RHEVMSystem):
        if 'default_cluster' not in deploy_args:
            deploy_args.update(cluster=data['default_cluster'])
    elif isinstance(mgmt, VMWareSystem):
        if "allowed_datastores" not in deploy_args and "allowed_datastores" in data:
            deploy_args.update(allowed_datastores=data['allowed_datastores'])
    elif isinstance(mgmt, SCVMMSystem):
        if 'host_group' not in deploy_args:
            deploy_args.update(host_group=data.get("host_group", "All Hosts"))
    elif isinstance(mgmt, EC2System):
        pass
    elif isinstance(mgmt, OpenstackSystem):
        if ('network_name' not in deploy_args) and data.get('network'):
            deploy_args.update(network_name=data['network'])
    else:
        raise Exception("Unsupported provider type: %s" %
                        mgmt.__class__.__name__)

    if template_name is None:
        template_name = data['small_template']

    logger.info(
        "Getting ready to deploy VM/instance %s from template %s on provider %s"
        % (vm_name, template_name, data['name']))
    try:
        try:
            logger.debug("Deploy args: " + str(deploy_args))
            vm_name = mgmt.deploy_template(template_name,
                                           timeout=timeout,
                                           **deploy_args)
            logger.info("Provisioned VM/instance %s" %
                        vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.error('Could not provision VM/instance %s (%s)', vm_name,
                         e)
            try:
                logger.info("VM/Instance status: {}".format(
                    mgmt.vm_status(vm_name)))
            except Exception as f:
                logger.error(
                    "Could not retrieve VM/Instance status: {}: {}".format(
                        type(f).__name__, str(f)))
            logger.info('Attempting cleanup on VM/instance %s', vm_name)
            try:
                if mgmt.does_vm_exist(vm_name):
                    # Stop the vm first
                    logger.warning('Destroying VM/instance %s', vm_name)
                    if mgmt.delete_vm(vm_name):
                        logger.info('VM/instance %s destroyed', vm_name)
                    else:
                        logger.error('Error destroying VM/instance %s',
                                     vm_name)
            except Exception as f:
                logger.error(
                    'Could not destroy VM/instance {} ({}: {})'.format(
                        vm_name,
                        type(f).__name__, str(f)))
            finally:
                raise e
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line("Skipping due to a provider error: {}: {}\n".format(
            e_c.__name__, str(e)),
                         purple=True)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
def appliance_police(appliance):
    if not store.slave_manager:
        return
    try:
        available_ports = {
            'ssh': (appliance.hostname, appliance.ssh_port),
            'https': (appliance.hostname, appliance.ui_port),
            'postgres': (appliance.db_host
                         or appliance.hostname, appliance.db_port)
        }
        port_results = {
            pn: net_check(addr=p_addr, port=p_port, force=True)
            for pn, (p_addr, p_port) in available_ports.items()
        }
        for port, result in port_results.items():
            if port == 'ssh' and appliance.is_pod:
                # ssh is not available for podified appliance
                continue
            if not result:
                raise AppliancePoliceException('Unable to connect',
                                               available_ports[port][1])

        try:
            status_code = requests.get(appliance.url,
                                       verify=False,
                                       timeout=120).status_code
        except Exception:
            raise AppliancePoliceException('Getting status code failed',
                                           available_ports['https'][1])

        if status_code != 200:
            raise AppliancePoliceException(
                'Status code was {}, should be 200'.format(status_code),
                available_ports['https'][1])
        return
    except AppliancePoliceException as e:
        # special handling for known failure conditions
        if e.port == 443:
            # Lots of rdbs lately where evm seems to have entirely crashed
            # and (sadly) the only fix is a rude restart
            appliance.restart_evm_service(rude=True)
            try:
                appliance.wait_for_web_ui(900)
                store.write_line('EVM was frozen and had to be restarted.',
                                 purple=True)
                return
            except TimedOutError:
                pass
        e_message = str(e)
    except Exception as e:
        e_message = str(e)

    # Regardless of the exception raised, we didn't return anywhere above
    # time to call a human
    msg = 'Help! My appliance {} crashed with: {}'.format(
        appliance.url, e_message)
    store.slave_manager.message(msg)
    if 'appliance_police_recipients' in rdb:
        rdb_kwargs = {
            'subject': 'RDB Breakpoint: Appliance failure',
            'recipients': rdb.appliance_police_recipients,
        }
    else:
        rdb_kwargs = {}
    Rdb(msg).set_trace(**rdb_kwargs)
    store.slave_manager.message('Resuming testing following remote debugging')
Example #10
def deploy_template(provider_key, vm_name, template_name=None, timeout=900, **deploy_args):

    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = tuple(allow_skip.keys())  # except requires a tuple
        callable_mapping = allow_skip
    elif isinstance(allow_skip, basestring) and allow_skip.lower() == "default":
        skip_exceptions = (OSOverLimit, RHEVRequestError, VMInstanceNotCloned)
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_type = conf.cfme_data.get('management_systems', {})[provider_key]['type']
    if provider_type in infra_provider_type_map:
        provider_crud = get_infra_from_config(provider_key)
    else:
        provider_crud = get_cloud_from_config(provider_key)

    mgmt = provider_crud.get_mgmt_system()
    data = provider_crud.get_yaml_data()

    deploy_args.update(vm_name=vm_name)
    if isinstance(mgmt, RHEVMSystem):
        if 'default_cluster' not in deploy_args:
            deploy_args.update(cluster=data['default_cluster'])
    elif isinstance(mgmt, VMWareSystem):
        if "allowed_datastores" not in deploy_args and "allowed_datastores" in data:
            deploy_args.update(allowed_datastores=data['allowed_datastores'])
    elif isinstance(mgmt, SCVMMSystem):
        if 'host_group' not in deploy_args:
            deploy_args.update(host_group=data.get("host_group", "All Hosts"))
    elif isinstance(mgmt, EC2System):
        pass
    elif isinstance(mgmt, OpenstackSystem):
        if ('network_name' not in deploy_args) and data.get('network'):
            deploy_args.update(network_name=data['network'])
    else:
        raise Exception("Unsupported provider type: %s" % mgmt.__class__.__name__)

    if template_name is None:
        template_name = data['small_template']

    logger.info("Getting ready to deploy VM/instance %s from template %s on provider %s" %
        (vm_name, template_name, data['name']))
    try:
        try:
            logger.debug("Deploy args: " + str(deploy_args))
            vm_name = mgmt.deploy_template(template_name, timeout=timeout, **deploy_args)
            logger.info("Provisioned VM/instance %s" % vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.error('Could not provision VM/instance %s (%s)', vm_name, e)
            try:
                logger.info("VM/Instance status: {}".format(mgmt.vm_status(vm_name)))
            except Exception as f:
                logger.error(
                    "Could not retrieve VM/Instance status: {}: {}".format(
                        type(f).__name__, str(f)))
            logger.info('Attempting cleanup on VM/instance %s', vm_name)
            try:
                if mgmt.does_vm_exist(vm_name):
                    # Stop the vm first
                    logger.warning('Destroying VM/instance %s', vm_name)
                    if mgmt.delete_vm(vm_name):
                        logger.info('VM/instance %s destroyed', vm_name)
                    else:
                        logger.error('Error destroying VM/instance %s', vm_name)
            except Exception as f:
                logger.error(
                    'Could not destroy VM/instance {} ({}: {})'.format(
                        vm_name, type(f).__name__, str(f)))
            finally:
                raise e
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line(
            "Skipping due to a provider error: {}: {}\n".format(e_c.__name__, str(e)), purple=True)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
def deploy_template(provider_key, vm_name, template_name=None, timeout=900,
        **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = tuple(allow_skip.keys())  # except requires a tuple
        callable_mapping = allow_skip
    elif isinstance(allow_skip, basestring) and allow_skip.lower() == "default":
        skip_exceptions = (OSOverLimit, RHEVRequestError, exceptions.VMInstanceNotCloned, SSLError)
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    mgmt = provider_crud.get_mgmt_system()
    data = provider_crud.get_yaml_data()

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=data['small_template'])
        except KeyError:
            raise ValueError('small_template not defined for Provider {} in cfme_data.yaml'.format(
                provider_key))
    else:
        deploy_args.update(template=template_name)

    if isinstance(mgmt, RHEVMSystem):
        if 'default_cluster' not in deploy_args:
            deploy_args.update(cluster=data['default_cluster'])
    elif isinstance(mgmt, VMWareSystem):
        if "allowed_datastores" not in deploy_args and "allowed_datastores" in data:
            deploy_args.update(allowed_datastores=data['allowed_datastores'])
    elif isinstance(mgmt, SCVMMSystem):
        if 'host_group' not in deploy_args:
            deploy_args.update(host_group=data.get("host_group", "All Hosts"))
    elif isinstance(mgmt, EC2System):
        pass
    elif isinstance(mgmt, OpenstackSystem):
        if ('network_name' not in deploy_args) and data.get('network'):
            deploy_args.update(network_name=data['network'])
    else:
        raise Exception("Unsupported provider type: %s" % mgmt.__class__.__name__)

    logger.info("Getting ready to deploy VM/instance %s from template %s on provider %s" %
        (vm_name, deploy_args['template'], data['name']))
    try:
        try:
            logger.debug("Deploy args: {}".format(deploy_args))
            vm_name = mgmt.deploy_template(timeout=timeout, **deploy_args)
            logger.info("Provisioned VM/instance %s" % vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.error('Could not provision VM/instance {} ({}: {})'.format(
                vm_name, type(e).__name__, str(e)))
            _vm_cleanup(mgmt, vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line(
            "Skipping due to a provider error: {}: {}\n".format(e_c.__name__, str(e)), purple=True)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name