def test_broken_angular_select(request):
    """Test that checks the fancy selects do not break.

    Prerequisities:
        * A fresh downstream appliance

    Steps:
        1) Create a catalog.
        2) Create a catalog item, can be Generic and assign the catalog and OSE Installer dialog
            for testing purposes
        3) Try ordering the service, but instead of confirming the form, try changing some select.
    """
    # OSE Installer dialog, one dropdown from it
    the_select = AngularSelect("ose_size")
    cat = Catalog("Test_catalog_{}".format(fauxfactory.gen_alpha()))
    cat.create()
    request.addfinalizer(cat.delete)
    item = CatalogItem(
        item_type="Generic",
        name="Catitem_{}".format(fauxfactory.gen_alpha()),
        description=fauxfactory.gen_alpha(),
        display_in=True,
        catalog=cat.name,
        dialog="OSE Installer")
    item.create()
    request.addfinalizer(item.delete)

    # The check itself: interacting with the widget must not make it fall back
    # to a plain select rendered next to the angular one.
    pytest.sel.force_navigate(
        "order_service_catalog",
        context={"catalog": cat.name, "catalog_item": item})
    fill(the_select, "Medium")
    # Fixed typo in the failure message ("next ot" -> "next to").
    assert not the_select.is_broken, "The select displayed itself next to the angular select"
def ansible_repository(request, appliance, wait_for_ansible):
    """
    By default cfme_data.ansible_links.playbook_repositories.embedded_ansible is set for the url,
    but you can specify it explicitly with @pytest.mark.parametrize decorator on your test function.

    Example:
    @pytest.mark.parametrize('ansible_repository', ['nuage'], indirect=True)
    def test_function(ansible_repository):
        ...
    """
    collection = appliance.collections.ansible_repositories
    try:
        links = cfme_data.ansible_links.playbook_repositories
        # Indirect parametrization may supply a different repository key.
        repo_key = getattr(request, 'param', 'embedded_ansible')
        repo = collection.create(
            name=fauxfactory.gen_alpha(),
            url=getattr(links, repo_key),
            description=fauxfactory.gen_alpha()
        )
    except (KeyError, AttributeError):
        message = "Missing ansible_links content in cfme_data, cannot setup repository"
        logger.exception(message)  # log the exception for debug of the missing content
        pytest.skip(message)
    details_view = navigate_to(repo, "Details")
    # Block until the repository sync reports success.
    wait_for(
        lambda: details_view.entities.summary("Properties").get_text_of("Status") == "successful",
        timeout=60,
        fail_func=details_view.toolbar.refresh.click
    )
    yield repo

    repo.delete_if_exists()
def test_vmware_alarm_selection_does_not_fail(alert_collection):
    """Test the bug that causes CFME UI to explode when VMware Alarm type is selected.

    Creating the alert must fail with a validation flash message (no notification
    mechanism was configured), but it must NOT raise a CFME internal error.

    Metadata:
        test_flag: alerts
    """
    alert_obj = alert_collection.instantiate(
        "Trigger by CPU {}".format(fauxfactory.gen_alpha(length=4)),
        active=True,
        based_on="VM and Instance",
        evaluate=("VMware Alarm", {}),
        notification_frequency="5 Minutes"
    )
    try:
        alert_collection.create(
            "Trigger by CPU {}".format(fauxfactory.gen_alpha(length=4)),
            active=True,
            based_on="VM and Instance",
            evaluate=("VMware Alarm", {}),
            notification_frequency="5 Minutes"
        )
    except CFMEExceptionOccured as e:
        pytest.fail("The CFME has thrown an error: {}".format(str(e)))
    except Exception:
        # Any other exception is the expected outcome: the create must be
        # rejected with the "at least one notification" validation message.
        # (Removed the unused `as e` binding.)
        view = alert_obj.create_view(AlertDetailsView)
        view.flash.assert_message("At least one of E-mail, SNMP Trap, Timeline Event, or"
            " Management Event must be configured")
    else:
        pytest.fail("Creating this alert passed although it must fail.")
def test_embedded_ansible_credential_crud(credentials_collection, wait_for_ansible, credential_type,
        credentials, appliance):
    """CRUD an embedded-ansible credential and verify the edit is visible in the UI.

    The edited (and later checked) field depends on the credential type:
    Amazon -> access key, Google Compute Engine -> service account,
    everything else -> username. (Removed the commented-out dead code lines.)
    """
    credential = credentials_collection.create(
        "{}_credential_{}".format(credential_type, fauxfactory.gen_alpha()),
        credential_type,
        **credentials
    )
    updated_value = "edited_{}".format(fauxfactory.gen_alpha())
    with update(credential):
        if credential.credential_type == "Google Compute Engine":
            credential.service_account = updated_value
        elif credential.credential_type == "Amazon":
            credential.access_key = updated_value
        else:
            credential.username = updated_value
    view = navigate_to(credential, "Details")

    def wait_for_changes(field_name):
        # The summary table refreshes lazily; poll until the edit shows up.
        cr_opts = view.entities.summary("Credential Options")
        wait_for(
            lambda: cr_opts.get_text_of(field_name) == updated_value,
            fail_func=view.browser.selenium.refresh,
            delay=10,
            timeout=60
        )

    if credential.credential_type == "Amazon":
        wait_for_changes("Access Key")
    elif credential.credential_type == "Google Compute Engine":
        wait_for_changes("Service Account Email Address")
    else:
        wait_for_changes("Username")
    credential.delete()
def generate_policy_event(request, appliance, provider, vm_crud, register_event):
    """ Generate a policy event. This is a function and not a fixture so that it doesn't
    run for every parameter, only those which require policy events.

    Builds the full Control chain (tag action -> VM control policy -> policy
    profile), assigns the profile to the provider, registers the expected
    ``vm_create`` event and finally deploys the VM so the event fires.
    All created objects are torn down via ``request.addfinalizer``.
    """
    # create necessary objects
    action = appliance.collections.actions.create(
        fauxfactory.gen_alpha(), "Tag", dict(tag=("My Company Tags", "Environment", "Development"))
    )
    request.addfinalizer(action.delete)

    policy = appliance.collections.policies.create(
        policies.VMControlPolicy, fauxfactory.gen_alpha()
    )
    request.addfinalizer(policy.delete)

    policy.assign_events("VM Create Complete")
    # NOTE(review): calling assign_events() with no arguments as a finalizer
    # presumably clears the event assignment on teardown -- confirm intended.
    request.addfinalizer(policy.assign_events)
    policy.assign_actions_to_event("VM Create Complete", action)

    profile = appliance.collections.policy_profiles.create(
        fauxfactory.gen_alpha(), policies=[policy]
    )
    request.addfinalizer(profile.delete)

    # assign the policy profile to the provider
    provider.assign_policy_profiles(profile.description)
    request.addfinalizer(lambda: provider.unassign_policy_profiles(profile.description))

    register_event(target_type="VmOrTemplate", target_name=vm_crud.name, event_type="vm_create")

    vm_crud.create_on_provider(find_in_cfme=True)
Beispiel #6
0
def test_same_class_name_different_namespace(request, domain):
    """
    Polarion:
        assignee: dmisharo
        casecomponent: Automate
        initialEstimate: 1/16h
    """
    def make_namespace():
        return domain.namespaces.create(
            name=fauxfactory.gen_alpha(),
            description=fauxfactory.gen_alpha())

    def make_class(ns):
        return ns.classes.create(
            name=fauxfactory.gen_alphanumeric(),
            display_name=fauxfactory.gen_alphanumeric(),
            description=fauxfactory.gen_alphanumeric())

    first_ns = make_namespace()
    second_ns = make_namespace()

    first_cls = make_class(first_ns)
    second_cls = make_class(second_ns)
    assert first_cls.exists
    assert second_cls.exists

    # Deleting the class in one namespace must not touch its twin.
    first_cls.delete()
    assert not first_cls.exists
    assert second_cls.exists
def test_same_class_name_different_namespace(request, domain):
    """Classes created in two different namespaces are independent objects."""
    created_namespaces = [
        domain.namespaces.create(
            name=fauxfactory.gen_alpha(),
            description=fauxfactory.gen_alpha())
        for _ in range(2)
    ]

    created_classes = [
        ns.classes.create(
            name=fauxfactory.gen_alphanumeric(),
            display_name=fauxfactory.gen_alphanumeric(),
            description=fauxfactory.gen_alphanumeric())
        for ns in created_namespaces
    ]
    assert created_classes[0].exists
    assert created_classes[1].exists

    # Removing the first class must leave the second untouched.
    created_classes[0].delete()
    assert not created_classes[0].exists
    assert created_classes[1].exists
def test_broken_angular_select(request):
    """Test that checks the fancy selects do not break.

    Prerequisities:
        * A fresh downstream appliance

    Steps:
        1) Create a catalog.
        2) Create a catalog item, can be Generic and assign the catalog and OSE Installer dialog
            for testing purposes
        3) Try ordering the service, but instead of confirming the form, try changing some select.
    """
    # OSE Installer dialog, one dropdown from it
    the_select = AngularSelect("param_operatingSystemType")
    cat = Catalog("Test_catalog_{}".format(fauxfactory.gen_alpha()))
    cat.create()
    request.addfinalizer(cat.delete)
    item = CatalogItem(
        item_type="Generic",
        name="Catitem_{}".format(fauxfactory.gen_alpha()),
        description=fauxfactory.gen_alpha(),
        display_in=True,
        catalog=cat.name,
        dialog="azure-single-vm-from-user-image")
    item.create()
    request.addfinalizer(item.delete)
    sc = service_catalogs.ServiceCatalogs(item.name)
    navigate_to(sc, 'Order')
    # The check itself: filling the widget must not make it fall back to a
    # plain select rendered next to the angular one.
    fill(the_select, "Linux")
    # Fixed typo in the failure message ("next ot" -> "next to").
    assert not the_select.is_broken, "The select displayed itself next to the angular select"
def test_storage_volume_crud(appliance, provider):
    """ Test storage volume crud

    prerequisites:
        * Storage provider

    Steps:
        * Crate new volume
        * Delete volume
    """
    # create volume
    volumes = appliance.collections.volumes
    cinder_manager = '{} Cinder Manager'.format(provider.name)
    volume = volumes.create(name=fauxfactory.gen_alpha(),
                            storage_manager=cinder_manager,
                            tenant=provider.data['provisioning']['cloud_tenant'],
                            size=STORAGE_SIZE,
                            provider=provider)
    assert volume.exists

    # update volume: rename it and then rename it back
    original_name = volume.name
    temporary_name = fauxfactory.gen_alpha()
    with update(volume):
        volume.name = temporary_name

    with update(volume):
        volume.name = original_name

    # delete volume
    volume.delete(wait=True)
    assert not volume.exists
def new_instance(provider, zero_disk_flavor):
    """Provision a cloud instance using the zero-disk flavor; tear it down afterwards."""
    provisioning = provider.data['provisioning']
    form_values = {
        'request': {'email': fauxfactory.gen_email(),
                    'first_name': fauxfactory.gen_alpha(),
                    'last_name': fauxfactory.gen_alpha()},
        'catalog': {'num_vms': '1',
                    'vm_name': fauxfactory.gen_alpha()},
        'environment': {'cloud_network': provisioning['cloud_network'],
                        'cloud_tenant': provisioning['cloud_tenant']},
        'properties': {'instance_type': partial_match(zero_disk_flavor.name)},
    }

    try:
        instance = provider.appliance.collections.cloud_instances.create(
            form_values['catalog']['vm_name'],
            provider,
            form_values, find_in_cfme=True
        )

    except KeyError:
        # some yaml value wasn't found
        pytest.skip('Unable to find an image map in provider "{}" provisioning data: {}'
                    .format(provider, provisioning))

    yield instance

    instance.cleanup_on_provider()
Beispiel #11
0
def volume_with_type(appliance, provider):
    """Yield a volume created with a freshly-made provider-side volume type.

    The volume type is created directly through the provider's cinder API, then
    we wait until CFME's inventory shows it before creating the volume with it.
    Teardown removes both the volume and the volume type (best effort).
    """
    vol_type = provider.mgmt.capi.volume_types.create(name=fauxfactory.gen_alpha())
    volume_type = appliance.collections.volume_types.instantiate(vol_type.name, provider)

    # NOTE(review): wait_for_decorator presumably runs the decorated function
    # immediately at definition time, blocking here until the type appears.
    @wait_for_decorator(delay=10, timeout=300,
                        message="Waiting for volume type to appear")
    def volume_type_is_displayed():
        volume_type.refresh()
        return volume_type.exists

    collection = appliance.collections.volumes
    storage_manager = '{} Cinder Manager'.format(provider.name)
    volume = collection.create(name=fauxfactory.gen_alpha(),
                               storage_manager=storage_manager,
                               tenant=provider.data['provisioning']['cloud_tenant'],
                               volume_type=volume_type.name,
                               size=VOLUME_SIZE,
                               provider=provider)
    yield volume

    if volume.exists:
        volume.delete(wait=False)

    if volume_type.exists:
        provider.mgmt.capi.volume_types.delete(vol_type)
Beispiel #12
0
def test_domain_crud(request, enabled, appliance):
    """
    Polarion:
        assignee: ghubale
        casecomponent: Automate
        caseimportance: critical
        initialEstimate: 1/30h
        tags: automate
    """
    domain = appliance.collections.domains.create(
        name=fauxfactory.gen_alpha(),
        description=fauxfactory.gen_alpha(),
        enabled=enabled)
    request.addfinalizer(domain.delete_if_exists)
    assert domain.exists
    details = navigate_to(domain, 'Details')
    # The details title carries a "Disabled" marker only for disabled domains.
    if not enabled:
        assert 'Disabled' in details.title.text
    else:
        assert 'Disabled' not in details.title.text
    new_description = "editdescription{}".format(fauxfactory.gen_alpha())
    with update(domain):
        domain.description = new_description
    edit_view = navigate_to(domain, 'Edit')
    assert edit_view.description.value == new_description
    assert domain.exists
    # A cancelled delete must keep the domain; a confirmed one removes it.
    domain.delete(cancel=True)
    assert domain.exists
    domain.delete()
    assert not domain.exists
Beispiel #13
0
def backup(appliance, provider):
    """Yield a volume backup created for a brand-new volume.

    Skips the test if the provider-side volume never reached ``available``.
    Teardown best-effort deletes both the backup and the volume.
    """
    volume_collection = appliance.collections.volumes
    storage_manager = '{} Cinder Manager'.format(provider.name)
    backup_collection = appliance.collections.volume_backups.filter({'provider': provider})

    # create new volume
    volume = volume_collection.create(name=fauxfactory.gen_alpha(),
                                      storage_manager=storage_manager,
                                      tenant=provider.data['provisioning']['cloud_tenant'],
                                      size=STORAGE_SIZE,
                                      provider=provider)

    # create new backup for the created volume
    if volume.status == 'available':
        backup_name = fauxfactory.gen_alpha()
        volume.create_backup(backup_name)
        backup = backup_collection.instantiate(backup_name, provider)
        yield backup
    else:
        pytest.skip('Skipping volume backup tests, provider side volume creation fails')

    try:
        if backup.exists:
            backup_collection.delete(backup)
        if volume.exists:
            volume.delete(wait=False)
    except Exception:
        # Deliberate best-effort cleanup: a failed delete must not fail the test.
        logger.warning('Exception during volume deletion - skipping..')
def klass(namespace):
    """Return an automate class whose schema already carries an ``execute`` method field."""
    automate_class = namespace.classes.create(
        name=fauxfactory.gen_alpha(),
        description=fauxfactory.gen_alpha()
    )
    automate_class.schema.add_field(name="execute", type="Method", data_type="String")
    return automate_class
def test_embedded_ansible_credential_crud(request, wait_for_ansible, credential_type, credentials):
    """CRUD an embedded-ansible credential; Amazon edits the access key, others the username."""
    collection = CredentialsCollection()
    credential = collection.create(
        "{}_credential_{}".format(credential_type, fauxfactory.gen_alpha()),
        credential_type,
        **credentials
    )
    new_value = "edited_{}".format(fauxfactory.gen_alpha())
    with update(credential):
        if credential.credential_type == "Amazon":
            credential.access_key = new_value
        else:
            credential.username = new_value
    details = navigate_to(credential, "Details")

    def _wait_until_updated(field_name):
        # Poll the Details page until the edited value is rendered.
        wait_for(
            lambda: details.credential_options.get_text_of(field_name) == new_value,
            fail_func=details.browser.selenium.refresh,
            delay=10,
            timeout=60
        )

    checked_field = "Access Key" if credential.credential_type == "Amazon" else "Username"
    _wait_until_updated(checked_field)
    credential.delete()
Beispiel #16
0
def test_vm_migration_after_assigning_tenant_quota(appliance, small_vm, set_roottenant_quota,
                                                   custom_prov_data, provider):
    """
    Polarion:
        assignee: ghubale
        casecomponent: Infra
        caseimportance: high
        initialEstimate: 1/6h
        tags: quota
        testSteps:
            1. Create VM
            2. Assign tenant quota
            3. Migrate VM
            4. Check whether migration is successfully done
    """

    target_host = check_hosts(small_vm, provider)
    small_vm.migrate_vm(fauxfactory.gen_email(), fauxfactory.gen_alpha(),
                        fauxfactory.gen_alpha(), host=target_host)
    # The migration request is identified by the VM name in its description.
    cells = {'Description': small_vm.name, 'Request Type': 'Migrate'}
    migration_request = appliance.collections.requests.instantiate(
        small_vm.name, cells=cells, partial_check=True)
    migration_request.wait_for_request(method='ui')
    failure_msg = "Request failed with the message {}".format(
        migration_request.row.last_message.text)
    assert migration_request.is_succeeded(method='ui'), failure_msg
def test_update_embedded_ansible_webui(enabled_embedded_appliance, appliance, old_version):
    """ Tests updating an appliance which has embedded ansible role enabled, also confirms that the
        role continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(enabled_embedded_appliance)
    wait_for(do_appliance_versions_match, func_args=(appliance, enabled_embedded_appliance),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # The embedded-ansible services must all come back up after the update.
    assert wait_for(func=lambda: enabled_embedded_appliance.is_embedded_ansible_running, num_sec=90)
    assert wait_for(func=lambda: enabled_embedded_appliance.rabbitmq_server.running, num_sec=60)
    assert wait_for(func=lambda: enabled_embedded_appliance.nginx.running, num_sec=60)
    repo_collection = enabled_embedded_appliance.collections.ansible_repositories
    repo_name = "example_{}".format(fauxfactory.gen_alpha())
    repo_description = "edited_{}".format(fauxfactory.gen_alpha())
    try:
        repo = repo_collection.create(
            name=repo_name,
            url=cfme_data.ansible_links.playbook_repositories.console_db,
            description=repo_description)
    except KeyError:
        pytest.skip("Skipping since no such key found in yaml")
    details = navigate_to(repo, "Details")
    # A successful repository sync proves the role still works post-update.
    wait_for(
        lambda: details.entities.summary("Properties").get_text_of("Status").lower() == "successful",
        timeout=60,
        fail_func=details.toolbar.refresh.click
    )
def test_vmware_vimapi_hotadd_disk(
        request, testing_group, provider, testing_vm, domain, cls):
    """ Tests hot adding a disk to vmware vm.

    This test exercises the ``VMware_HotAdd_Disk`` method, located in ``/Integration/VMware/VimApi``

    Steps:
        * It creates an instance in ``System/Request`` that can be accessible from eg. a button.
        * Then it creates a button, that refers to the ``VMware_HotAdd_Disk`` in ``Request``. The
            button shall belong in the VM and instance button group.
        * After the button is created, it goes to a VM's summary page, clicks the button.
        * The test waits until the capacity of disks is raised.

    Metadata:
        test_flag: hotdisk, provision
    """
    # Automate method that only sets the requested disk size on $evm.root.
    meth = cls.methods.create(
        name='load_value_{}'.format(fauxfactory.gen_alpha()),
        script=dedent('''\
            # Sets the capacity of the new disk.

            $evm.root['size'] = 1  # GB
            exit MIQ_OK
            '''))

    request.addfinalizer(meth.delete_if_exists)

    # Instance that calls the method and is accessible from the button
    instance = cls.instances.create(
        name="VMware_HotAdd_Disk_{}".format(fauxfactory.gen_alpha()),
        fields={
            "meth4": {'value': meth.name},  # To get the value
            "rel5": {'value': "/Integration/VMware/VimApi/VMware_HotAdd_Disk"},
        },
    )

    request.addfinalizer(instance.delete_if_exists)

    # Button that will invoke the dialog and action
    button_name = fauxfactory.gen_alphanumeric()
    button = Button(group=testing_group,
                    text=button_name,
                    hover=button_name, system="Request", request=instance.name)
    request.addfinalizer(button.delete_if_exists)
    button.create()

    def _get_disk_capacity():
        # Re-read the VM summary so we see current allocation, not a cached one.
        testing_vm.summary.reload()
        return testing_vm.summary.datastore_allocation_summary.total_allocation.value

    original_disk_capacity = _get_disk_capacity()
    logger.info('Initial disk allocation: %s', original_disk_capacity)
    toolbar.select(testing_group.text, button.text)
    flash.assert_no_errors()
    try:
        # Success criterion: total allocation grows once the disk is hot-added.
        wait_for(
            lambda: _get_disk_capacity() > original_disk_capacity, num_sec=180, delay=5)
    finally:
        logger.info('End disk capacity: %s', _get_disk_capacity())
Beispiel #19
0
def test_duplicate_namespace_disallowed(request, parent_namespace):
    """Creating a namespace whose name already exists under the same parent must fail.

    Modernized from the deprecated ``error.expected`` helper to
    ``pytest.raises(..., match=...)``, matching the sibling test in this file.
    """
    ns = parent_namespace.namespaces.create(
        name=fauxfactory.gen_alpha(),
        description=fauxfactory.gen_alpha())
    with pytest.raises(Exception, match="Name has already been taken"):
        parent_namespace.namespaces.create(
            name=ns.name,
            description=ns.description)
Beispiel #20
0
def test_duplicate_namespace_disallowed(request, parent_namespace):
    """A second namespace with an already-taken name under the same parent is rejected."""
    existing = parent_namespace.namespaces.create(
        name=fauxfactory.gen_alpha(),
        description=fauxfactory.gen_alpha())
    # Re-using the same name must raise the duplicate-name error.
    with pytest.raises(Exception, match="Name has already been taken"):
        parent_namespace.namespaces.create(
            name=existing.name,
            description=existing.description)
Beispiel #21
0
def domain(appliance):
    """This fixture used to create automate domain - Datastore/Domain"""
    automate_domain = appliance.collections.domains.create(
        name=fauxfactory.gen_alpha(),
        description=fauxfactory.gen_alpha(),
        enabled=True,
    )
    yield automate_domain
    automate_domain.delete_if_exists()
def policy_profile(request):
    """Create a VM control policy wrapped in a policy profile; both removed via finalizers."""
    control_policy = VMControlPolicy(fauxfactory.gen_alpha())
    control_policy.create()
    request.addfinalizer(control_policy.delete)
    prof = PolicyProfile(fauxfactory.gen_alpha(), policies=[control_policy])
    prof.create()
    request.addfinalizer(prof.delete)
    return prof
Beispiel #23
0
def namespace(domain):
    """This fixture used to create automate namespace - Datastore/Domain/Namespace"""
    created_namespace = domain.namespaces.create(
        name=fauxfactory.gen_alpha(),
        description=fauxfactory.gen_alpha())
    yield created_namespace
    created_namespace.delete_if_exists()
Beispiel #24
0
def domain(appliance):
    """Yield an enabled automate domain with generated name/description."""
    collection = DomainCollection(appliance)
    created_domain = collection.create(
        name='test_{}'.format(fauxfactory.gen_alpha()),
        description='desc_{}'.format(fauxfactory.gen_alpha()),
        enabled=True)
    yield created_domain
    created_domain.delete()
Beispiel #25
0
def parent_namespace(request, domain):
    """Return the domain itself for the 'plain' parameter, else a nested namespace."""
    if request.param == 'plain':
        return domain
    return domain.namespaces.create(
        name=fauxfactory.gen_alpha(),
        description=fauxfactory.gen_alpha()
    )
Beispiel #26
0
def test_vm_create(request, appliance, vm_crud, provider, register_event):
    """ Test whether vm_create_complete event is emitted.

    Prerequisities:
        * A provider that is set up and able to deploy VMs

    Steps:
        * Create a Control setup (action, policy, profile) that apply a tag on a VM when
            ``VM Create Complete`` event comes
        * Deploy the VM outside of CFME (directly in the provider)
        * Refresh provider relationships and wait for VM to appear
        * Assert the tag appears.

    Metadata:
        test_flag: provision, events

    Polarion:
        assignee: jdupuy
        casecomponent: Events
        caseimportance: high
        initialEstimate: 1/8h
    """
    # Tag action applied when the policy's event fires.
    action = appliance.collections.actions.create(
        fauxfactory.gen_alpha(),
        "Tag",
        dict(tag=("My Company Tags", "Environment", "Development")))
    request.addfinalizer(action.delete)

    policy = appliance.collections.policies.create(
        VMControlPolicy,
        fauxfactory.gen_alpha()
    )
    request.addfinalizer(policy.delete)

    policy.assign_events("VM Create Complete")

    # Explicitly unassign the event on teardown so policy.delete can succeed.
    @request.addfinalizer
    def _cleanup():
        policy.unassign_events("VM Create Complete")

    policy.assign_actions_to_event("VM Create Complete", action)

    profile = appliance.collections.policy_profiles.create(
        fauxfactory.gen_alpha(), policies=[policy])
    request.addfinalizer(profile.delete)

    provider.assign_policy_profiles(profile.description)
    request.addfinalizer(lambda: provider.unassign_policy_profiles(profile.description))

    register_event(target_type='VmOrTemplate', target_name=vm_crud.name, event_type='vm_create')

    vm_crud.create_on_provider(find_in_cfme=True)

    def _check():
        # The policy fired iff the Environment/Development tag landed on the VM.
        return any(tag.category.display_name == "Environment" and tag.display_name == "Development"
                   for tag in vm_crud.get_tags())

    wait_for(_check, num_sec=300, delay=15, message="tags to appear")
Beispiel #27
0
def klass(namespace):
    """This fixture used to create automate class - Datastore/Domain/Namespace/Class"""
    automate_class = namespace.classes.create(
        name=fauxfactory.gen_alpha(),
        display_name=fauxfactory.gen_alpha(),
        description=fauxfactory.gen_alpha())
    yield automate_class
    automate_class.delete_if_exists()
def _create_policy_profile(appliance, server_name):
    """Create a middleware-server compliance policy scoped to *server_name*, wrapped in a profile."""
    scope_expression = "fill_field(Middleware Server : Product, INCLUDES, {})".format(server_name)
    compliance_policy = appliance.collections.policies.create(
        MiddlewareServerCompliancePolicy,
        fauxfactory.gen_alpha(),
        scope=scope_expression
    )
    return appliance.collections.policy_profiles.create(
        fauxfactory.gen_alpha(), policies=[compliance_policy])
Beispiel #29
0
    def test_gen_alpha_4(self):
        """
        @Test: Create alpha string with negative length
        @Feature: String Generator
        @Assert: String is not created due to value error
        """
        # A negative length is invalid input and must raise ValueError.
        self.assertRaises(ValueError, gen_alpha, length=-1)
def ansible_repository(appliance, wait_for_ansible):
    """Yield an ansible playbook repository created from a public GitHub url."""
    repo = appliance.collections.ansible_repositories.create(
        name=fauxfactory.gen_alpha(),
        url="https://github.com/quarckster/ansible_playbooks",
        description=fauxfactory.gen_alpha())
    yield repo

    if repo.exists:
        repo.delete()
def generate_runtime_name(file_path):
    """Build a runtime name: random 8-char lowercase prefix + the file's basename."""
    random_prefix = fauxfactory.gen_alpha(8).lower()
    return "{}_{}".format(random_prefix, os.path.basename(file_path))
Beispiel #32
0
def test_edit_subnet(subnet):
    """Edits private subnet's name"""
    subnet.edit(new_name=fauxfactory.gen_alpha())
    # Wait for the provider refresh so the rename is reflected in inventory.
    wait_for(subnet.provider_obj.is_refreshed,
             func_kwargs=dict(refresh_delta=10),
             timeout=600,
             delay=10)
    assert subnet.exists
Beispiel #33
0
    def setUp(self):
        """Creates the pre-requisites for the Incremental updates that are used
        per each test: a date-filtered content view, a host collection, an
        activation key with subscription + repo overrides, and a registered
        RHEL6 client VM with a real package installed."""
        super().setUp()
        # Create the content view that will be used to filter erratas
        self.rhel_6_partial_cv = entities.ContentView(
            organization=self.org,
            name=gen_alpha(),
            repository=[self.rhva_6_repo, self.rhel6_sat6tools_repo],
        ).create()

        # Create a content view filter to filter out errata
        rhel_6_partial_cvf = entities.ErratumContentViewFilter(
            content_view=self.rhel_6_partial_cv,
            type='erratum',
            name='rhel_6_partial_cv_filter',
            repository=[self.rhva_6_repo],
        ).create()

        # Create a content view filter rule - filtering out errata in the last
        # 365 days
        start_date = (date.today() - timedelta(days=365)).strftime('%Y-%m-%d')
        entities.ContentViewFilterRule(
            content_view_filter=rhel_6_partial_cvf,
            types=['security', 'enhancement', 'bugfix'],
            start_date=start_date,
            end_date=date.today().strftime('%Y-%m-%d'),
        ).create()

        # Publish content view and re-read it

        self.rhel_6_partial_cv.publish()
        self.rhel_6_partial_cv = self.rhel_6_partial_cv.read()

        # Promote content view to 'DEV' and 'QE'
        assert len(self.rhel_6_partial_cv.version) == 1
        for env in (self.dev_lce, self.qe_lce):
            promote(self.rhel_6_partial_cv.version[0], env.id)

        # Create host collection
        self.rhel_6_partial_hc = entities.HostCollection(organization=self.org,
                                                         name=gen_alpha(),
                                                         max_hosts=5).create()

        # Create activation key for content view
        kwargs = {'organization': self.org, 'environment': self.qe_lce.id}
        rhel_6_partial_ak = entities.ActivationKey(
            name=gen_alpha(),
            content_view=self.rhel_6_partial_cv,
            host_collection=[self.rhel_6_partial_hc],
            **kwargs,
        ).create()

        # Fetch available subscriptions
        subs = entities.Subscription(organization=self.org).search()
        assert len(subs) > 0

        # Add default subscription to activation key
        sub_found = False
        for sub in subs:
            if sub.name == DEFAULT_SUBSCRIPTION_NAME:
                rhel_6_partial_ak.add_subscriptions(
                    data={'subscription_id': sub.id})
                sub_found = True
        assert sub_found

        # Enable product content in activation key
        rhel_6_partial_ak.content_override(
            data={
                'content_overrides': [{
                    'content_label': REPOS['rhst6']['id'],
                    'value': '1'
                }]
            })

        # Create client machine and register it to satellite with
        # rhel_6_partial_ak
        self.vm = VirtualMachine(distro=DISTRO_RHEL6, tag='incupdate')
        self.addCleanup(vm_cleanup, self.vm)
        self.setup_vm(self.vm, rhel_6_partial_ak.name, self.org.label)
        self.vm.enable_repo(REPOS['rhva6']['id'])
        # Timestamp taken before the install so the task search below only
        # matches tasks started by this package installation.
        timestamp = datetime.utcnow()
        self.vm.run(f'yum install -y {REAL_0_RH_PACKAGE}')

        # Find the content host and ensure that tasks started by package
        # installation has finished
        host = entities.Host().search(
            query={'search': f'name={self.vm.hostname}'})
        wait_for_tasks(
            search_query='label = Actions::Katello::Host::UploadPackageProfile'
            ' and resource_id = {}'
            ' and started_at >= "{}"'.format(host[0].id, timestamp))
        # Force host to generate or refresh errata applicability
        call_entity_method_with_timeout(host[0].errata_applicability,
                                        timeout=600)
Beispiel #34
0
def alpha_strings_generator(items=1, length=10):
    """Yield ``items`` random alpha strings, each ``length`` characters long."""
    produced = 0
    while produced < items:
        yield fauxfactory.gen_alpha(length=length)
        produced += 1
def parent_namespace(request, domain):
    """For the 'plain' parameter return the domain itself, otherwise a child namespace."""
    if request.param != 'plain':
        return domain.namespaces.create(
            name=fauxfactory.gen_alpha(),
            description=fauxfactory.gen_alpha())
    return domain
Beispiel #36
0
def domain_rest(appliance, domain):
    """Yield the REST representation of a freshly created, enabled automate domain."""
    ui_domain = appliance.collections.domains.create(
        name=fauxfactory.gen_alpha(),
        description=fauxfactory.gen_alpha(),
        enabled=True,
    )
    yield appliance.rest_api.collections.automate_domains.get(name=ui_domain.name)
    ui_domain.delete_if_exists()
Beispiel #37
0
def test_edit_volume(volume, appliance):
    """Rename a volume through the UI update flow and verify the new name is listed."""
    renamed = fauxfactory.gen_alpha()
    with update(volume):
        volume.name = renamed
    all_volumes_view = navigate_to(appliance.collections.volumes, 'All')
    # Searching across pages confirms the edit actually persisted.
    assert all_volumes_view.entities.get_entity(name=renamed, surf_pages=True)
# -*- coding: utf-8 -*-
import fauxfactory
import pytest

from cfme import test_requirements
from cfme.fixtures.automate import DatastoreImport
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.log_validator import LogValidator
from cfme.utils.wait import wait_for

pytestmark = [test_requirements.dialog, pytest.mark.tier(2)]

# Maps a dialog element type (as labelled in the dialog editor) to a tuple of
# (widget attribute name on the rendered form, value used to fill it).
# NOTE(review): None / "" appear to mean "present but not filled with a
# meaningful value" -- confirm against the test that consumes this mapping.
WIDGETS = {
    "Text Box": ("input", fauxfactory.gen_alpha()),
    "Check Box": ("checkbox", True),
    "Text Area": ("input", fauxfactory.gen_alpha()),
    "Radio Button": ("radiogroup", None),
    "Dropdown": ("dropdown", "One"),
    "Tag Control": ("dropdown", "Production Linux Team"),
    "Timepicker": ("input", "")
}


@pytest.fixture(scope="function")
def service_dialog(appliance, widget_name):
    service_dialog = appliance.collections.service_dialogs
    element_data = {
        'element_information': {
            'ele_label': fauxfactory.gen_alphanumeric(start="label_"),
            'ele_name': fauxfactory.gen_alphanumeric(start="name_"),
Beispiel #39
0
 def gen_value(self):
     """Return a value suitable for a :class:`URLField`."""
     random_subdomain = gen_alpha()
     return gen_url(subdomain=random_subdomain)
Beispiel #40
0
def test_migration_long_name(request, appliance, v2v_providers, host_creds,
                             conversion_tags):
    """Test to check VM name with 64 character should work

    Polarion:
        assignee: sshveta
        initialEstimate: 1h
    """
    # Pick the first NFS datastore of the source VMware provider.
    source_datastores_list = v2v_providers.vmware_provider.data.get(
        "datastores", [])
    source_datastore = [
        d.name for d in source_datastores_list if d.type == "nfs"
    ][0]
    collection = appliance.provider_based_collection(
        v2v_providers.vmware_provider)

    # Following code will create vm name with 64 characters
    vm_name = "{vm_name}{extra_words}".format(
        vm_name=random_vm_name(context="v2v"),
        extra_words=fauxfactory.gen_alpha(51))
    vm_obj = collection.instantiate(
        name=vm_name,
        provider=v2v_providers.vmware_provider,
        template_name=rhel7_minimal(v2v_providers.vmware_provider)["name"],
    )
    # Provision the long-named source VM on VMware; cleaned up at teardown.
    vm_obj.create_on_provider(timeout=2400,
                              find_in_cfme=True,
                              allow_skip="default",
                              datastore=source_datastore)
    request.addfinalizer(lambda: vm_obj.cleanup_on_provider())
    # Build the VMware -> RHV infrastructure mapping.
    form_data = _form_data(v2v_providers.vmware_provider,
                           v2v_providers.rhv_provider)

    infrastructure_mapping_collection = appliance.collections.v2v_mappings
    mapping = infrastructure_mapping_collection.create(form_data)

    @request.addfinalizer
    def _cleanup():
        infrastructure_mapping_collection.delete(mapping)

    # Create the migration plan and kick it off immediately.
    migration_plan_collection = appliance.collections.v2v_plans
    migration_plan = migration_plan_collection.create(
        name="long_name_{}".format(fauxfactory.gen_alphanumeric()),
        description="desc_long_name{}".format(fauxfactory.gen_alphanumeric()),
        infra_map=mapping.name,
        vm_list=[vm_obj],
        start_migration=True,
    )

    # explicit wait for spinner of in-progress status card
    view = appliance.browser.create_view(
        navigator.get_class(migration_plan_collection, "All").VIEW.pick())
    wait_for(func=view.progress_card.is_plan_started,
             func_args=[migration_plan.name],
             message="migration plan is starting, be patient please",
             delay=5,
             num_sec=150,
             handle_exception=True,
             fail_cond=False)

    # wait until plan is in progress
    wait_for(
        func=view.plan_in_progress,
        func_args=[migration_plan.name],
        message="migration plan is in progress, be patient please",
        delay=5,
        num_sec=1800,
    )
    # Once migration finishes the plan moves to the "Completed Plans" tab.
    view.switch_to("Completed Plans")
    view.wait_displayed()
    migration_plan_collection.find_completed_plan(migration_plan)
    logger.info(
        "For plan {plan_name}, migration status : {count}, total time elapsed: {clock}"
        .format(plan_name=migration_plan.name,
                count=view.migration_plans_completed_list.get_vm_count_in_plan(
                    migration_plan.name),
                clock=view.migration_plans_completed_list.get_clock(
                    migration_plan.name)))

    # validate MAC address matches between source and target VMs
    assert view.migration_plans_completed_list.is_plan_succeeded(
        migration_plan.name)
    migrated_vm = get_migrated_vm_obj(vm_obj, v2v_providers.rhv_provider)
    assert vm_obj.mac_address == migrated_vm.mac_address
def test_positive_generate_ansible_template():
    """Report template named 'Ansible Inventory' (default name is specified in settings)
    must be present in Satellite 6.7 and later in order to provide enhanced functionality
    for Ansible Tower inventory synchronization with Satellite.

    :id: f1f7adfc-9601-4498-95c8-3e82e2b36583

    :setup:
        1. A user with minimal required permissions: 'Ansible Tower Inventory Reader' role
        2. A fake host to be checked in report output

    :steps:
        1. Check settings for default Ansible Inventory template name and ensure
           the template is present
        2. Try to render the template using the user with ATIR role
        3. Check the fake host is present in the output

    :expectedresults: Report template is present, renederable and provides output

    :CaseImportance: Medium
    """
    # The template name is configurable; read it from settings instead of
    # hard-coding 'Ansible Inventory'.
    settings = Settings.list({'search': 'name=ansible_inventory_template'})
    assert 1 == len(settings)
    template_name = settings[0]['value']

    # The template must exist out of the box.
    report_list = ReportTemplate.list()
    assert template_name in [rt['name'] for rt in report_list]

    login = gen_alpha().lower()
    password = gen_alpha().lower()
    loc = Location.info({'name': DEFAULT_LOC})
    org = Org.info({'name': DEFAULT_ORG})

    # Minimal-permission user in the default org/location.
    user = make_user({
        'login': login,
        'password': password,
        'organization-ids': org['id'],
        'location-ids': loc['id'],
    })

    User.add_role({
        'login': user['login'],
        'role': 'Ansible Tower Inventory Reader'
    })

    # Fake host that should show up in the rendered inventory.
    host_name = gen_alpha().lower()
    host = make_fake_host({'name': host_name})

    # Schedule the report as the restricted user, then fetch its data by the
    # job id parsed from the schedule output ("Job ID: <id>").
    schedule = ReportTemplate.with_user(username=user['login'],
                                        password=password).schedule(
                                            {'name': template_name})

    report_data = ReportTemplate.with_user(username=user['login'],
                                           password=password).report_data({
                                               'name':
                                               template_name,
                                               'job-id':
                                               schedule[0].split(
                                                   'Job ID: ', 1)[1]
                                           })

    # The host name is expected in the second comma-separated field of each
    # non-empty report line.
    assert host['name'] in [
        item.split(',')[1] for item in report_data if len(item) > 0
    ]
def test_negative_nonauthor_of_report_cant_download_it():
    """The resulting report should only be downloadable by
       the user that generated it or admin. Check.

    :id: a4bc77db-146e-4871-a42e-e93887464986

    :setup: Installed Satellite, user that can list running tasks

    :steps:

        1. hammer -u u1 -p p1 report-template schedule
        2. hammer -u u2 -p p2 report-template report-data

    :expectedresults: Report can't be downloaded. Error.
    """
    uname_viewer = gen_alpha()
    uname_viewer2 = gen_alpha()
    password = gen_alpha()

    loc = Location.info({'name': DEFAULT_LOC})
    org = Org.info({'name': DEFAULT_ORG})

    # Two users sharing the same password, organization and location.
    user1 = make_user({
        'login': uname_viewer,
        'password': password,
        'organization-ids': org['id'],
        'location-ids': loc['id'],
    })

    user2 = make_user({
        'login': uname_viewer2,
        'password': password,
        'organization-ids': org['id'],
        'location-ids': loc['id'],
    })

    role = make_role()
    # For each relevant resource type, collect all of its available
    # permissions and attach them to the role as one filter per type.
    # (Replaces five copy-pasted list comprehensions with a single loop.)
    resource_types = (
        'Organization',
        'Location',
        'ReportTemplate',
        'ProvisioningTemplate',
        'JobTemplate',
    )
    for resource_type in resource_types:
        permissions = [
            permission['name'] for permission in Filter.available_permissions(
                {'search': 'resource_type={}'.format(resource_type)})
        ]
        make_filter({'role-id': role['id'], 'permissions': permissions})
    # Both users get the same role, so only authorship distinguishes them.
    User.add_role({'login': user1['login'], 'role-id': role['id']})
    User.add_role({'login': user2['login'], 'role-id': role['id']})

    name = gen_alpha()
    content = gen_alpha()

    # user1 creates and schedules the report template.
    report_template = ReportTemplate.with_user(username=user1['login'],
                                               password=password).create({
                                                   'name': name,
                                                   'organization-id': org['id'],
                                                   'location-id': loc['id'],
                                                   'file': content,
                                               })

    schedule = ReportTemplate.with_user(username=user1['login'],
                                        password=password).schedule(
                                            {'name': report_template['name']})
    # Job id is parsed from the schedule output ("Job ID: <id>"); hoisted so
    # both report_data calls use the identical value.
    job_id = schedule[0].split('Job ID: ', 1)[1]

    # The author can download the rendered report...
    report_data = ReportTemplate.with_user(username=user1['login'],
                                           password=password).report_data({
                                               'id': report_template['name'],
                                               'job-id': job_id,
                                           })

    assert content in report_data
    # ...while the non-author must get an error.
    with pytest.raises(CLIReturnCodeError):
        ReportTemplate.with_user(username=user2['login'],
                                 password=password).report_data({
                                     'id': report_template['name'],
                                     'job-id': job_id,
                                 })
Beispiel #43
0
def domain(request, appliance):
    """Create and return an enabled automate domain; removed at teardown if present."""
    created = DomainCollection(appliance).create(
        name=fauxfactory.gen_alpha(), enabled=True)
    request.addfinalizer(created.delete_if_exists)
    return created
        "timer_week": "3 Weeks",
        "hour": "12",
        "minute": "5",
        "time_zone": "(GMT+10:00) Melbourne",
    },
    "once": {
        "run": "Once",
        "hour": "12",
        "minute": "5",
        "time_zone": "(GMT+10:00) Melbourne",
    },
}

# Invalid email addresses keyed by the kind of defect they exhibit.
# NOTE(review): the "trailing-dot" and "missing-username" entries were
# corrupted in this copy ("[email protected]" / "******" censoring artifacts);
# reconstructed here from the semantics of their keys.
INVALID_EMAILS = {
    "string":
    "{name}".format(name=fauxfactory.gen_alpha()),
    "multiple-dots":
    "{name}..{name}@example..com".format(name=fauxfactory.gen_alpha(5)),
    "brackets":
    "{name}@example.com({name})".format(name=fauxfactory.gen_alpha(5)),
    "leading-dot":
    ".{name}@example.com".format(name=fauxfactory.gen_alpha(5)),
    "dash":
    "{name}@-example.com".format(name=fauxfactory.gen_alpha(5)),
    "missing-@":
    "{name}.example.com".format(name=fauxfactory.gen_alpha(5)),
    "trailing-dot":
    "{name}.@example.com".format(name=fauxfactory.gen_alpha(5)),
    "missing-username":
    "@example.com",
}
Beispiel #45
0
def test_vmware_vimapi_hotadd_disk(request, testing_group, provider,
                                   testing_vm, domain, cls):
    """ Tests hot adding a disk to vmware vm.

    This test exercises the ``VMware_HotAdd_Disk`` method, located in ``/Integration/VMware/VimApi``

    Steps:
        * It creates an instance in ``System/Request`` that can be accessible from eg. a button.
        * Then it creates a button, that refers to the ``VMware_HotAdd_Disk`` in ``Request``. The
            button shall belong in the VM and instance button group.
        * After the button is created, it goes to a VM's summary page, clicks the button.
        * The test waits until the capacity of disks is raised.

    Metadata:
        test_flag: hotdisk, provision
    """
    # Inline automate method that only sets the requested disk size (1 GB).
    meth = cls.methods.create(name='load_value_{}'.format(
        fauxfactory.gen_alpha()),
                              script=dedent('''\
            # Sets the capacity of the new disk.

            $evm.root['size'] = 1  # GB
            exit MIQ_OK
            '''))

    request.addfinalizer(meth.delete_if_exists)

    # Instance that calls the method and is accessible from the button
    instance = cls.instances.create(
        name="VMware_HotAdd_Disk_{}".format(fauxfactory.gen_alpha()),
        fields={
            "meth4": {
                'value': meth.name
            },  # To get the value
            "rel5": {
                'value': "/Integration/VMware/VimApi/VMware_HotAdd_Disk"
            },
        },
    )

    request.addfinalizer(instance.delete_if_exists)

    # Button that will invoke the dialog and action
    button_name = fauxfactory.gen_alphanumeric()
    button = Button(group=testing_group,
                    text=button_name,
                    hover=button_name,
                    system="Request",
                    request=instance.name)
    request.addfinalizer(button.delete_if_exists)
    button.create()

    def _get_disk_capacity():
        # Re-reads the VM summary so the value reflects the current state.
        testing_vm.summary.reload()
        return testing_vm.summary.datastore_allocation_summary.total_allocation.value

    original_disk_capacity = _get_disk_capacity()
    logger.info('Initial disk allocation: %s', original_disk_capacity)
    # Press the custom button and verify no flash errors were raised.
    toolbar.select(testing_group.text, button.text)
    flash.assert_no_errors()
    try:
        # Hot-added disk should eventually raise the total allocation.
        wait_for(lambda: _get_disk_capacity() > original_disk_capacity,
                 num_sec=180,
                 delay=5)
    finally:
        logger.info('End disk capacity: %s', _get_disk_capacity())
Beispiel #46
0
def namespace(request, domain):
    """Create and return a namespace with random name/description under *domain*."""
    ns_name = fauxfactory.gen_alpha()
    ns_description = fauxfactory.gen_alpha()
    return domain.namespaces.create(name=ns_name, description=ns_description)
Beispiel #47
0
 def _assign_profile_to_vm(vm):
     """Create a policy profile wrapping ssa_policy and attach it to *vm*.

     Both the profile and the assignment are undone via finalizers.
     """
     profile_name = 'ssa_policy_profile_{}'.format(fauxfactory.gen_alpha())
     profile = appliance.collections.policy_profiles.create(
         profile_name, policies=[ssa_policy])
     request.addfinalizer(profile.delete)
     vm.assign_policy_profiles(profile.description)
     request.addfinalizer(lambda: vm.unassign_policy_profiles(profile.description))
Beispiel #48
0
def test_folder_field_scope(request, vmware_provider, vmware_vm):
    """This test tests the bug that makes the folder filter in expression not work.

    Prerequisities:
        * A VMware provider.
        * A VM on the provider.
        * A tag to assign.

    Steps:
        * Read the VM's 'Parent Folder Path (VMs & Templates)' from its summary page.
        * Create an action for assigning the tag to the VM.
        * Create a policy, for scope use ``Field``, field name
            ``VM and Instance : Parent Folder Path (VMs & Templates)``, ``INCLUDES`` and the
            folder name as stated on the VM's summary page.
        * Assign the ``VM Discovery`` event to the policy.
        * Assign the action to the ``VM Discovery`` event.
        * Create a policy profile and assign the policy to it.
        * Assign the policy profile to the provider.
        * Delete the VM from the CFME database.
        * Initiate provider refresh and wait for VM to appear again.
        * Assert that the VM gets tagged by the tag.
    """
    # Retrieve folder location
    folder = None
    tags = vmware_vm.get_tags()
    for tag in tags:
        if "Parent Folder Path (VMs & Templates)" in tag:
            # Folder path is everything after the first colon.
            folder = tag.split(":", 1)[-1].strip()
            logger.info("Detected folder: %s", folder)
            break
    else:
        # for/else: runs only when no matching tag was found above.
        pytest.fail("Could not read the folder from the tags:\n{}".format(
            repr(tags)))

    # Create Control stuff
    action = Action(fauxfactory.gen_alpha(), "Tag",
                    dict(tag=("My Company Tags", "Service Level", "Platinum")))
    action.create()
    request.addfinalizer(action.delete)
    # Policy scoped on the folder field read above.
    policy = VMControlPolicy(
        fauxfactory.gen_alpha(),
        scope=
        ("fill_field(VM and Instance : Parent Folder Path (VMs & Templates), "
         "INCLUDES, {})".format(folder)))
    policy.create()
    request.addfinalizer(policy.delete)
    policy.assign_events("VM Discovery")
    request.addfinalizer(policy.assign_events)  # Unassigns
    policy.assign_actions_to_event("VM Discovery", action)
    profile = PolicyProfile(fauxfactory.gen_alpha(), policies=[policy])
    profile.create()
    request.addfinalizer(profile.delete)

    # Assign policy profile to the provider
    vmware_provider.assign_policy_profiles(profile.description)
    request.addfinalizer(
        lambda: vmware_provider.unassign_policy_profiles(profile.description))

    # Delete and rediscover the VM
    vmware_vm.delete()
    vmware_vm.wait_for_delete()
    vmware_provider.refresh_provider_relationships()
    vmware_vm.wait_to_appear()

    # Wait for the tag to appear
    wait_for(vmware_vm.get_tags,
             num_sec=600,
             delay=15,
             fail_condition=lambda tags: "Service Level: Platinum" not in tags,
             message="vm be tagged")
Beispiel #49
0
def vm_crud(provider, small_template):
    """Instantiate a VM object named 'test_genealogy_<random>' from the small template."""
    unique_name = 'test_genealogy_{}'.format(fauxfactory.gen_alpha(length=8).lower())
    return VM.factory(unique_name, provider, template_name=small_template)
Beispiel #50
0
def generate_ds_name(ds_name):
    """Append a random lowercase 8-char alpha suffix to *ds_name* and return it."""
    suffix = fauxfactory.gen_alpha(8).lower()
    return ds_name + suffix
Beispiel #51
0
def test_alert_hardware_reconfigured(request, appliance, configure_fleecing,
                                     smtp_test, create_vm, setup_for_alerts):
    """Tests alert based on "Hardware Reconfigured" evaluation.

    According https://bugzilla.redhat.com/show_bug.cgi?id=1396544 Hardware Reconfigured alerts
    require drift history. So here are the steps for triggering hardware reconfigured alerts based
    on CPU Count:
        1. Run VM smart state analysis.
        2. Change CPU count.
        3. Run VM smart state analysis again.
        4. Run VM reconfigure again.
    Then the alert for CPU count change should be triggered. It is either CPU increased or decreased
    depending on what has been done in your step 2, not the result of step 4. Step 4 is just to
    trigger the event.

    Bugzilla:
        1396544
        1730805

    Metadata:
        test_flag: alerts, provision

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        initialEstimate: 1/4h
    """
    vm = create_vm
    email = fauxfactory.gen_email()
    service_request_desc = (
        "VM Reconfigure for: {0} - Processor Sockets: {1}, "
        "Processor Cores Per Socket: 1, Total Processors: {1}")
    # Alert fires when the number of CPU cores increases on the VM.
    alert = appliance.collections.alerts.create(
        fauxfactory.gen_alpha(length=36,
                              start="Trigger by hardware reconfigured "),
        active=True,
        based_on="VM and Instance",
        evaluate=("Hardware Reconfigured", {
            "hardware_attribute": "Number of CPU Cores",
            "operator": "Increased",
        }),
        notification_frequency="1 Minute",
        emails=email)
    request.addfinalizer(alert.delete)
    setup_for_alerts(request, [alert], vm_name=vm.name)
    wait_for_ssa_enabled(vm)
    sockets_count = vm.configuration.hw.sockets

    # Two scan+reconfigure rounds: the scans build the drift history the
    # alert needs, the reconfigures raise the socket count (see docstring).
    for i in range(1, 3):
        do_scan(vm, rediscover=False)
        vm.reconfigure(
            changes={
                "cpu": True,
                "sockets": str(sockets_count + i),
                "disks": (),
                "network_adapters": ()
            })
        # Block until the reconfigure request finishes before the next round.
        service_request = appliance.collections.requests.instantiate(
            description=service_request_desc.format(vm.name, sockets_count +
                                                    i))
        service_request.wait_for_request(method="ui", num_sec=300, delay=10)
    # The alert is delivered by email; verify it arrived via the SMTP fixture.
    wait_for_alert(smtp_test,
                   alert,
                   delay=30 * 60,
                   additional_checks={
                       "text": vm.name,
                       "from_address": email
                   })
yggrQk58C+xu31BBq3Cb0PAX0BM3N248G7bm71ZG05yovqNwUe5QA7OvDgH/l5sL
PQeeuqiGpnfR4wk2yN7/TFMCgYAXYWWl43wjT9lg97nMP0n6NAOs0icSGSNfxecG
ck0VjO4uFH91iUmuFbp4OT1MZkgjLL/wJvM2WzkSywP4CxW/h6bV35TOCZOSu26k
3a7wK8t60Fvm8ifEYUBzIfZRNAfajZHefPmYfwOD3RsbcqmLgRBBj1X7Pdu2/8LI
TXXaywKBgQCaXeEZ5BTuD7FvMSX95EamDJ/DMyE8TONwDHMIowf2IQbf0Y5U7ntK
6pm5O95cJ7l2m3jUbKIUy0Y8HPW2MgwstcZXKkzlR/IOoSVgdiAnPjVKlIUvVBUx
0u7GxCs5nfyEPjEHTBn1g7Z6U8c6x1r7F50WsLzJftLfqo7tElNO5A==
-----END RSA PRIVATE KEY-----
"""


CREDENTIALS = [
    (
        "Machine",
        {
            "username": fauxfactory.gen_alpha(),
            "password": fauxfactory.gen_alpha(),
            "privilage_escalation": "sudo",
            "privilage_escalation_username": fauxfactory.gen_alpha(),
            "privilage_escalation_password": fauxfactory.gen_alpha()
        }
    ),
    (
        "Scm",
        {
            "username": fauxfactory.gen_alpha(),
            "password": fauxfactory.gen_alpha(),
        }
    ),
    (
        "Amazon",
Beispiel #53
0
def vmware_vm(request, vmware_provider):
    """Create a 'test_control_<random>' VM on the VMware provider; removed at teardown."""
    unique_name = "test_control_{}".format(fauxfactory.gen_alpha().lower())
    vm = VM.factory(unique_name, vmware_provider)
    vm.create_on_provider(find_in_cfme=True)
    request.addfinalizer(vm.delete_from_provider)
    return vm
Beispiel #54
0
def namespace(domain):
    """This fixture used to create automate namespace - Datastore/Domain/Namespace"""
    ns = domain.namespaces.create(name=fauxfactory.gen_alpha(),
                                  description=fauxfactory.gen_alpha())
    yield ns
    # Best-effort cleanup; ignores the namespace already being gone.
    ns.delete_if_exists()
Beispiel #55
0
def test_invoke_custom_automation(request, action_collection):
    """This test tests a bug that caused the ``Invoke Custom Automation`` fields to disappear.

    Steps:
        * Go create new action, select Invoke Custom Automation
        * The form with additional fields should appear
    """
    # Fill every available field so the whole form is exercised.
    fields = {
        "message": fauxfactory.gen_alpha(),
        "request": fauxfactory.gen_alpha(),
    }
    for index in range(1, 6):
        fields["attribute_{}".format(index)] = fauxfactory.gen_alpha()
        fields["value_{}".format(index)] = fauxfactory.gen_alpha()
    action = action_collection.create(
        fauxfactory.gen_alpha(), "Invoke a Custom Automation", fields)

    def _cleanup():
        if action.exists:
            action.delete()

    request.addfinalizer(_cleanup)
Beispiel #56
0
def test_invoke_custom_automation(request, appliance):
    """This test tests a bug that caused the ``Invoke Custom Automation`` fields to disappear.

    Steps:
        * Go create new action, select Invoke Custom Automation
        * The form with additional fields should appear

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: medium
        initialEstimate: 1/6h

    Bugzilla:
        1243357
    """
    # Fill every available field so the whole form is exercised.
    fields = {
        "message": fauxfactory.gen_alpha(),
        "request": fauxfactory.gen_alpha(),
    }
    for index in range(1, 6):
        fields["attribute_{}".format(index)] = fauxfactory.gen_alpha()
        fields["value_{}".format(index)] = fauxfactory.gen_alpha()
    action = appliance.collections.actions.create(
        fauxfactory.gen_alpha(), "Invoke a Custom Automation", fields)

    def _cleanup():
        if action.exists:
            action.delete()

    request.addfinalizer(_cleanup)
Beispiel #57
0
def test_automate_relationship_trailing_spaces(request, klass, namespace, domain):
    """
    Handle trailing whitespaces in automate instance relationships.

    Polarion:
        assignee: ghubale
        initialEstimate: 1/10h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: Automate
        tags: automate
        title: Test automate relationship trailing spaces
        testSteps:
            1. Create a class and its instance, also create second one,
               that has a relationship field.
            2. Create an instance with the relationship field pointing to the first class'
               instance but add a couple of whitespaces after it.
            3. Execute the AE model, eg. using Simulate.
        expectedResults:
            1.
            2.
            3. Logs contain no resolution errors.

    PR:
        https://github.com/ManageIQ/manageiq/pull/7550
    """
    # Message to display in automation log by executing method of klass
    catch_string = fauxfactory.gen_alphanumeric()

    # Added method1 for klass1
    method = klass.methods.create(
        name=fauxfactory.gen_alphanumeric(),
        location='inline',
        script='$evm.log(:info, "{}")'.format(catch_string)
    )
    request.addfinalizer(method.delete_if_exists)

    # Added schema for klass1 with type method for calling the method1 in same klass1
    klass.schema.add_fields({'name': 'meth', 'type': 'Method', 'data_type': 'String'})

    # Created instance1 to execute method1
    instance = klass.instances.create(
        name=fauxfactory.gen_alphanumeric(),
        display_name=fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alphanumeric(),
        fields={'meth': {'value': method.name}}
    )
    request.addfinalizer(instance.delete_if_exists)

    # Created klass2 under same domain/namespace
    klass2 = namespace.classes.create(
        name=fauxfactory.gen_alpha(),
        display_name=fauxfactory.gen_alpha(),
        description=fauxfactory.gen_alpha()
    )
    request.addfinalizer(klass2.delete_if_exists)

    # Added schema for klass2 with type Relationship for calling instance1 of klass1
    klass2.schema.add_fields({'name': 'rel', 'type': 'Relationship', 'data_type': 'String'})

    # Created instance2 of klass2 and called instance1 of klass1. Here couple of white spaces are
    # added in the value field of rel type. This trailing whitespace is the
    # whole point of the test (see the linked PR above).
    instance2 = klass2.instances.create(
        name=fauxfactory.gen_alphanumeric(),
        display_name=fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alphanumeric(),
        fields={
            "rel": {
                "value": "/{domain}/{namespace}/{klass}/{instance}   ".format(
                    domain=domain.name,
                    namespace=namespace.name,
                    klass=klass.name,
                    instance=instance.name,
                )
            }
        },
    )
    request.addfinalizer(instance2.delete_if_exists)

    # Checking if automation log is giving resolution error or not by searching 'ERROR'.
    # Also checking if method1 of klass1 is executed successfully or not by searching 'catch_string'
    # in automation log.
    result = LogValidator(
        "/var/www/miq/vmdb/log/automation.log", matched_patterns=[".*{}.*".format(catch_string)],
        failure_patterns=[".*ERROR.*"]
    )
    result.start_monitoring()

    # Executing the automate method of klass1 using simulation
    simulate(
        appliance=klass.appliance,
        request="Call_Instance",
        attributes_values={
            "namespace": "{}/{}".format(domain.name, namespace.name),
            "class": klass2.name,
            "instance": instance2.name,
        },
    )
    # Passes only if catch_string appeared and no ERROR lines were logged.
    assert result.validate(wait="60s")
Beispiel #58
0
def test_vmware_vimapi_hotadd_disk(
        appliance, request, testing_group, provider, testing_vm, domain, cls):
    """ Tests hot adding a disk to vmware vm.

    This test exercises the ``VMware_HotAdd_Disk`` method, located in ``/Integration/VMware/VimApi``

    Steps:
        * It creates an instance in ``System/Request`` that can be accessible from eg. a button.
        * Then it creates a button, that refers to the ``VMware_HotAdd_Disk`` in ``Request``. The
            button shall belong in the VM and instance button group.
        * After the button is created, it goes to a VM's summary page, clicks the button.
        * The test waits until the capacity of disks is raised.

    Metadata:
        test_flag: hotdisk, provision
    """
    # Inline automate method that only sets the requested disk size (1 GB).
    meth = cls.methods.create(
        name='load_value_{}'.format(fauxfactory.gen_alpha()),
        script=dedent('''\
            # Sets the capacity of the new disk.

            $evm.root['size'] = 1  # GB
            exit MIQ_OK
            '''))

    request.addfinalizer(meth.delete_if_exists)

    # Instance that calls the method and is accessible from the button
    instance = cls.instances.create(
        name="VMware_HotAdd_Disk_{}".format(fauxfactory.gen_alpha()),
        fields={
            "meth4": {'value': meth.name},  # To get the value
            "rel5": {'value': "/Integration/VMware/VimApi/VMware_HotAdd_Disk"},
        },
    )

    request.addfinalizer(instance.delete_if_exists)

    # Button that will invoke the dialog and action
    button_name = fauxfactory.gen_alphanumeric()
    button = testing_group.buttons.create(
        button_class=appliance.collections.buttons.DEFAULT,
        text=button_name,
        hover=button_name, system="Request", request=instance.name)
    request.addfinalizer(button.delete_if_exists)

    def _get_disk_capacity():
        # Reads the value straight off the VM details page.
        # NOTE(review): this returns the displayed *text*; the `>` comparison
        # below is therefore a string comparison -- confirm the format keeps
        # it monotonic for the expected values.
        view = testing_vm.load_details(refresh=True)
        return view.entities.summary('Datastore Allocation Summary').get_text_of('Total Allocation')

    original_disk_capacity = _get_disk_capacity()
    logger.info('Initial disk allocation: %s', original_disk_capacity)

    # Ad-hoc view for the custom button dropdown added by testing_group.
    class CustomButtonView(View):
        custom_button = Dropdown(testing_group.text)

    view = appliance.browser.create_view(CustomButtonView)
    view.custom_button.item_select(button.text)

    view = appliance.browser.create_view(BaseLoggedInPage)
    view.flash.assert_no_error()
    try:
        # Hot-added disk should eventually raise the total allocation.
        wait_for(
            lambda: _get_disk_capacity() > original_disk_capacity, num_sec=180, delay=5)
    finally:
        logger.info('End disk capacity: %s', _get_disk_capacity())
def vm_name(provider):
    """Build a unique VM name of the form 'test_act-<provider key>-<random>'."""
    suffix = fauxfactory.gen_alpha().lower()
    return f"test_act-{provider.key}-{suffix}"
Beispiel #60
0
from cfme import test_requirements
from cfme.base.credential import Credential
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.update import update

# Tests concerning database authentication
pytestmark = [test_requirements.auth]

# Password candidates exercising edge cases of database-authentication
# password handling; each entry's comment names the edge case it covers.
TEST_PASSWORDS = [
    f"{fauxfactory.gen_alpha()} ",  # trailing whitespace
    f" {fauxfactory.gen_alpha()}",  # leading whitespace
    f"$#!{fauxfactory.gen_alpha()}",  # leading spec char
    f"{fauxfactory.gen_alpha(17)}",  # pw > 16 char
    "",  # blank
    fauxfactory.gen_alpha().upper(),  # uppercase char
    r"$%&'()*+,-./:;<=>?@[\]^_{|}~",  # special char only
]


@pytest.fixture
def user(appliance):
    name = fauxfactory.gen_alpha(15, start="test-user-")
    creds = Credential(principal=name, secret=fauxfactory.gen_alpha())
    user_group = appliance.collections.groups.instantiate(
        description="EvmGroup-vm_user")
    user = appliance.collections.users.create(
        name=name,
        credential=creds,
        groups=user_group,
    )