Code example #1
def _modify_firewall_rules(firewall_rules_path):
    """
    :param firewall_rules_path: Path to the firewall rules file (including the file name)
    """
    dc_region = 'RegionOne' if ProjVar.get_var('IS_DC') else None

    ssh_client = ControllerClient.get_active_controller(name=dc_region)
    LOG.info("Install firewall rules: {}".format(firewall_rules_path))
    auth_info = Tenant.get('admin_platform', dc_region=dc_region)
    start_time = common.get_date_in_format(ssh_client=ssh_client)
    time.sleep(1)
    cli.system('firewall-rules-install',
               firewall_rules_path,
               ssh_client=ssh_client,
               auth_info=auth_info)

    def _wait_for_config_apply(auth_info_, con_ssh_=None):
        controllers = system_helper.get_controllers(auth_info=auth_info_,
                                                    con_ssh=con_ssh_)
        for controller in controllers:
            system_helper.wait_for_events(
                start=start_time,
                fail_ok=False,
                timeout=60,
                entity_instance_id='host=controller',
                event_log_id=EventLogID.CONFIG_OUT_OF_DATE,
                auth_info=auth_info_,
                con_ssh=con_ssh_,
                **{
                    'Entity Instance ID': 'host={}'.format(controller),
                    'State': 'set'
                })
            # Extend timeout for controller-1 config-out-of-date clear to 5min due to CGTS-8497
            system_helper.wait_for_events(
                start=start_time,
                fail_ok=False,
                timeout=300,
                entity_instance_id='host=controller',
                event_log_id=EventLogID.CONFIG_OUT_OF_DATE,
                auth_info=auth_info_,
                con_ssh=con_ssh_,
                **{
                    'Entity Instance ID': 'host={}'.format(controller),
                    'State': 'clear'
                })

    LOG.info("Wait for config to apply on both controllers")
    _wait_for_config_apply(auth_info_=auth_info, con_ssh_=ssh_client)

    if ProjVar.get_var('IS_DC'):
        subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
        LOG.info(
            "Wait for sync audit for {} in dcmanager.log".format(subcloud))
        dc_helper.wait_for_sync_audit(subclouds=subcloud)

        LOG.info("Wait for config apply on {}".format(subcloud))
        _wait_for_config_apply(auth_info_=Tenant.get('admin_platform'))

    # Ensures iptables has enough time to populate the list with new ports
    time.sleep(10)
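
A minimal, hypothetical invocation of this helper (the rules path below is illustrative only; the real test supplies a path prepared by its setup code):

# Hypothetical usage; the rules path is illustrative. The helper blocks
# until both controllers raise and then clear the config-out-of-date
# event, so the caller can check the new iptables ports right after.
_modify_firewall_rules('/home/sysadmin/iptables.rules')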
Code example #2
    def revert():
        LOG.fixture_step("Manage {} if unmanaged".format(primary_subcloud))
        dc_helper.manage_subcloud(primary_subcloud)

        LOG.fixture_step("Delete new keypair on central region")
        nova_helper.delete_keypairs(keypairs=NEW_KEYPAIR, auth_info=central_auth)

        LOG.fixture_step("Wait for sync audit on {} and keypair to sync over".
                         format(primary_subcloud))
        dc_helper.wait_for_sync_audit(subclouds=primary_subcloud, filters_regex='keypair')
        dc_helper.wait_for_subcloud_keypair(primary_subcloud, expected_keypair=central_keypair, timeout=60,
                                            check_interval=10)
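
This revert is an inner teardown function; in pytest it would typically be registered as a finalizer inside the fixture that created the keypair. A generic sketch of that pattern (the fixture name, resource, and wiring are assumptions; only revert() above comes from the source):

from pytest import fixture

@fixture(scope='module')
def resource_precheck(request):
    # Hypothetical setup: create or record the resource under test.
    resource = 'cgcsauto_keypair'

    def revert():
        # Stand-in for the real teardown shown above.
        print('cleaning up {}'.format(resource))

    # addfinalizer runs revert() at module teardown even if a test fails.
    request.addfinalizer(revert)
    return resource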
Code example #3
def test_dc_modify_https(revert_https):
    """
    Test enable/disable https

    Test Steps:
        - Ensure central region https config differs from subcloud
        - Wait for subcloud sync audit and ensure subcloud https is not changed
        - Verify CLIs in subcloud and central region
        - Modify https on central and subcloud
        - Verify CLIs in subcloud and central region

    Teardown:
        - Revert https config on central and subcloud

    """
    origin_https_sub, origin_https_central, central_auth, sub_auth = revert_https
    subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')

    new_https_sub = not origin_https_sub
    new_https_central = not origin_https_central

    LOG.tc_step(
        "Ensure central region https config differs from {}".format(subcloud))
    security_helper.modify_https(enable_https=new_https_sub,
                                 auth_info=central_auth)

    LOG.tc_step(
        "Wait for subcloud sync audit and ensure {} https is not changed".
        format(subcloud))
    dc_helper.wait_for_sync_audit(subclouds=subcloud)
    assert origin_https_sub == keystone_helper.is_https_enabled(
        auth_info=sub_auth), "HTTPS config changed in subcloud"

    LOG.tc_step("Verify cli's in {} and central region".format(subcloud))
    verify_cli(sub_auth, central_auth)

    if new_https_central != new_https_sub:
        LOG.tc_step("Set central region https to {}".format(new_https_central))
        security_helper.modify_https(enable_https=new_https_central,
                                     auth_info=central_auth)

    LOG.tc_step("Set {} https to {}".format(subcloud, new_https_sub))
    security_helper.modify_https(enable_https=new_https_sub,
                                 auth_info=sub_auth)

    LOG.tc_step(
        "Verify cli's in {} and central region after https modify on subcloud".
        format(subcloud))
    verify_cli(sub_auth, central_auth)
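
verify_cli is defined elsewhere in this test module and is not shown in the snippet; a plausible, purely illustrative shape is a read-only platform query against each region, which exercises the auth/https path after the flip:

# Illustrative only; the real verify_cli may run many more commands.
def verify_cli(sub_auth, central_auth):
    for auth in (central_auth, sub_auth):
        # Any read-only query confirms CLI access still works.
        system_helper.get_hosts(auth_info=auth)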
Code example #4
def test_dc_keypair(keypair_precheck):
    """

    Create keypair on central region and check it is propagated to subclouds
    Args:
        keypair_precheck: test fixture for setup/teardown

    Setups:
        - Ensure primary subcloud is managed and keypair info is synced

    Test Steps:
        - Un-manage primary subcloud
        - Add a new keypair on central region
        - Wait for new keypair to sync over to managed online subclouds
        - Ensure central keypair is not updated on unmanaged primary subcloud
        - Re-manage primary subcloud and ensure new keypair syncs over

    Teardown:
        - Delete newly created keypair

    """
    primary_subcloud, managed_subclouds, central_keypair = keypair_precheck
    central_auth = Tenant.get('admin', dc_region='RegionOne')

    LOG.tc_step("Unmanage {}".format(primary_subcloud))
    dc_helper.unmanage_subcloud(subcloud=primary_subcloud, check_first=False)

    LOG.tc_step('Add new keypair to central region')
    nova_helper.create_keypair(NEW_KEYPAIR, auth_info=central_auth)

    LOG.tc_step("Wait for new keypair to sync over to managed subclouds: {}".format(managed_subclouds))
    expt_keypair = central_keypair + [NEW_KEYPAIR]
    dc_helper.wait_for_sync_audit(subclouds=managed_subclouds, filters_regex='keypair')
    for managed_sub in managed_subclouds:
        dc_helper.wait_for_subcloud_keypair(subcloud=managed_sub, expected_keypair=expt_keypair,
                                            timeout=30, check_interval=10)

    LOG.tc_step("Ensure new keypair is not synced to unmanaged subcloud: {}".format(primary_subcloud))
    code_keypair = dc_helper.wait_for_subcloud_keypair(subcloud=primary_subcloud,
                                                       expected_keypair=expt_keypair,
                                                       timeout=15, check_interval=5, fail_ok=True)[0]

    assert code_keypair == 1, "keypair is updated unexpectedly on unmanaged subcloud {}".format(primary_subcloud)

    LOG.tc_step('Re-manage {} and ensure keypair syncs over'.format(primary_subcloud))
    dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=False)
    dc_helper.wait_for_subcloud_keypair(subcloud=primary_subcloud, expected_keypair=expt_keypair)
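The wait_for_subcloud_* helpers follow a convention visible throughout these snippets: they return a tuple whose first element is a return code, and with fail_ok=True a timeout yields code 1 instead of raising, which is what the code_keypair == 1 assertion relies on. A generic sketch of that convention (signature and messages are inferred, not the suite's real code):

import time

def wait_for(check, timeout=30, check_interval=5, fail_ok=False):
    # Poll check() until it passes or the timeout expires.
    end_time = time.time() + timeout
    while time.time() < end_time:
        if check():
            return 0, 'criteria met'
        time.sleep(check_interval)
    if fail_ok:
        return 1, 'timed out'  # callers assert on code == 1
    raise TimeoutError('criteria not met within {}s'.format(timeout))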
Code example #5
    def revert():
        LOG.fixture_step("Manage {} if unmanaged".format(primary_subcloud))
        dc_helper.manage_subcloud(primary_subcloud)

        LOG.fixture_step(
            "Delete new SNMP community string and trapdest on central region")
        system_helper.delete_snmp_comm(comms=SNMP_COMM, auth_info=central_auth)
        system_helper.delete_snmp_trapdest(ip_addrs=SNMP_TRAPDEST[1],
                                           auth_info=central_auth)

        LOG.fixture_step(
            "Wait for sync audit on {} and SNMP community strings and trapdests to sync over"
            .format(primary_subcloud))
        dc_helper.wait_for_sync_audit(subclouds=primary_subcloud)
        dc_helper.wait_for_subcloud_snmp_comms(primary_subcloud,
                                               expected_comms=central_comms,
                                               timeout=60,
                                               check_interval=10)
        dc_helper.wait_for_subcloud_snmp_trapdests(
            primary_subcloud,
            expected_trapdests=central_trapdests,
            timeout=60,
            check_interval=10)
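
SNMP_COMM and SNMP_TRAPDEST are module-level constants not shown in these snippets; from how they are used (Code example #9 below passes SNMP_TRAPDEST[0] as the community string and SNMP_TRAPDEST[1] as the IP), they plausibly look like this (values illustrative only):

# Hypothetical module constants; values are illustrative.
SNMP_COMM = 'cgcsauto_comm'
SNMP_TRAPDEST = ('cgcsauto_trapdest', '8.8.8.8')  # (community string, ip address)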
Code example #6
def test_dc_modify_timezone(prev_check):
    """
    Test timezone modification on system controller and subcloud. Ensure the timezone
    change is not propagated.
    Setups:
        - Ensure both central and subcloud regions are configured with UTC
        - Get the timestamps for host created_at before timezone modify

    Test Steps:
        - Change the timezone in central region and wait until the change is applied
        - Change the timezone to a different zone in subcloud and wait until the change is applied
        - Verify host created_at timestamp updated according to the local timezone for the region
        - Swact on subcloud and ensure timezone and host created_at timestamp persists locally
        - Swact central controller and ensure timezone and host created_at timestamp persists
          in central and subcloud

    Teardown:
        - Change timezone to UTC in both central and subcloud regions
        - Ensure host created_at timestamp is reverted to original

    """
    prev_central_time, prev_sub_time, central_zone, sub_zone, central_auth, subcloud_auth, \
        subcloud = prev_check

    LOG.tc_step("Modify timezone to {} in central region".format(central_zone))
    system_helper.modify_timezone(timezone=central_zone,
                                  auth_info=central_auth)

    LOG.tc_step(
        "Waiting for timestamp for host created_at to update in central region"
    )
    post_central_time = wait_for_timestamp_update(
        prev_timestamp=prev_central_time, auth_info=central_auth)
    assert post_central_time != prev_central_time, \
        "host created_at timestamp did not update after timezone changed " \
        "to {} in central region".format(central_zone)

    LOG.tc_step("Modify timezone to {} in {}".format(sub_zone, subcloud))
    system_helper.modify_timezone(timezone=sub_zone, auth_info=subcloud_auth)

    LOG.tc_step(
        "Waiting for timestamp for same host created_at to update in {}".
        format(subcloud))
    post_sub_time = wait_for_timestamp_update(prev_timestamp=prev_sub_time,
                                              auth_info=subcloud_auth)
    assert post_sub_time != prev_sub_time, \
        "host created_at timestamp did not update after timezone changed to {} " \
        "in {}".format(sub_zone, subcloud)
    assert post_sub_time != post_central_time, \
        "Host created_at timestamp is the same on central and {} when configured with different " \
        "timezones".format(subcloud)

    LOG.tc_step(
        "Ensure host created_at timestamp does not change after subcloud sync audit"
    )
    dc_helper.wait_for_sync_audit(subclouds=subcloud,
                                  fail_ok=True,
                                  timeout=660)
    post_sync_sub_time = system_helper.get_host_values(
        host='controller-0', fields='created_at', auth_info=subcloud_auth)[0]
    assert post_sub_time == post_sync_sub_time, \
        "Host created_at timestamp changed after sync audit on {}".format(subcloud)

    if not system_helper.is_aio_simplex():
        LOG.tc_step(
            "Swact in {} region and verify timezone persists locally".format(
                subcloud))
        host_helper.swact_host(auth_info=subcloud_auth)
        post_swact_sub_zone = system_helper.get_timezone(
            auth_info=subcloud_auth)
        assert post_swact_sub_zone == sub_zone

        post_swact_sub_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=subcloud_auth)[0]
        assert post_swact_sub_time == post_sub_time

    if system_helper.get_standby_controller_name(auth_info=central_auth):
        LOG.tc_step(
            "Swact in central region, and ensure timezone persists locally in central"
            " and subcloud")
        host_helper.swact_host(auth_info=central_auth)

        # Verify central timezone persists
        post_swact_central_zone = system_helper.get_timezone(
            auth_info=central_auth)
        assert post_swact_central_zone == central_zone
        post_swact_central_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=central_auth)[0]
        assert post_swact_central_time == post_central_time

        # Verify subcloud timezone persists
        post_central_swact_sub_zone = system_helper.get_timezone(
            auth_info=subcloud_auth)
        assert post_central_swact_sub_zone == sub_zone
        post_central_swact_sub_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=subcloud_auth)[0]
        assert post_central_swact_sub_time == post_sub_time
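
wait_for_timestamp_update is a module-local helper not shown here; a plausible sketch, reusing the same get_host_values call the test itself makes (the timeout and poll interval are assumptions):

# Hypothetical sketch of the module-local helper used above.
import time

def wait_for_timestamp_update(prev_timestamp, auth_info=None, timeout=300):
    # Poll the host's created_at field until it differs from prev_timestamp.
    end_time = time.time() + timeout
    while time.time() < end_time:
        curr_timestamp = system_helper.get_host_values(
            host='controller-0', fields='created_at', auth_info=auth_info)[0]
        if curr_timestamp != prev_timestamp:
            return curr_timestamp
        time.sleep(20)
    return prev_timestamp  # unchanged; the caller's assert will then fail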
Code example #7
File: test_https_unshared.py  Project: starlingx/test
def test_dc_modify_https(revert_https):
    """
    Test enable/disable https

    Test Steps:
        - Ensure central region and subcloud admin endpoint are https
        - Ensure central region https config differs from subcloud
        - Wait for subcloud sync audit and ensure subcloud https is not changed
        - Verify CLIs in subcloud and central region
        - Modify https on central and subcloud
        - Verify CLIs in subcloud and central region
        - Swact central and subcloud
        - Ensure central region and subcloud admin endpoint are https

    Teardown:
        - Revert https config on central and subcloud

    """
    origin_https_sub, origin_https_central, central_auth, sub_auth, use_dnsname = revert_https
    subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')

    LOG.tc_step(
        "Before testing, Ensure central region and subcloud admin internal endpoint are https")
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
        "Central region admin internal endpoint is not https"
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
        "Subcloud admin internal endpoint is not https"

    new_https_sub = not origin_https_sub
    new_https_central = not origin_https_central

    LOG.tc_step("Ensure central region https to be different than {}".format(subcloud))
    security_helper.modify_https(enable_https=new_https_sub, auth_info=central_auth)

    LOG.tc_step('Check public endpoints accessibility for central region')
    security_helper.check_services_access(region='RegionOne', auth_info=central_auth,
                                          use_dnsname=use_dnsname)
    LOG.tc_step('Check platform horizon accessibility')
    security_helper.check_platform_horizon_access(use_dnsname=use_dnsname)

    LOG.tc_step("Wait for subcloud sync audit with best effort and ensure {} https is not "
                "changed".format(subcloud))
    dc_helper.wait_for_sync_audit(subclouds=subcloud, fail_ok=True, timeout=660)
    assert origin_https_sub == keystone_helper.is_https_enabled(auth_info=sub_auth), \
        "HTTPS config changed in subcloud"

    LOG.tc_step("Verify cli's in {} and central region".format(subcloud))
    verify_cli(sub_auth, central_auth)

    if new_https_central != new_https_sub:
        LOG.tc_step("Set central region https to {}".format(new_https_central))
        security_helper.modify_https(enable_https=new_https_central, auth_info=central_auth)
        LOG.tc_step("Ensure central region and subcloud admin internal endpoint are still https")
        assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
            "Central region admin internal endpoint is not https"
        assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
            "Subcloud admin internal endpoint is not https"
        LOG.tc_step('Check public endpoints accessibility for central region')
        security_helper.check_services_access(region='RegionOne', auth_info=central_auth,
                                              use_dnsname=use_dnsname)
        LOG.tc_step('Check platform horizon accessibility')
        security_helper.check_platform_horizon_access(use_dnsname=use_dnsname)

    LOG.tc_step("Set {} https to {}".format(subcloud, new_https_sub))
    security_helper.modify_https(enable_https=new_https_sub, auth_info=sub_auth)
    LOG.tc_step('Check public endpoints accessibility for {} region'.format(subcloud))
    security_helper.check_services_access(region=subcloud, auth_info=sub_auth,
                                          use_dnsname=use_dnsname)

    LOG.tc_step("Ensure central region and subcloud admin internal endpoint are still https")
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
        "Central region admin internal endpoint is not https"
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
        "Subcloud admin internal endpoint is not https"

    LOG.tc_step("Verify cli's in {} and central region after https modify on "
                "subcloud".format(subcloud))
    verify_cli(sub_auth, central_auth)

    LOG.tc_step("Swact on central region")
    host_helper.swact_host(auth_info=central_auth)

    LOG.tc_step(
        "Verify cli's in {} and central region after central region swact" .format(subcloud))
    verify_cli(sub_auth, central_auth)

    if not system_helper.is_aio_simplex(auth_info=sub_auth):
        LOG.tc_step("Swact on subcloud {}".format(subcloud))
        host_helper.swact_host(auth_info=sub_auth)
        LOG.tc_step("Verify cli's in {} and central region after subcloud swact".format(subcloud))
        verify_cli(sub_auth, central_auth)

    LOG.tc_step("Ensure after swact, central region and subcloud admin internal endpoint are https")
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
        "Central region admin internal endpoint is not https"
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
        "Subcloud admin internal endpoint is not https"
Code example #8
def test_dc_dns_override_local_change(ensure_synced):
    """
    Verify that DNS modification on a subcloud is overridden by the central region config
    Args:
        ensure_synced: test fixture

    Setups:
        - Ensure primary subcloud is managed and DNS config is valid and synced

    Test Steps:
        - Un-manage primary subcloud
        - Configure DNS servers on primary subcloud to an unreachable IP address (8.4.4.4)
        - Wait for sync log for any managed subcloud with best effort
        - Ensure DNS config is not updated on unmanaged primary subcloud
        - Verify nslookup passes on central region and fails on primary subcloud
        - Re-manage primary subcloud and ensure DNS config syncs over
        - Verify nslookup in Central Region and primary subcloud are working as expected

    Teardown:
        - Manage primary subcloud if not managed (module)
        - Reset DNS servers to original value on central region (module)

    """
    primary_subcloud, managed_subclouds, sc_dns = ensure_synced
    new_dns_servers = compose_new_dns_servers(scenario='unreachable_server',
                                              prev_dns_servers=sc_dns)

    LOG.tc_step("Unmanage {}".format(primary_subcloud))
    dc_helper.unmanage_subcloud(subcloud=primary_subcloud, check_first=True)

    LOG.tc_step("Reconfigure DNS on {} from {} to {}".format(
        primary_subcloud, sc_dns, new_dns_servers))
    system_helper.set_dns_servers(new_dns_servers,
                                  auth_info=Tenant.get(
                                      'admin_platform',
                                      dc_region=primary_subcloud))

    managed_cloud = managed_subclouds[0] if managed_subclouds else ''
    LOG.tc_step(
        "Wait for sync update log for managed subcloud {} with best effort".
        format(managed_cloud))
    dc_helper.wait_for_sync_audit(subclouds=managed_cloud,
                                  fail_ok=True,
                                  timeout=660)

    LOG.tc_step(
        "Ensure DNS config is not updated on unmanaged subcloud: {}".format(
            primary_subcloud))
    code = dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud,
                                                  expected_dns=sc_dns,
                                                  fail_ok=True,
                                                  timeout=60)[0]
    assert 1 == code, "Actual return code: {}".format(code)

    LOG.tc_step("Verify nslookup fails on {}".format(primary_subcloud))
    central_res, local_res = verify_dns_on_central_and_subcloud(
        primary_subcloud, fail_ok=True, sc_dns=sc_dns)
    assert 0 == central_res, "nslookup failed on central region"
    assert 1 == local_res, "nslookup succeeded on {} with unreachable DNS servers configured".\
        format(primary_subcloud)

    central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
    if system_helper.get_standby_controller_name(auth_info=central_auth):
        LOG.tc_step("Swact in central region")
        host_helper.swact_host(auth_info=central_auth)

    LOG.tc_step(
        'Re-manage {} and ensure local DNS config is overridden by central config'
        .format(primary_subcloud))
    dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=False)
    dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud,
                                           expected_dns=sc_dns)

    LOG.tc_step('Verify nslookup works in Central Region and {}'.format(
        primary_subcloud))
    verify_dns_on_central_and_subcloud(primary_subcloud, sc_dns=sc_dns)
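
compose_new_dns_servers is a module-local helper not shown in this snippet; for the 'unreachable_server' scenario the docstring pins the address to 8.4.4.4, so a plausible sketch is the following (the real helper's logic may differ):

# Hypothetical sketch of the module-local helper used above.
def compose_new_dns_servers(scenario, prev_dns_servers):
    if scenario == 'unreachable_server':
        # 8.4.4.4 does not answer DNS queries (per the docstring above).
        return ('8.4.4.4',) + tuple(prev_dns_servers[1:])
    raise ValueError('unknown scenario: {}'.format(scenario))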
Code example #9
def test_dc_snmp(snmp_precheck):
    """

    Create SNMP community string and trapdest on central region and check they are propagated to subclouds
    Args:
        snmp_precheck: test fixture for setup/teardown

    Setups:
        - Ensure primary subcloud is managed and SNMP config is synced

    Test Steps:
        - Un-manage primary subcloud
        - Add an SNMP community string and a trapdest on unmanaged subcloud locally
        - Add a different SNMP community string and trapdest on central region
        - Wait for new SNMP configs to sync over to managed online subclouds
        - Ensure central SNMP configs are not updated on unmanaged primary subcloud
        - Re-manage primary subcloud and ensure SNMP config syncs over

    Teardown:
        - Delete created SNMP community string and trapdest on central region (module)

    """
    primary_subcloud, managed_subclouds, central_comms, central_trapdests = snmp_precheck
    central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
    sub_auth = Tenant.get('admin_platform', dc_region=primary_subcloud)

    LOG.tc_step("Unmanage {}".format(primary_subcloud))
    dc_helper.unmanage_subcloud(subcloud=primary_subcloud, check_first=False)

    LOG.tc_step(
        'Add SNMP community string and trapdest to unmanaged subcloud - {}'.
        format(primary_subcloud))
    system_helper.create_snmp_comm('cgcsauto_comm_local', auth_info=sub_auth)
    system_helper.create_snmp_trapdest(comm_string="cgcsauto_trapdest_local",
                                       ip_addr='8.8.8.8',
                                       auth_info=sub_auth)

    LOG.tc_step('Add SNMP community string and trapdest to central region')
    system_helper.create_snmp_comm(SNMP_COMM, auth_info=central_auth)
    system_helper.create_snmp_trapdest(comm_string=SNMP_TRAPDEST[0],
                                       ip_addr=SNMP_TRAPDEST[1],
                                       auth_info=central_auth)

    LOG.tc_step(
        "Wait for new SNMP config to sync over to managed subclouds: {}".
        format(managed_subclouds))
    expt_comms = central_comms + [SNMP_COMM]
    expt_trapdests = central_trapdests + [SNMP_TRAPDEST[1]]
    dc_helper.wait_for_sync_audit(subclouds=managed_subclouds)
    for managed_sub in managed_subclouds:
        dc_helper.wait_for_subcloud_snmp_comms(subcloud=managed_sub,
                                               expected_comms=expt_comms,
                                               timeout=30,
                                               check_interval=10)
        dc_helper.wait_for_subcloud_snmp_trapdests(
            subcloud=managed_sub,
            expected_trapdests=expt_trapdests,
            timeout=30,
            check_interval=10)

    LOG.tc_step(
        "Ensure central SNMP config is not synced to unmanaged subcloud: {}".
        format(primary_subcloud))
    code_comm = dc_helper.wait_for_subcloud_snmp_comms(
        subcloud=primary_subcloud,
        expected_comms=expt_comms,
        timeout=15,
        check_interval=5,
        fail_ok=True)[0]
    code_trapdest = dc_helper.wait_for_subcloud_snmp_trapdests(
        subcloud=primary_subcloud,
        expected_trapdests=expt_trapdests,
        timeout=15,
        check_interval=5,
        fail_ok=True)[0]
    assert code_comm == 1, "SNMP comm is updated unexpectedly on unmanaged subcloud {}".format(
        primary_subcloud)
    assert code_trapdest == 1, "SNMP trapdest is updated unexpectedly on unmanaged subcloud {}".format(
        primary_subcloud)

    LOG.tc_step('Re-manage {} and ensure SNMP config syncs over'.format(
        primary_subcloud))
    dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=False)
    dc_helper.wait_for_subcloud_snmp_comms(subcloud=primary_subcloud,
                                           expected_comms=expt_comms)
    dc_helper.wait_for_subcloud_snmp_trapdests(
        subcloud=primary_subcloud,
        expected_trapdests=expt_trapdests,
        timeout=30,
        check_interval=10)
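
Note that this test also creates cgcsauto_comm_local and a local trapdest on the unmanaged subcloud, which the revert() teardown in Code example #5 does not remove. If that cleanup is needed, a sketch using the same delete helpers the snippets already call (whether it is required depends on the test environment):

# Possible extra cleanup for the locally created SNMP objects
# (not part of the snippets above; sub_auth as defined in the test).
system_helper.delete_snmp_comm(comms='cgcsauto_comm_local', auth_info=sub_auth)
system_helper.delete_snmp_trapdest(ip_addrs='8.8.8.8', auth_info=sub_auth)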