Example No. 1
def test_20_delete_unreachable():
    '''
    add a Load-balancer, make it unreachable, and see if it can still be deleted from the RHUA
    '''
    # for RHBZ#1639996
    status = RHUICLI.add(RHUA, "haproxy", HA_HOSTNAME, unsafe=True)
    nose.tools.ok_(status, msg="unexpected installation status: %s" % status)
    hap_list = RHUICLI.list(RHUA, "haproxy")
    nose.tools.eq_(hap_list, [HA_HOSTNAME])

    Helpers.break_hostname(RHUA, HA_HOSTNAME)

    # delete it
    status = RHUICLI.delete(RHUA, "haproxy", [HA_HOSTNAME], force=True)
    nose.tools.ok_(status, msg="unexpected deletion status: %s" % status)
    # check it
    hap_list = RHUICLI.list(RHUA, "haproxy")
    nose.tools.eq_(hap_list, [])

    Helpers.unbreak_hostname(RHUA)

    # the node remains configured (haproxy)... unconfigure it properly
    # do so by adding and deleting it again
    RHUICLI.add(RHUA, "haproxy", HA_HOSTNAME, unsafe=True)
    RHUICLI.delete(RHUA, "haproxy", [HA_HOSTNAME], force=True)

    # clean up the SSH key
    ConMgr.remove_ssh_keys(RHUA, [HA_HOSTNAME])
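
Helpers.break_hostname and Helpers.unbreak_hostname are assumed to cut off name resolution of the node on the RHUA. A minimal sketch of what such helpers could look like, assuming they only manipulate /etc/hosts and using the same Expect wrapper as the tests (the backup path and the sed command are illustrative, not taken from the test suite):

def break_hostname(connection, hostname):
    """make the host name resolve to an unroutable TEST-NET address"""
    Expect.expect_retval(connection, "cp -a /etc/hosts /etc/hosts.backup")
    # prepend a bogus record; the resolver uses the first matching /etc/hosts entry
    Expect.expect_retval(connection, "sed -i '1i 192.0.2.1 %s' /etc/hosts" % hostname)

def unbreak_hostname(connection):
    """restore the original /etc/hosts"""
    Expect.expect_retval(connection, "mv -f /etc/hosts.backup /etc/hosts")
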
Example No. 2
def test_20_delete_unreachable():
    '''
    add a CDS, make it unreachable, and see if it can still be deleted from the RHUA
    '''
    # for RHBZ#1639996
    # choose a random CDS hostname from the list
    cds = random.choice(CDS_HOSTNAMES)
    status = RHUICLI.add(RHUA, "cds", cds, unsafe=True)
    nose.tools.ok_(status, msg="unexpected installation status: %s" % status)
    cds_list = RHUICLI.list(RHUA, "cds")
    nose.tools.eq_(cds_list, [cds])

    Helpers.break_hostname(RHUA, cds)

    # delete it
    status = RHUICLI.delete(RHUA, "cds", [cds], force=True)
    nose.tools.ok_(status, msg="unexpected deletion status: %s" % status)
    # check it
    cds_list = RHUICLI.list(RHUA, "cds")
    nose.tools.eq_(cds_list, [])

    Helpers.unbreak_hostname(RHUA)

    # the node remains configured (RHUI mount point, httpd)... unconfigure it properly
    # do so by adding and deleting it again
    RHUICLI.add(RHUA, "cds", cds, unsafe=True)
    RHUICLI.delete(RHUA, "cds", [cds], force=True)
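
Examples 1 and 2 follow the same add, break, force-delete, re-add and clean-delete flow and differ only in the node type. A sketch of a shared helper (hypothetical, not part of the test suite) that both tests could delegate to:

def check_delete_unreachable_node(node_type, hostname):
    """add a node, break its name resolution, and force-delete it (RHBZ#1639996)"""
    status = RHUICLI.add(RHUA, node_type, hostname, unsafe=True)
    nose.tools.ok_(status, msg="unexpected installation status: %s" % status)
    nose.tools.eq_(RHUICLI.list(RHUA, node_type), [hostname])
    Helpers.break_hostname(RHUA, hostname)
    status = RHUICLI.delete(RHUA, node_type, [hostname], force=True)
    nose.tools.ok_(status, msg="unexpected deletion status: %s" % status)
    nose.tools.eq_(RHUICLI.list(RHUA, node_type), [])
    Helpers.unbreak_hostname(RHUA)
    # the node remains configured; add and delete it again to unconfigure it properly
    RHUICLI.add(RHUA, node_type, hostname, unsafe=True)
    RHUICLI.delete(RHUA, node_type, [hostname], force=True)
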
Example No. 3
def test_05_check_haproxy_cfg():
    """check if the second CDS was added to the HAProxy configuration file"""
    if not CDS2_EXISTS:
        raise nose.exc.SkipTest("The second CDS does not exist")
    nose.tools.ok_(Helpers.cds_in_haproxy_cfg(HAPROXY, CDS_HOSTNAMES[1]))
    # also check if the first one is still there
    nose.tools.ok_(Helpers.cds_in_haproxy_cfg(HAPROXY, CDS_HOSTNAMES[0]))
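
Helpers.cds_in_haproxy_cfg is assumed to look for the CDS host name among the backend servers in the HAProxy configuration. A hypothetical sketch, assuming the configuration lives in the default /etc/haproxy/haproxy.cfg and that stitches raises ExpectFailed on an unexpected exit status:

from stitches.expect import Expect, ExpectFailed  # the wrapper used throughout these tests

def cds_in_haproxy_cfg(connection, hostname):
    """return True if the CDS host name is listed as a server in haproxy.cfg"""
    try:
        Expect.expect_retval(connection,
                             "grep -q 'server %s' /etc/haproxy/haproxy.cfg" % hostname)
        return True
    except ExpectFailed:
        return False
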
Example No. 4
def test_08_qpid_linearstore():
    '''
        check if the qpid-cpp-server-linearstore package is available
    '''
    # for RHBZ#1702254
    needs_registration = not Helpers.is_iso_installation(RHUA) and not Helpers.is_registered(RHUA)
    if needs_registration:
        with open("/etc/rhui3_tests/tested_repos.yaml") as configfile:
            cfg = yaml.safe_load(configfile)
        sub = cfg["subscriptions"]["RHUI"]
        RHSMRHUI.register_system(RHUA)
        RHSMRHUI.attach_subscription(RHUA, sub)
        RHSMRHUI.enable_rhui_repo(RHUA, False)
    Expect.expect_retval(RHUA, "yum list qpid-cpp-server-linearstore", timeout=30)
    if needs_registration:
        RHSMRHUI.unregister_system(RHUA)
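
Helpers.is_registered, which decides above whether registration is needed, can be sketched in the same style, assuming it only checks the exit status of subscription-manager identity (zero only on a registered system):

def is_registered(connection):
    """return True if the system is registered with RHSM"""
    try:
        Expect.expect_retval(connection, "subscription-manager identity")
        return True
    except ExpectFailed:
        return False
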
Example No. 5
def test_21_check_cleanup():
    '''
    check if the haproxy service was stopped
    '''
    # for RHBZ#1640002
    nose.tools.ok_(not Helpers.check_service(HAPROXY, "haproxy"),
                   msg="haproxy is still running on %s" % HA_HOSTNAME)
Example No. 6
def test_17_legacy_ca():
    '''
        check for bogus error messages if a legacy CA is used
    '''
    # for RHBZ#1731856
    # get the CA cert from the RHUA and upload it to the CDS
    # the cert is among the extra RHUI files, i.e. in the directory also containing custom RPMs
    cds_lb = ConMgr.get_cds_lb_hostname()
    remote_ca_file = join(CUSTOM_RPMS_DIR, LEGACY_CA_FILE)
    local_ca_file = join(TMPDIR, LEGACY_CA_FILE)
    Util.fetch(RHUA, remote_ca_file, local_ca_file)
    Helpers.add_legacy_ca(CDS, local_ca_file)
    # re-fetch repodata on the client to trigger the OID validator on the CDS
    Expect.expect_retval(CLI, "yum clean all ; yum repolist enabled")
    # the grep must exit with 1, i.e. the bogus error message must not appear in the log
    Expect.expect_retval(
        CDS, "egrep 'Cert verification failed against [0-9]+ ca cert' " +
        "/var/log/httpd/%s_error_ssl.log" % cds_lb, 1)
Example No. 7
def test_99_cleanup(self):
    '''
        remove repos, certs, and client RPMs; remove RPMs from the client; uninstall CDS and HAProxy nodes
    '''
    test_rpm_name = self.custom_rpm.rsplit('-', 2)[0]
    RHUIManagerRepo.delete_all_repos(RHUA)
    nose.tools.assert_equal(RHUIManagerRepo.list(RHUA), [])
    Expect.expect_retval(RHUA, "rm -f /root/test_ent_cli*")
    Expect.expect_retval(RHUA, "rm -rf /root/test_cli_rpm-3.0/")
    Util.remove_rpm(CLI,
                    [self.test_package, "test_cli_rpm", test_rpm_name])
    rmtree(TMPDIR)
    Helpers.del_legacy_ca(CDS, LEGACY_CA_FILE)
    if not getenv("RHUISKIPSETUP"):
        RHUIManagerInstance.delete_all(RHUA, "loadbalancers")
        RHUIManagerInstance.delete_all(RHUA, "cds")
        RHUIManager.remove_rh_certs(RHUA)
Example No. 8
def register_system(connection,
                    username="",
                    password="",
                    fail_if_registered=False):
    """register with RHSM"""
    # if username or password isn't specified, it will be obtained using
    # the get_credentials method on the remote host -- only usable with the RHUA
    # if the system is already registered, it will be unregistered first,
    # unless fail_if_registered == True
    if fail_if_registered and Helpers.is_registered(connection):
        raise RuntimeError("The system is already registered.")
    if not username or not password:
        username, password = Helpers.get_credentials(connection)
    Expect.expect_retval(
        connection,
        "subscription-manager register --force --type rhui " +
        "--username %s --password %s" % (username, password),
        timeout=60)
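
Usage sketches (the credentials below are placeholders): Example 4 passes only the connection so that the credentials are looked up on the RHUA, but they can also be given explicitly:

# let the method obtain the credentials on the remote host (RHUA connections only)
RHSMRHUI.register_system(RHUA)
# or pass explicit credentials and refuse to touch an already registered system
RHSMRHUI.register_system(RHUA, "my-rh-login", "my-rh-password", fail_if_registered=True)
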
Example No. 9
def test_10_check_cleanup():
    '''
        check if Apache was stopped and the remote file system unmounted on all CDSs
    '''
    service = "httpd"
    mdir = "/var/lib/rhui/remote_share"
    dirty_hosts = dict()
    errors = []

    dirty_hosts["httpd"] = [cds.hostname for cds in CDS if Helpers.check_service(cds, service)]
    dirty_hosts["mount"] = [cds.hostname for cds in CDS if Helpers.check_mountpoint(cds, mdir)]

    if dirty_hosts["httpd"]:
        errors.append("Apache is still running on %s" % dirty_hosts["httpd"])
    if dirty_hosts["mount"]:
        errors.append("The remote file system is still mounted on %s" % dirty_hosts["mount"])

    nose.tools.ok_(not errors, msg=errors)
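
Helpers.check_service and Helpers.check_mountpoint (the former is also used in Example 5) are assumed to be thin wrappers around systemctl and mountpoint; a hypothetical sketch in the same style as the checks above:

def check_service(connection, service):
    """return True if the systemd service is active on the remote host"""
    try:
        Expect.expect_retval(connection, "systemctl is-active --quiet %s" % service)
        return True
    except ExpectFailed:
        return False

def check_mountpoint(connection, directory):
    """return True if the directory is currently mounted on the remote host"""
    try:
        Expect.expect_retval(connection, "mountpoint -q %s" % directory)
        return True
    except ExpectFailed:
        return False
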
Example No. 10
def test_04_add_containers(self):
    '''
    add containers
    '''
    # first, add a container from RH
    # get credentials and enter them when prompted
    credentials = Helpers.get_credentials(RHUA)
    RHUIManagerRepo.add_container(RHUA, self.container_name,
                                  self.container_id,
                                  self.container_displayname,
                                  [""] + credentials)
    # second, add a container from Quay
    # get Quay credentials
    credentials = Helpers.get_credentials(RHUA, "quay")
    quay_url = Helpers.get_registry_url("quay")
    RHUIManagerRepo.add_container(RHUA,
                                  self.container_quay["name"],
                                  credentials=[quay_url] + credentials)
    # third, add a container from the Docker hub
    docker_url = Helpers.get_registry_url("docker")
    RHUIManagerRepo.add_container(RHUA,
                                  self.container_docker["name"],
                                  credentials=[docker_url])
Example No. 11
def test_12_delete_unreachable():
    '''
    add a CDS, make it unreachable, and see if it can still be deleted from the RHUA
    '''
    # for RHBZ#1639996
    # choose a random CDS hostname from the list
    cds = random.choice(CDS_HOSTNAMES)
    RHUIManagerInstance.add_instance(RHUA, "cds", cds)
    cds_list = RHUIManagerInstance.list(RHUA, "cds")
    nose.tools.assert_not_equal(cds_list, [])

    Helpers.break_hostname(RHUA, cds)

    # delete it
    RHUIManagerInstance.delete(RHUA, "cds", [cds])
    # check it
    cds_list = RHUIManagerInstance.list(RHUA, "cds")
    nose.tools.assert_equal(cds_list, [])

    Helpers.unbreak_hostname(RHUA)

    # the node remains configured (RHUI mount point, httpd)... unconfigure it properly
    RHUIManagerInstance.add_instance(RHUA, "cds", cds)
    RHUIManagerInstance.delete(RHUA, "cds", [cds])
Example No. 12
def add_container(connection, containername, containerid="", displayname="", credentials=""):
    '''
    add a new Red Hat container
    '''
    default_registry = Helpers.get_registry_url("default", connection)
    # if the credentials parameter is supplied, it's supposed to be a list containing:
    #   0 - registry hostname if not using the default one
    #   1 - username (if required; the default registry requires the RH (CCSP) login)
    #   2 - password (if required)
    # do NOT supply them if they're in rhui-tools.conf and you want to use the default registry;
    # this method will fail otherwise, because it will expect rhui-manager to ask for them
    RHUIManager.screen(connection, "repo")
    Expect.enter(connection, "ad")
    Expect.expect(connection, "Specify URL of registry .*:")
    if credentials and credentials[0]:
        registry = credentials[0]
        Expect.enter(connection, registry)
    else:
        registry = default_registry
        Expect.enter(connection, "")
    Expect.expect(connection, "Name of the container in the registry:")
    Expect.enter(connection, containername)
    Expect.expect(connection, "Unique ID for the container .*]", 60)
    Expect.enter(connection, containerid)
    Expect.expect(connection, "Display name for the container.*]:")
    Expect.enter(connection, displayname)
    # login & password provided, or a non-default registry specified
    if credentials or registry != default_registry:
        # answer the username and password prompts, sending empty strings if no
        # credentials were supplied (the original prompt handling was garbled in
        # this snippet; this is a plausible reconstruction)
        Expect.expect(connection, "Registry username:")
        Expect.enter(connection, credentials[1] if len(credentials) > 1 else "")
        Expect.expect(connection, "Registry password:")
        Expect.enter(connection, credentials[2] if len(credentials) > 2 else "")
    if not containerid:
        containerid = Util.safe_pulp_repo_name(containername)
    if not displayname:
        displayname = Util.safe_pulp_repo_name(containername)
    RHUIManager.proceed_with_check(connection,
                                   "The following container will be added:",
                                   ["Registry URL: " + registry,
                                    "Container Id: " + containerid,
                                    "Display Name: " + displayname,
                                    "Upstream Container Name: " + containername])
    RHUIManager.quit(connection)
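
Usage sketches matching the call patterns in Examples 10 and 13 (the container names and credentials are placeholders):

# default registry: empty registry URL plus the RH (CCSP) credentials,
# which are entered at the username/password prompts
RHUIManagerRepo.add_container(RHUA, "some-namespace/some-container",
                              credentials=["", "my-ccsp-login", "my-ccsp-password"])
# public registry that needs no authentication: only its URL is supplied
RHUIManagerRepo.add_container(RHUA, "library/some-container",
                              credentials=[Helpers.get_registry_url("docker")])
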
Example No. 13
def test_14_add_containers(self):
    '''add containers'''
    # use saved credentials; save them in the RHUI configuration first
    # first a RH container
    Helpers.set_registry_credentials(RHUA)
    RHUIManagerRepo.add_container(RHUA, self.containers["rh"]["name"], "",
                                  self.containers["rh"]["displayname"])
    # then a Quay container
    Helpers.set_registry_credentials(RHUA, "quay", backup=False)
    RHUIManagerRepo.add_container(RHUA,
                                  self.containers["alt"]["quay"]["name"])
    # and finally a Docker container; we'll need the Docker Hub URL as there's no
    # auth config for it
    url = Helpers.get_registry_url("docker")
    Helpers.set_registry_credentials(RHUA, "docker", [url], backup=False)
    RHUIManagerRepo.add_container(RHUA,
                                  self.containers["alt"]["docker"]["name"])
    # check all of that
    repo_list = RHUIManagerRepo.list(RHUA)
    nose.tools.ok_(
        len(repo_list) == 3,
        msg="The containers weren't added. Actual repolist: %s" %
        repo_list)
Example No. 14
def test_16_delete_containers():
    '''delete the containers'''
    Helpers.restore_rhui_tools_conf(RHUA)
    RHUIManagerRepo.delete_all_repos(RHUA)
    nose.tools.ok_(not RHUIManagerRepo.list(RHUA))
Example No. 15
def test_11_check_haproxy_cfg():
    """check if the first CDS was added to the HAProxy configuration file"""
    nose.tools.ok_(Helpers.cds_in_haproxy_cfg(HAPROXY, CDS_HOSTNAMES[0]))
Example No. 16
def test_09_check_haproxy_cfg():
    """check if the first CDS was deleted from the HAProxy configuration file"""
    nose.tools.ok_(not Helpers.cds_in_haproxy_cfg(HAPROXY, CDS_HOSTNAMES[0]))
Example No. 17
def test_07_check_haproxy_cfg():
    """check if the second CDS (and only it) was deleted from the HAProxy configuration file"""
    if not CDS2_EXISTS:
        raise nose.exc.SkipTest("The second CDS does not exist")
    nose.tools.ok_(not Helpers.cds_in_haproxy_cfg(HAPROXY, CDS_HOSTNAMES[1]))
    nose.tools.ok_(Helpers.cds_in_haproxy_cfg(HAPROXY, CDS_HOSTNAMES[0]))
Example No. 18
CDS_LB = ConMgr.get_cds_lb_hostname()
WANTED_FILES_RHUA = [
    "/etc/rhui-installer/answers.yaml", "/etc/rhui/rhui-tools.conf",
    "/root/.rhui/rhui.log", "/var/log/kafo/configuration.log",
    "/var/log/rhui-subscription-sync.log"
]
WANTED_FILES_CDS = [
    "/etc/httpd/conf.d/03-crane.conf",
    "/etc/httpd/conf.d/25-%s.conf" % CDS_LB, "/etc/pulp/",
    "/var/log/httpd/%s_access_ssl.log" % CDS_LB,
    "/var/log/httpd/%s_error_ssl.log" % CDS_LB
]

CMD_RHUA = "rhui-manager status"
CMD_CDS = "ls -lR /var/lib/rhui/remote_share"
WANTED_FILES_RHUA.append(Helpers.encode_sos_command(CMD_RHUA))
WANTED_FILES_CDS.append(Helpers.encode_sos_command(CMD_CDS))
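
Helpers.encode_sos_command presumably maps a command to the file name under which sosreport stores its output; a rough sketch, assuming the usual sos mangling of spaces to underscores and slashes to dots (the sos_commands prefix and the exact rules are assumptions, not taken from the test suite):

def encode_sos_command(command):
    """return the expected path of the command's output inside an sosreport tarball"""
    # e.g. "ls -lR /var/lib/rhui/remote_share" -> "sos_commands/*/ls_-lR_.var.lib.rhui.remote_share"
    return "sos_commands/*/" + command.replace(" ", "_").replace("/", ".")
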


def setup():
    '''
        announce the beginning of the test run
    '''
    print("*** Running %s: *** " % basename(__file__))


def test_00_rhui_init():
    '''
        add a CDS and run rhui-subscription-sync to ensure their log files exist
    '''
    #  use initial_run first to ensure we're logged in to rhui-manager