Example 1
    def test_delete(self):
        if do_not_delete_instance():
            report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was specified, "
                       "skipping delete...")
            raise SkipTest("TESTS_DO_NOT_DELETE_INSTANCE was specified.")
        global dbaas
        if not hasattr(instance_info, "initial_result"):
            raise SkipTest("Instance was never created, skipping test...")
        if WHITE_BOX:
            # Change this code to get the volume using the API.
            # That way we can keep this check while keeping the test
            # black box.
            admin_context = context.get_admin_context()
            volumes = db.volume_get_all_by_instance(admin_context,
                                                    instance_info.local_id)
            instance_info.volume_id = volumes[0].id
        # Update the report so the logs inside the instance will be saved.
        report.update()
        dbaas.instances.delete(instance_info.id)

        attempts = 0
        try:
            time.sleep(1)
            result = True
            while result is not None:
                attempts += 1
                result = dbaas.instances.get(instance_info.id)
                assert_equal(200, dbaas.last_http_code)
                assert_equal("SHUTDOWN", result.status)
        except exceptions.NotFound:
            pass
        except Exception as ex:
            fail("A failure occured when trying to GET instance %s for the %d "
                 "time: %s" % (str(instance_info.id), attempts, str(ex)))
    def test_delete(self):
        if do_not_delete_instance():
            report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was specified, "
                       "skipping delete...")
            raise SkipTest("TESTS_DO_NOT_DELETE_INSTANCE was specified.")
        global dbaas
        if not hasattr(instance_info, "initial_result"):
            raise SkipTest("Instance was never created, skipping test...")
        if WHITE_BOX:
            # Change this code to get the volume using the API.
            # That way we can keep this check while keeping the test
            # black box.
            admin_context = context.get_admin_context()
            volumes = db.volume_get_all_by_instance(admin_context,
                                                    instance_info.local_id)
            instance_info.volume_id = volumes[0].id
        # Update the report so the logs inside the instance will be saved.
        report.update()
        dbaas.instances.delete(instance_info.id)

        attempts = 0
        try:
            time.sleep(1)
            result = True
            while result is not None:
                attempts += 1
                result = dbaas.instances.get(instance_info.id)
                assert_equal(200, dbaas.last_http_code)
                assert_equal("SHUTDOWN", result.status)
        except exceptions.NotFound:
            pass
        except Exception as ex:
            fail("A failure occured when trying to GET instance %s for the %d "
                 "time: %s" % (str(instance_info.id), attempts, str(ex)))
Example 3
 def set_up(self):
     """Create a ton of instances."""
     super(Create_11, self).set_up()
     #self.delete_instances()
     self.create_instances()
     report.log("Create_11: Created the following batch of instances:")
     for id in self.ids:
         report.log(id)
Example 4
 def set_up(self):
     """Create a ton of instances."""
     super(Create_11, self).set_up()
     #self.delete_instances()
     self.create_instances()
     report.log("Create_11: Created the following batch of instances:")
     for id in self.ids:
         report.log(id)
Example 5
 def __cb(*args, **kwargs):
     # %s would also turn a value into a string, but in some rare cases an
     # explicit str()/repr() call is less likely to raise an exception.
     arg_strs = [repr(arg) for arg in args]
     arg_strs += ['%s=%s' % (repr(key), repr(value))
                  for (key, value) in kwargs.items()]
     report.log("[RDC] Calling : %s(%s)..." % (name, ','.join(arg_strs)))
     value = func(*args, **kwargs)
     report.log("[RDC]     returned %s." % str(value))
     return value
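
The __cb function closes over name and func, so it is presumably the inner
function of a wrapper factory that logs every proxied call. A minimal sketch of
such a factory follows; the factory name and the report parameter are
assumptions for illustration, not taken from the original code.

def make_logging_callback(name, func, report):
    # Hypothetical factory (name assumed): wrap func so that every call and
    # its return value are written to the test report.
    def __cb(*args, **kwargs):
        arg_strs = [repr(arg) for arg in args]
        arg_strs += ['%s=%s' % (repr(key), repr(value))
                     for (key, value) in kwargs.items()]
        report.log("[RDC] Calling : %s(%s)..." % (name, ','.join(arg_strs)))
        value = func(*args, **kwargs)
        report.log("[RDC]     returned %s." % str(value))
        return value
    return __cb

Each public method of a client object could then be swapped for
make_logging_callback(method_name, method, report) to trace an entire API
surface during a test run.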
Example 6
 def resize_should_not_delete_users(self):
     """Resize should not delete users."""
     # Resize has an incredibly weird bug where users are deleted after
     # a resize. The code below is an attempt to catch this while proceeding
     # with the rest of the test (note the use of runs_after).
     if USE_IP:
         self.connection.connect()
         if not self.connection.is_connected():
             # OK, this is definitely a failure, but before we raise an
             # error, let's recreate the user to see how far we can get.
             report.log("Having to recreate the test_user! Resizing "
                        "somehow killed it!")
             self.log_current_users()
             self.create_user()
             fail("Somehow, the resize made the test user disappear.")
    def test_create(self):
        databases = []
        databases.append({"name": "firstdb", "character_set": "latin2",
                          "collate": "latin2_general_ci"})
        databases.append({"name": "db2"})
        instance_info.databases = databases
        users = []
        users.append({"name": "lite", "password": "******",
                      "databases": [{"name": "firstdb"}]})
        instance_info.users = users
        instance_info.volume = {'size': 2}

        if create_new_instance():
            instance_info.initial_result = dbaas.instances.create(
                                               instance_info.name,
                                               instance_info.dbaas_flavor_href,
                                               instance_info.volume,
                                               databases, users)
        else:
            id = existing_instance()
            instance_info.initial_result = dbaas.instances.get(id)

        result = instance_info.initial_result
        instance_info.id = result.id
        instance_info.local_id = dbapi.localid_from_uuid(result.id)

        if create_new_instance():
            assert_equal(result.status, dbaas_mapping[power_state.BUILDING])
        else:
            report.log("Test was invoked with TESTS_USE_INSTANCE_ID=%s, so no "
                       "instance was actually created." % id)
            report.log("Local id = %d" % instance_info.get_local_id())

        # Check that only these attrs are returned in the create response.
        expected_attrs = ['created', 'flavor', 'hostname', 'id', 'links',
                          'name', 'status', 'updated', 'volume']
        if create_new_instance():
            CheckInstance(result._info).attrs_exist(
                result._info, expected_attrs, msg="Create response")
        # Don't CheckInstance if the instance already exists.
        CheckInstance(result._info).flavor()
        CheckInstance(result._info).links(result._info['links'])
        CheckInstance(result._info).volume()
    def test_create(self):
        databases = []
        databases.append({"name": "firstdb", "character_set": "latin2",
                          "collate": "latin2_general_ci"})
        databases.append({"name": "db2"})
        instance_info.databases = databases
        users = []
        users.append({"name": "lite", "password": "******",
                      "databases": [{"name": "firstdb"}]})
        instance_info.users = users
        if test_config.values['reddwarf_main_instance_has_volume']:
            instance_info.volume = {'size': 1}
        else:
            instance_info.volume = None

        if create_new_instance():
            instance_info.initial_result = dbaas.instances.create(
                                               instance_info.name,
                                               instance_info.dbaas_flavor_href,
                                               instance_info.volume,
                                               databases, users)
        else:
            id = existing_instance()
            instance_info.initial_result = dbaas.instances.get(id)

        result = instance_info.initial_result
        instance_info.id = result.id
        if WHITE_BOX:
            instance_info.local_id = dbapi.localid_from_uuid(result.id)

        report.log("Instance UUID = %s" % instance_info.id)
        if create_new_instance():
            if WHITE_BOX:
                building = dbaas_mapping[power_state.BUILDING]
                assert_equal(result.status, building)
            assert_equal("BUILD", instance_info.initial_result.status)

        else:
            report.log("Test was invoked with TESTS_USE_INSTANCE_ID=%s, so no "
                       "instance was actually created." % id)
            if WHITE_BOX:
                report.log("Local id = %d" % instance_info.get_local_id())

        # Check that only these attrs are returned in the create response.
        expected_attrs = ['created', 'flavor', 'addresses', 'id', 'links',
                          'name', 'status', 'updated']
        if test_config.values['reddwarf_can_have_volume']:
            expected_attrs.append('volume')
        if test_config.values['reddwarf_dns_support']:
            expected_attrs.append('hostname')

        with CheckInstance(result._info) as check:
            if create_new_instance():
                check.attrs_exist(result._info, expected_attrs,
                                  msg="Create response")
            # Don't CheckInstance if the instance already exists.
            check.flavor()
            check.links(result._info['links'])
            if test_config.values['reddwarf_can_have_volume']:
                check.volume()
    def test_instance_created(self):
        if WHITE_BOX:
            # Checks the db status as well as the REST API status.
            while True:
                guest_status = dbapi.guest_status_get(instance_info.local_id)
                if guest_status.state != power_state.RUNNING:
                    result = dbaas.instances.get(instance_info.id)
                    # I think there's a small race condition which can occur
                    # between the time you grab "guest_status" and "result," so
                    # RUNNING is allowed in addition to BUILDING.
                    self.assertTrue(
                        result.status == dbaas_mapping[power_state.BUILDING] or
                        result.status == dbaas_mapping[power_state.RUNNING],
                        "Result status was %s" % result.status)
                    time.sleep(5)
                else:
                    break
            report.log("Local id = %d" % instance_info.get_local_id())
        else:
            # This version just checks the REST API status.
            def result_is_active():
                instance = dbaas.instances.get(instance_info.id)
                if instance.status == "ACTIVE":
                    return True
                else:
                    # If it's not ACTIVE, anything but BUILD must be
                    # an error.
                    assert_equal("BUILD", instance.status)
                    assert_equal(instance.volume.get('used', None), None)
                    return False

            poll_until(result_is_active)
            result = dbaas.instances.get(instance_info.id)
        report.log("Created an instance, ID = %s." % instance_info.id)
        report.log("TIP:")
        report.log("Rerun the tests with TESTS_USE_INSTANCE_ID=%s to skip "
                   "ahead to this point." % instance_info.id)
        report.log("Add TESTS_DO_NOT_DELETE_INSTANCE=True to avoid deleting "
                   "the instance at the end of the tests.")
Example 10
    def test_instance_created(self):
        if WHITE_BOX:
            # Checks the db status as well as the REST API status.
            while True:
                guest_status = dbapi.guest_status_get(instance_info.local_id)
                if guest_status.state != power_state.RUNNING:
                    result = dbaas.instances.get(instance_info.id)
                    # I think there's a small race condition which can occur
                    # between the time you grab "guest_status" and "result," so
                    # RUNNING is allowed in addition to BUILDING.
                    self.assertTrue(
                        result.status == dbaas_mapping[power_state.BUILDING]
                        or result.status == dbaas_mapping[power_state.RUNNING],
                        "Result status was %s" % result.status)
                    time.sleep(5)
                else:
                    break
            report.log("Local id = %d" % instance_info.get_local_id())
        else:
            # This version just checks the REST API status.
            def result_is_active():
                instance = dbaas.instances.get(instance_info.id)
                if instance.status == "ACTIVE":
                    return True
                else:
                    # If it's not ACTIVE, anything but BUILD must be
                    # an error.
                    assert_equal("BUILD", instance.status)
                    assert_equal(instance.volume.get('used', None), None)
                    return False

            poll_until(result_is_active)
            result = dbaas.instances.get(instance_info.id)
        report.log("Created an instance, ID = %s." % instance_info.id)
        report.log("TIP:")
        report.log("Rerun the tests with TESTS_USE_INSTANCE_ID=%s to skip "
                   "ahead to this point." % instance_info.id)
        report.log("Add TESTS_DO_NOT_DELETE_INSTANCE=True to avoid deleting "
                   "the instance at the end of the tests.")
 def test_instance_created(self):
     while True:
         guest_status = dbapi.guest_status_get(instance_info.local_id)
         if guest_status.state != power_state.RUNNING:
             result = dbaas.instances.get(instance_info.id)
             # I think there's a small race condition which can occur
             # between the time you grab "guest_status" and "result," so
             # RUNNING is allowed in addition to BUILDING.
             self.assertTrue(
                 result.status == dbaas_mapping[power_state.BUILDING] or
                 result.status == dbaas_mapping[power_state.RUNNING],
                 "Result status was %s" % result.status)
             time.sleep(5)
         else:
             break
     report.log("Created an instance, ID = %s." % instance_info.id)
     report.log("Local id = %d" % instance_info.get_local_id())
     report.log("Rerun the tests with TESTS_USE_INSTANCE_ID=%s to skip ahead "
                "to this point." % instance_info.id)
Example 12
def run_main(test_importer):

    add_support_for_localization()

    # Strip non-nose arguments out before passing this to nosetests

    repl = False
    nose_args = []
    conf_file = "~/test.conf"
    show_elapsed = True
    groups = []
    print("RUNNING TEST ARGS :  " + str(sys.argv))
    extra_test_conf_lines = []
    rdl_config_file = None
    nova_flag_file = None
    index = 0
    while index < len(sys.argv):
        arg = sys.argv[index]
        if arg[:2] == "-i" or arg == '--repl':
            repl = True
        elif arg[:7] == "--conf=":
            conf_file = os.path.expanduser(arg[7:])
            print("Setting TEST_CONF to " + conf_file)
            os.environ["TEST_CONF"] = conf_file
        elif arg[:8] == "--group=":
            groups.append(arg[8:])
        elif arg == "--test-config":
            if index >= len(sys.argv) - 1:
                print('Expected an argument to follow "--test-config".')
                sys.exit()
            conf_line = sys.argv[index + 1]
            extra_test_conf_lines.append(conf_line)
            # Skip the value we just consumed so it is not passed to nose.
            index += 1
        elif arg[:11] == "--flagfile=":
            pass
        elif arg[:14] == "--config-file=":
            rdl_config_file = arg[14:]
        elif arg[:13] == "--nova-flags=":
            nova_flag_file = arg[13:]
        elif arg.startswith('--hide-elapsed'):
            show_elapsed = False
        else:
            nose_args.append(arg)
        index += 1

    # Many of the test decorators depend on configuration values, so before
    # we start importing modules we have to load the test config followed by
    # the flag files.
    from trove.tests.config import CONFIG

    # Find config file.
    if not "TEST_CONF" in os.environ:
        raise RuntimeError("Please define an environment variable named " +
                           "TEST_CONF with the location to a conf file.")
    file_path = os.path.expanduser(os.environ["TEST_CONF"])
    if not os.path.exists(file_path):
        raise RuntimeError("Could not find TEST_CONF at " + file_path + ".")
    # Load config file and then any lines we read from the arguments.
    CONFIG.load_from_file(file_path)
    for line in extra_test_conf_lines:
        CONFIG.load_from_line(line)

    if CONFIG.white_box:  # If white-box testing, set up the flags.
        # Handle loading up RDL's config file madness.
        initialize_rdl_config(rdl_config_file)

    # Set up the report, and print out how we're running the tests.
    from tests.util import report
    from datetime import datetime
    report.log("Trove Integration Tests, %s" % datetime.now())
    report.log("Invoked via command: " + str(sys.argv))
    report.log("Groups = " + str(groups))
    report.log("Test conf file = %s" % os.environ["TEST_CONF"])
    if CONFIG.white_box:
        report.log("")
        report.log("Test config file = %s" % rdl_config_file)
    report.log("")
    report.log("sys.path:")
    for path in sys.path:
        report.log("\t%s" % path)

    # Now that all configurations are loaded, it's time to import everything.
    test_importer()

    atexit.register(_clean_up)

    c = config.Config(stream=sys.stdout,
                      env=os.environ,
                      verbosity=3,
                      plugins=core.DefaultPluginManager())
    runner = NovaTestRunner(stream=c.stream,
                            verbosity=c.verbosity,
                            config=c,
                            show_elapsed=show_elapsed,
                            known_bugs=CONFIG.known_bugs)
    MAIN_RUNNER = runner

    if repl:
        # Neutralize sys.exit, which is otherwise called at the end of the
        # test run, in case we want to start a REPL.
        sys.exit = lambda x: None

    proboscis.TestProgram(argv=nose_args,
                          groups=groups,
                          config=c,
                          testRunner=MAIN_RUNNER).run_and_exit()
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
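
The hand-rolled loop at the top of run_main parses only the runner's own flags
and forwards everything else to nose. Purely as an illustration (not part of
the original runner), the same split could be done with argparse's
parse_known_args, which likewise hands back the unrecognized arguments.

import argparse


def parse_runner_args(argv):
    # Illustrative sketch only: pre-parse the runner's flags and keep whatever
    # argparse does not recognize so it can still be passed on to nose.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-i', '--repl', action='store_true')
    parser.add_argument('--conf')
    parser.add_argument('--group', action='append', default=[])
    parser.add_argument('--test-config', dest='extra_test_conf_lines',
                        action='append', default=[])
    parser.add_argument('--config-file', dest='rdl_config_file')
    parser.add_argument('--nova-flags', dest='nova_flag_file')
    parser.add_argument('--hide-elapsed', action='store_true')
    options, nose_args = parser.parse_known_args(argv)
    return options, nose_args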
Example 13
def run_main(test_importer):

    add_support_for_localization()

    # Strip non-nose arguments out before passing this to nosetests

    repl = False
    nose_args = []
    conf_file = "~/test.conf"
    show_elapsed = True
    groups = []
    print("RUNNING TEST ARGS :  " + str(sys.argv))
    extra_test_conf_lines = []
    rdl_config_file = None
    nova_flag_file = None
    index = 0
    while index < len(sys.argv):
        arg = sys.argv[index]
        if arg[:2] == "-i" or arg == '--repl':
            repl = True
        elif arg[:7] == "--conf=":
            conf_file = os.path.expanduser(arg[7:])
            print("Setting TEST_CONF to " + conf_file)
            os.environ["TEST_CONF"] = conf_file
        elif arg[:8] == "--group=":
            groups.append(arg[8:])
        elif arg == "--test-config":
            if index >= len(sys.argv) - 1:
                print('Expected an argument to follow "--test-config".')
                sys.exit()
            conf_line = sys.argv[index + 1]
            extra_test_conf_lines.append(conf_line)
            # Skip the value we just consumed so it is not passed to nose.
            index += 1
        elif arg[:11] == "--flagfile=":
            pass
        elif arg[:14] == "--config-file=":
            rdl_config_file = arg[14:]
        elif arg[:13] == "--nova-flags=":
            nova_flag_file = arg[13:]
        elif arg.startswith('--hide-elapsed'):
            show_elapsed = False
        else:
            nose_args.append(arg)
        index += 1

    # Many of the test decorators depend on configuration values, so before
    # we start importing modules we have to load the test config followed by
    # the flag files.
    from trove.tests.config import CONFIG

    # Find config file.
    if not "TEST_CONF" in os.environ:
        raise RuntimeError("Please define an environment variable named " +
                           "TEST_CONF with the location to a conf file.")
    file_path = os.path.expanduser(os.environ["TEST_CONF"])
    if not os.path.exists(file_path):
        raise RuntimeError("Could not find TEST_CONF at " + file_path + ".")
    # Load config file and then any lines we read from the arguments.
    CONFIG.load_from_file(file_path)
    for line in extra_test_conf_lines:
        CONFIG.load_from_line(line)

    if CONFIG.white_box:  # If white-box testing, set up the flags.
        # Handle loading up RDL's config file madness.
        initialize_rdl_config(rdl_config_file)
        if nova_flag_file:
            initialize_nova_flags(nova_flag_file)

    # Set up the report, and print out how we're running the tests.
    from tests.util import report
    from datetime import datetime
    report.log("Trove Integration Tests, %s" % datetime.now())
    report.log("Invoked via command: " + str(sys.argv))
    report.log("Groups = " + str(groups))
    report.log("Test conf file = %s" % os.environ["TEST_CONF"])
    if CONFIG.white_box:
        report.log("")
        report.log("Test config file = %s" % rdl_config_file)
    report.log("")
    report.log("sys.path:")
    for path in sys.path:
        report.log("\t%s" % path)

    # Now that all configurations are loaded, it's time to import everything.
    test_importer()

    atexit.register(_clean_up)

    c = config.Config(stream=sys.stdout,
                      env=os.environ,
                      verbosity=3,
                      plugins=core.DefaultPluginManager())
    runner = NovaTestRunner(stream=c.stream,
                            verbosity=c.verbosity,
                            config=c,
                            show_elapsed=show_elapsed,
                            known_bugs=CONFIG.known_bugs)
    MAIN_RUNNER = runner

    if repl:
        # Neutralize sys.exit, which is otherwise called at the end of the
        # test run, in case we want to start a REPL.
        sys.exit = lambda x: None

    proboscis.TestProgram(argv=nose_args, groups=groups, config=c,
                          testRunner=MAIN_RUNNER).run_and_exit()
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
    if WHITE_BOX:  # If white-box testing, set up the flags.
        # Set up the flag file values, which we need to call certain Nova code.
        from tests.util import test_config
        nova_conf = test_config.values["nova_conf"]

        from nova import utils
        utils.default_flagfile(str(nova_conf))

        from nova import flags
        FLAGS = flags.FLAGS
        FLAGS(sys.argv)

    # Set up the report, and print out how we're running the tests.
    from tests.util import report
    from datetime import datetime
    report.log("Reddwarf Integration Tests, %s" % datetime.now())
    report.log("Invoked via command: " + str(sys.argv))
    report.log("Groups = " + str(groups))
    report.log("Test conf file = %s" % os.environ["TEST_CONF"])
    if WHITE_BOX:
        report.log("")
        report.log("Test FLAG file = %s" % nova_conf)
    report.log("")
    report.log("sys.path:")
    for path in sys.path:
        report.log("\t%s" % path)

    # Now that all configurations are loaded, it's time to import everything.

    import proboscis
    from tests.dns import check_domain
Example 15
    def test_create(self):
        databases = []
        databases.append({
            "name": "firstdb",
            "character_set": "latin2",
            "collate": "latin2_general_ci"
        })
        databases.append({"name": "db2"})
        instance_info.databases = databases
        users = []
        users.append({
            "name": "lite",
            "password": "******",
            "databases": [{
                "name": "firstdb"
            }]
        })
        instance_info.users = users
        if test_config.values['reddwarf_main_instance_has_volume']:
            instance_info.volume = {'size': 1}
        else:
            instance_info.volume = None

        if create_new_instance():
            instance_info.initial_result = dbaas.instances.create(
                instance_info.name, instance_info.dbaas_flavor_href,
                instance_info.volume, databases, users)
            assert_equal(200, dbaas.last_http_code)
        else:
            id = existing_instance()
            instance_info.initial_result = dbaas.instances.get(id)

        result = instance_info.initial_result
        instance_info.id = result.id
        if WHITE_BOX:
            instance_info.local_id = dbapi.localid_from_uuid(result.id)

        report.log("Instance UUID = %s" % instance_info.id)
        if create_new_instance():
            if WHITE_BOX:
                building = dbaas_mapping[power_state.BUILDING]
                assert_equal(result.status, building)
            assert_equal("BUILD", instance_info.initial_result.status)

        else:
            report.log("Test was invoked with TESTS_USE_INSTANCE_ID=%s, so no "
                       "instance was actually created." % id)
            if WHITE_BOX:
                report.log("Local id = %d" % instance_info.get_local_id())

        # Check that only these attrs are returned in the create response.
        expected_attrs = [
            'created', 'flavor', 'addresses', 'id', 'links', 'name', 'status',
            'updated'
        ]
        if test_config.values['reddwarf_can_have_volume']:
            expected_attrs.append('volume')
        if test_config.values['reddwarf_dns_support']:
            expected_attrs.append('hostname')

        with CheckInstance(result._info) as check:
            if create_new_instance():
                check.attrs_exist(result._info,
                                  expected_attrs,
                                  msg="Create response")
            # Don't CheckInstance if the instance already exists.
            check.flavor()
            check.links(result._info['links'])
            if test_config.values['reddwarf_can_have_volume']:
                check.volume()
    # Reset values imported into tests/__init__.
    # TODO(tim.simpson): Stop importing them from there.
    from tests import initialize_globals
    initialize_globals()

    from tests import WHITE_BOX
    if WHITE_BOX:  # If white-box testing, set up the flags.
        # Handle loading up RDL's config file madness.
        initialize_rdl_config(rdl_config_file)
        if nova_flag_file:
            initialize_nova_flags(nova_flag_file)

    # Set up the report, and print out how we're running the tests.
    from tests.util import report
    from datetime import datetime
    report.log("Reddwarf Integration Tests, %s" % datetime.now())
    report.log("Invoked via command: " + str(sys.argv))
    report.log("Groups = " + str(groups))
    report.log("Test conf file = %s" % os.environ["TEST_CONF"])
    if WHITE_BOX:
        report.log("")
        report.log("Test config file = %s" % rdl_config_file)
    report.log("")
    report.log("sys.path:")
    for path in sys.path:
        report.log("\t%s" % path)

    from tests.util.client import monkey_patch_reddwarf_client
    monkey_patch_reddwarf_client()

    # Now that all configurations are loaded, it's time to import everything.
Example 17
    # TODO(tim.simpson): Stop importing them from there.
    from tests import initialize_globals
    initialize_globals()

    from tests import WHITE_BOX
    if WHITE_BOX:  # If white-box testing, set up the flags.
        # Handle loading up RDL's config file madness.
        initialize_rdl_config(rdl_config_file)
        if nova_flag_file:
            initialize_nova_flags(nova_flag_file)

    # Set up the report, and print out how we're running the tests.
    from tests.util import report
    from datetime import datetime
    report.log("Trove Integration Tests, %s" % datetime.now())
    report.log("Invoked via command: " + str(sys.argv))
    report.log("Groups = " + str(groups))
    report.log("Test conf file = %s" % os.environ["TEST_CONF"])
    if WHITE_BOX:
        report.log("")
        report.log("Test config file = %s" % rdl_config_file)
    report.log("")
    report.log("sys.path:")
    for path in sys.path:
        report.log("\t%s" % path)

    # Now that all configurations are loaded, it's time to import everything.

    import proboscis
    # TODO(tim.simpson): Import these again once white box test functionality
Example 18
 def log_current_users(self):
     users = self.dbaas.users.list(self.instance_id)
     report.log("Current user count = %d" % len(users))
     for user in users:
         report.log("\t" + str(user))