Code example #1
    def test_num_failures_not_captured(self):
        driver_workload = JSONObject.from_dict(
            yaml.safe_load(
                open('tests/validator-numFailures-not-captured.yml').read())
            ['driverWorkload'])

        stats = self.run_test_expecting_error(driver_workload)
        self.assert_basic_stats(stats)

        # The workload executor is still expected to report failures propagated
        # from the unified test runner (e.g. loop operation without
        # storeErrorsAsEntity and storeFailuresAsEntity) and is permitted to
        # report them as errors. For this reason, we must be flexible and allow
        # either numFailures or numErrors to be reported (but not both).
        if not ((stats['numErrors'] == 0 and stats['numFailures'] == 1) or
                (stats['numErrors'] == 1 and stats['numFailures'] == 0)):
            self.fail_stats(
                "Expected either numErrors:0 and numFailures:1 or numErrors:1 "
                "and numFailures:0, but got numErrors:{} and numFailures:{} "
                "instead.".format(stats['numErrors'], stats['numFailures']))

        # Note: we do not assert numSuccesses or numIterations because the spec
        # does not guarantee that they will be reported via the entity map if
        # the test runner propagates an error/failure.

        # In the event the test runner does not capture a failure, the workload
        # executor is expected to report it in the same format; however, it may
        # be reported as either an error or a failure.
        self.assert_events(hasEvents=False, hasErrorsXorFailures=True)
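This "exactly one of numErrors/numFailures" pattern recurs across the validator tests, so it could be factored into a helper on the test class. A minimal sketch, reusing the fail_stats reporting shown above; the helper name assert_errors_xor_failures is hypothetical, not part of the suite:

    def assert_errors_xor_failures(self, stats):
        # Hypothetical helper mirroring the inline check above: exactly one of
        # numErrors/numFailures must be 1 and the other must be 0.
        if (stats['numErrors'], stats['numFailures']) not in {(0, 1), (1, 0)}:
            self.fail_stats(
                "Expected either numErrors:0 and numFailures:1 or numErrors:1 "
                "and numFailures:0, but got numErrors:{} and numFailures:{} "
                "instead.".format(stats['numErrors'], stats['numFailures']))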
Code example #2
    def test_num_failures_as_errors(self):
        driver_workload = JSONObject.from_dict(
            yaml.safe_load(
                open('tests/validator-numFailures-as-errors.yml').read())
            ['driverWorkload'])

        stats = self.run_test(driver_workload)
        self.assert_basic_stats(stats)

        # Failures should be reported in numErrors instead of numFailures
        if stats['numFailures'] != 0:
            self.fail_stats(
                "Expected no failures to be reported in numFailures, but got "
                "{} instead.".format(stats['numFailures']))

        # Each loop iteration should include two successful sub-operations
        # followed by one failure, so expect numErrors to be numSuccesses/2
        if abs(stats['numErrors'] - stats['numSuccesses'] / 2) > 1:
            self.fail_stats(
                "Expected approximately {}/2 failed operations to be reported "
                "in numErrors, but got {} instead.".format(
                    stats['numSuccesses'], stats['numErrors']))

        # Each loop iteration should include two successful sub-operations, so
        # expect reported numIterations to be numSuccesses/2
        if abs(stats['numIterations'] - stats['numSuccesses'] / 2) > 1:
            self.fail_stats(
                "Expected approximately {}/2 iterations to be reported in "
                "numIterations, but got {} instead.".format(
                    stats['numSuccesses'], stats['numIterations']))

        self.assert_events(hasEvents=False, hasErrors=True, hasFailures=False)
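The numSuccesses/2 relationships asserted above follow directly from the loop shape. Illustrative arithmetic under the assumptions stated in the test's comments; N is a hypothetical iteration count, not a value from the suite:

# Each iteration: two successful sub-operations plus one failure that is
# reported as an error (per the comments in the test above).
N = 10  # hypothetical number of loop iterations
expected = {
    'numSuccesses': 2 * N,  # two successes per iteration
    'numErrors': N,         # one failure per iteration, surfaced as an error
    'numFailures': 0,       # nothing is reported in numFailures
    'numIterations': N,
}
assert expected['numErrors'] == expected['numSuccesses'] / 2
assert expected['numIterations'] == expected['numSuccesses'] / 2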
Code example #3
    def test_num_errors_not_captured(self):
        driver_workload = JSONObject.from_dict(
            yaml.safe_load(
                open('tests/validator-numErrors-not-captured.yml').read())
            ['driverWorkload'])

        stats = self.run_test_expecting_error(driver_workload)
        self.assert_basic_stats(stats)

        # The workload executor is still expected to report errors propagated
        # from the unified test runner (e.g. loop operation without
        # storeErrorsAsEntity and storeFailuresAsEntity).
        if stats['numErrors'] != 1:
            self.fail_stats(
                "Expected one error to be reported, but got {} instead.".
                format(stats['numErrors']))

        if stats['numFailures'] != 0:
            self.fail_stats(
                "Expected no failures to be reported, but got {} instead.".
                format(stats['numFailures']))

        # Note: we do not assert numSuccesses or numIterations because the spec
        # does not guarantee that they will be reported via the entity map if
        # the test runner propagates an error/failure.

        # In the event the test runner does not capture an error, the workload
        # executor is expected to report it in the same format.
        self.assert_events(hasEvents=False, hasErrors=True, hasFailures=False)
Code example #4
    def test_num_failures(self):
        driver_workload = JSONObject.from_dict(
            yaml.safe_load(open('tests/validator-numFailures.yml').read())
            ['driverWorkload'])

        stats = self.run_test(driver_workload)
        self.assert_basic_stats(stats)

        # Since the test only uses storeFailuresAsEntity, numErrors should
        # always be zero, regardless of whether or how a test runner
        # distinguishes between errors and failures.
        if stats['numErrors'] != 0:
            self.fail_stats(
                "Expected no errors to be reported, but got {} instead.".
                format(stats['numErrors']))

        # Each loop iteration should include two successful sub-operations
        # followed by one failure, so expect numFailures to be numSuccesses/2
        if abs(stats['numFailures'] - stats['numSuccesses'] / 2) > 1:
            self.fail_stats(
                "Expected approximately {}/2 failed operations to be reported "
                "in numFailures, but got {} instead.".format(
                    stats['numSuccesses'], stats['numFailures']))

        # Each loop iteration should include two successful sub-operations, so
        # expect reported numIterations to be numSuccesses/2
        if abs(stats['numIterations'] - stats['numSuccesses'] / 2) > 1:
            self.fail_stats(
                "Expected approximately {}/2 iterations to be reported in "
                "numIterations, but got {} instead.".format(
                    stats['numSuccesses'], stats['numIterations']))

        self.assert_events(hasEvents=False, hasErrors=False, hasFailures=True)
Code example #5
    def test_simple(self):
        operations = [{
            'object': 'collection',
            'name': 'updateOne',
            'arguments': {
                'filter': {
                    '_id': 'validation_sentinel'
                },
                'update': {
                    '$inc': {
                        'count': 1
                    }
                }
            }
        }]
        driver_workload = deepcopy(DRIVER_WORKLOAD)
        driver_workload['operations'] = operations
        driver_workload = JSONObject.from_dict(driver_workload)

        stats = self.run_test(driver_workload)

        num_reported_updates = stats['numSuccesses']
        update_count = self.coll.find_one({'_id':
                                           'validation_sentinel'})['count']
        if abs(num_reported_updates - update_count) > 1:
            self.fail("The workload executor reported inconsistent execution "
                      "statistics. Expected {} successful "
                      "updates to be reported, got {} instead.".format(
                          update_count, num_reported_updates))
        if update_count == 0:
            self.fail("The workload executor didn't execute any operations "
                      "or didn't execute them appropriately.")
Code example #6
    def test_num_failures_as_errors(self):
        driver_workload = JSONObject.from_dict(
            yaml.safe_load(
                open('tests/validator-numFailures-as-errors.yml').read())
            ['driverWorkload'])

        stats = self.run_test(driver_workload)

        num_reported_finds = stats['numSuccesses']

        num_reported_errors = stats['numErrors']
        num_reported_failures = stats['numFailures']
        if abs(num_reported_errors - num_reported_finds / 2) > 1:
            self.fail(
                "The workload executor reported inconsistent execution "
                "statistics. Expected approximately {}/2 errored operations "
                "to be reported, got {} instead.".format(
                    num_reported_finds, num_reported_errors))
        if num_reported_failures > 0:
            self.fail(
                "The workload executor reported unexpected execution "
                "statistics. Expected all failures to be reported as errors, "
                "got {} failures instead.".format(num_reported_failures))
        if abs(stats['numIterations'] - num_reported_finds / 2) > 1:
            self.fail("The workload executor reported inconsistent execution "
                      "statistics. Expected {} iterations "
                      "to be reported, got {} instead.".format(
                          num_reported_finds, stats['numIterations']))
Code example #7
    def test_num_errors(self):
        driver_workload = JSONObject.from_dict(
            yaml.safe_load(open('tests/validator-numErrors.yml').read())
            ['driverWorkload'])

        stats = self.run_test(driver_workload)

        num_successes = stats['numSuccesses']
        update_count = self.coll.find_one({'_id':
                                           'validation_sentinel'})['count']
        if abs(num_successes / 2 - update_count) > 1:
            self.fail("The workload executor reported inconsistent execution "
                      "statistics. Expected 2*{} successful "
                      "operations to be reported, got {} instead.".format(
                          update_count, num_successes))

        num_reported_errors = stats['numErrors']
        if abs(num_reported_errors - num_successes / 2) > 1:
            self.fail(
                "The workload executor reported inconsistent execution "
                "statistics. Expected approximately {}/2 errored operations "
                "to be reported, got {} instead.".format(
                    num_successes, num_reported_errors))
        if abs(stats['numIterations'] - update_count) > 1:
            self.fail("The workload executor reported inconsistent execution "
                      "statistics. Expected {} iterations "
                      "to be reported, got {} instead.".format(
                          update_count, stats['numIterations']))
Code example #8
    def test_num_failures_not_captured(self):
        driver_workload = JSONObject.from_dict(
            yaml.safe_load(
                open('tests/validator-numFailures-not-captured.yml').read())
            ['driverWorkload'])

        stats = self.run_test_expecting_error(driver_workload)

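        # This test treats -1 as a sentinel value distinguishing "failures
        # were not captured" from "zero failures occurred".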
        num_reported_failures = stats['numFailures']
        if num_reported_failures != -1:
            self.fail(
                "The workload executor reported unexpected execution "
                "statistics. Expected -1 failures since failures were not "
                "captured, got {} instead.".format(num_reported_failures))
Code example #9
    def test_num_errors(self):
        operations = [{
            'object': 'collection',
            'name': 'updateOne',
            'arguments': {
                'filter': {
                    '_id': 'validation_sentinel'
                },
                'update': {
                    '$inc': {
                        'count': 1
                    }
                }
            }
        }, {
            'object': 'collection',
            'name': 'doesNotExist',
            'arguments': {
                'foo': 'bar'
            }
        }]
        driver_workload = deepcopy(DRIVER_WORKLOAD)
        driver_workload['operations'] = operations
        driver_workload = JSONObject.from_dict(driver_workload)

        stats = self.run_test(driver_workload)

        num_reported_updates = stats['numSuccesses']
        update_count = self.coll.find_one({'_id':
                                           'validation_sentinel'})['count']
        if abs(num_reported_updates - update_count) > 1:
            self.fail("The workload executor reported inconsistent execution "
                      "statistics. Expected {} successful "
                      "updates to be reported, got {} instead.".format(
                          update_count, num_reported_updates))

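        # Each iteration performs one successful updateOne followed by one
        # failing 'doesNotExist' operation, so roughly one error per reported
        # update is expected.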
        num_reported_errors = stats['numErrors']
        if abs(num_reported_errors - num_reported_updates) > 1:
            self.fail(
                "The workload executor reported inconsistent execution "
                "statistics. Expected approximately {} errored operations "
                "to be reported, got {} instead.".format(
                    num_reported_updates, num_reported_errors))
Code example #10
    def test_simple(self):
        driver_workload = JSONObject.from_dict(
            yaml.safe_load(
                open('tests/validator-simple.yml').read())['driverWorkload'])

        stats = self.run_test(driver_workload)
        self.assert_basic_stats(stats)

        # The simple test's loop performs two operations per iteration, one of
        # which is an updateOne that increments the sentinel document. Fetch
        # that counter to use for assertions on numSuccesses and numIterations.
        update_count = self.coll.find_one({'_id':
                                           'validation_sentinel'})['count']

        if update_count == 0:
            self.fail("The workload executor didn't execute any operations "
                      "or didn't execute them appropriately.")

        if abs(stats['numSuccesses'] / 2 - update_count) > 1:
            self.fail_stats(
                "Expected 2*{} successful operations to be reported in "
                "numSuccesses, but got {} instead.".format(
                    update_count, stats['numSuccesses']))

        if abs(stats['numIterations'] - update_count) > 1:
            self.fail_stats(
                "Expected {} iterations to be reported in numIterations, but "
                "got {} instead.".format(update_count, stats['numIterations']))

        if stats['numErrors'] != 0:
            self.fail_stats(
                "Expected no errors to be reported, but got {} instead.".
                format(stats['numErrors']))

        if stats['numFailures'] != 0:
            self.fail_stats(
                "Expected no failures to be reported, but got {} instead.".
                format(stats['numFailures']))

        self.assert_events(hasEvents=True, hasErrors=False, hasFailures=False)
Code example #11
from subprocess import TimeoutExpired
from time import sleep
from unittest import TestCase

from pymongo import MongoClient

from atlasclient import JSONObject
from astrolabe.exceptions import WorkloadExecutorError
from astrolabe.utils import DriverWorkloadSubprocessRunner, load_test_data

DRIVER_WORKLOAD = JSONObject.from_dict({
    'database': 'validation_db',
    'collection': 'validation_coll',
    'testData': [{
        '_id': 'validation_sentinel',
        'count': 0
    }],
    'operations': []
})


class ValidateWorkloadExecutor(TestCase):
    WORKLOAD_EXECUTOR = None
    CONNECTION_STRING = None
    STARTUP_TIME = None

    def setUp(self):
        self.client = MongoClient(self.CONNECTION_STRING, w='majority')
        self.coll = self.client.get_database(
            DRIVER_WORKLOAD['database']).get_collection(
                DRIVER_WORKLOAD['collection'])
Code example #12
    def __init__(self, *, client, test_locator_token, configuration,
                 xunit_output, persist_clusters, workload_startup_time):
        self.cases = []
        self.client = client
        self.config = configuration
        self.xunit_logger = SingleTestXUnitLogger(
            output_directory=xunit_output)
        self.persist_clusters = persist_clusters
        self.workload_startup_time = workload_startup_time

        for full_path in self.find_spec_tests(test_locator_token):
            # Step-1: load test specification.
            with open(full_path, 'r') as spec_file:
                test_spec = JSONObject.from_dict(
                    yaml.load(spec_file, Loader=yaml.FullLoader))

            # Step-2: generate test name.
            test_name = get_test_name_from_spec_file(full_path)

            # Step-3: generate unique cluster name.
            cluster_name = get_cluster_name(test_name, self.config.name_salt)

            self.cases.append(
                AtlasTestCase(client=self.client,
                              test_name=test_name,
                              cluster_name=cluster_name,
                              specification=test_spec,
                              configuration=self.config))

        # Set up Atlas for tests.
        # Step-1: ensure validity of the organization.
        # Note: organizations can only be created via the web UI.
        org_name = self.config.organization_name
        LOGGER.info("Verifying organization {!r}".format(org_name))
        org = get_one_organization_by_name(client=self.client,
                                           organization_name=org_name)
        LOGGER.info("Successfully verified organization {!r}".format(org_name))

        # Step-2: check that the project exists or else create it.
        pro_name = self.config.project_name
        LOGGER.info("Verifying project {!r}".format(pro_name))
        project = ensure_project(client=self.client,
                                 project_name=pro_name,
                                 organization_id=org.id)
        LOGGER.info("Successfully verified project {!r}".format(pro_name))

        # Step-3: create a user under the project.
        # Note: all test operations will be run as this user.
        uname = self.config.database_username
        LOGGER.info("Verifying user {!r}".format(uname))
        ensure_admin_user(client=self.client,
                          project_id=project.id,
                          username=uname,
                          password=self.config.database_password)
        LOGGER.info("Successfully verified user {!r}".format(uname))

        # Step-4: populate project IP whitelist to allow access from anywhere.
        LOGGER.info("Enabling access from anywhere on project "
                    "{!r}".format(pro_name))
        ensure_connect_from_anywhere(client=self.client, project_id=project.id)
        LOGGER.info("Successfully enabled access from anywhere on project "
                    "{!r}".format(pro_name))

        # Step-5: log test plan.
        LOGGER.info(self.get_printable_test_plan())
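For context, constructing this runner might look as follows. Every name here (SpecTestRunner, make_atlas_client, load_config) and every argument value is a placeholder, since the snippet shows only the __init__ body, not the enclosing class or its callers:

# Hypothetical usage sketch; names are placeholders, not astrolabe's API.
client = make_atlas_client()        # authenticated Atlas API client
config = load_config()              # org/project/user settings

runner = SpecTestRunner(
    client=client,
    test_locator_token='tests/',    # file or directory of spec tests
    configuration=config,
    xunit_output='xunit-output',    # directory for XUnit result files
    persist_clusters=False,         # tear clusters down after the run
    workload_startup_time=10)       # seconds to allow the executor to start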
Code example #13
    def test_simple(self):
        driver_workload = JSONObject.from_dict(
            yaml.safe_load(
                open('tests/validator-simple.yml').read())['driverWorkload'])

        if os.path.exists('events.json'):
            os.unlink('events.json')

        stats = self.run_test(driver_workload)

        num_successes = stats['numSuccesses']
        update_count = self.coll.find_one({'_id':
                                           'validation_sentinel'})['count']
        if abs(num_successes / 2 - update_count) > 1:
            self.fail("The workload executor reported inconsistent execution "
                      "statistics. Expected 2*{} successful "
                      "operations to be reported, got {} instead.".format(
                          update_count, num_successes))
        if abs(stats['numIterations'] - update_count) > 1:
            self.fail("The workload executor reported inconsistent execution "
                      "statistics. Expected {} iterations "
                      "to be reported, got {} instead.".format(
                          update_count, stats['numIterations']))
        if update_count == 0:
            self.fail("The workload executor didn't execute any operations "
                      "or didn't execute them appropriately.")

        _events = yaml.safe_load(open('events.json').read())
        if 'events' not in _events:
            self.fail(
                "The workload executor didn't record events as expected.")
        events = _events['events']
        connection_events = [
            event for event in events if event['name'].startswith('Connection')
        ]
        if not connection_events:
            self.fail(
                "The workload executor didn't record connection events as expected."
            )
        pool_events = [
            event for event in events if event['name'].startswith('Pool')
        ]
        if not pool_events:
            self.fail(
                "The workload executor didn't record connection pool events as expected."
            )
        command_events = [
            event for event in events if event['name'].startswith('Command')
        ]
        if not command_events:
            self.fail(
                "The workload executor didn't record command events as expected."
            )
        for event_list in [connection_events, pool_events, command_events]:
            for event in event_list:
                if 'name' not in event:
                    self.fail(
                        "The workload executor didn't record event name as expected."
                    )
                if not event['name'].endswith('Event'):
                    self.fail(
                        "The workload executor didn't record event name as expected."
                    )
                if 'observedAt' not in event:
                    self.fail(
                        "The workload executor didn't record observation time as expected."
                    )
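Taken together, the checks above would accept an events.json shaped like the following. Everything beyond the 'name' and 'observedAt' keys (including the concrete values) is an assumption, since the test validates only those two fields:

sample = {
    'events': [
        {'name': 'ConnectionCreatedEvent', 'observedAt': 1614000000.0},
        {'name': 'PoolClearedEvent', 'observedAt': 1614000001.0},
        {'name': 'CommandStartedEvent', 'observedAt': 1614000002.0},
    ]
}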