Example #1
    def test_concurrent_synchronization(self):
        """Synchronize two repos concurrently

        @id: ba0e8b76-f24d-433b-b14f-5bc7a7fefd95

        @Steps:

        1. get list of all enabled repositories (setUpClass)
        2. sync 2, 3, ..., X repositories as consecutive test cases
        3. for each test case, delegate synchronization to
            ``robottelo.tests.kick_off_sync_test``
        4. in each test case, get the max timing value on each iteration
            and store it into max-timing-dict. For example, for the 2-repo
            test case, it syncs 2 repositories and repeats 3 times
                     1       2       3
            repo-1   21.48   13.87   33.16
            repo-2   95.33   81.77   21.69

            Then it would extract the max only and return the dictionary:
            ``{2: [95.33, 81.77, 33.16]}``. Repeat from 2-repo test case
            to 10-repo case.
        """
        total_max_timing = {}
        for current_num_threads in range(2, self.max_num_tests + 1):
            # kick off N-repo sync test case
            self.logger.debug("Kick off {0}-repo test case:".format(current_num_threads))
            total_max_timing[current_num_threads] = []

            # if resync test, sequentially sync all repos for first time
            if not self.is_initial_sync:
                self.logger.debug("Initial sync prior to {0}-repo Resync test case:".format(current_num_threads))
                Pulp.repositories_sequential_sync(self.repo_names_list, self.map_repo_name_id, 1)
                self.logger.debug("Initial sync prior to {0}-repo Resync test finished.".format(current_num_threads))

            subtest_dict = self.kick_off_concurrent_sync_test(current_num_threads, self.is_initial_sync)

            # generate csv and charts for raw data of Pulp tests
            self._write_raw_csv_chart_pulp(
                self.raw_file_name,
                subtest_dict,
                current_num_threads,
                "raw-sync-{0}-clients".format(current_num_threads),
            )

            # get max for each iteration
            for iteration in range(self.sync_iterations):
                total_max_timing[current_num_threads].append(
                    max(
                        [
                            subtest_dict.get("thread-{0}".format(thread))[iteration]
                            for thread in range(current_num_threads)
                        ]
                    )
                )

        self.logger.debug("Total Results for all tests from 2 threads to 10 threads: {0}".format(total_max_timing))
        self._write_stat_pulp_concurrent(total_max_timing)
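The per-iteration max extraction at the end of this test can be checked in isolation. Below is a minimal sketch that replays the 2-repo example from the docstring, assuming ``subtest_dict`` uses the same ``thread-{n}`` keys the loop above reads; it produces ``{2: [95.33, 81.77, 33.16]}``.

    # Sketch only: replays the docstring's sample timings through the same
    # max-per-iteration logic used in the test loop above.
    subtest_dict = {
        "thread-0": [21.48, 13.87, 33.16],  # repo-1 timings per iteration
        "thread-1": [95.33, 81.77, 21.69],  # repo-2 timings per iteration
    }
    current_num_threads = 2
    sync_iterations = 3

    total_max_timing = {current_num_threads: []}
    for iteration in range(sync_iterations):
        total_max_timing[current_num_threads].append(
            max(
                subtest_dict["thread-{0}".format(thread)][iteration]
                for thread in range(current_num_threads)
            )
        )

    print(total_max_timing)  # {2: [95.33, 81.77, 33.16]}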
Example #2
    def test_sequential_synchronization(self):
        """
        Synchronize two repos sequentially

        @Steps:

        1. get list of all enabled repositories (setUpClass)
        2. Synchronize from the first to last repo sequentially
        3. produce result of timing, delegated to
            ``robottelo.tests.kick_off_sync_test``

        @Assert: Target repositories are enabled

        """
        time_result_dict_sync = Pulp.repositories_sequential_sync(
            self.repo_names_list,
            self.map_repo_name_id,
            self.sync_iterations,
            self.savepoint
        )
        self._write_raw_csv_chart_pulp(
            self.raw_file_name,
            time_result_dict_sync,
            1,
            'raw-sync-sequential'
        )
        self._write_stat_pulp_linear(time_result_dict_sync)
Example #3
    def setUpClass(cls):
        super(ConcurrentSyncTestCase, cls).setUpClass()

        # note: may need to change savepoint in config file
        cls._set_testcase_parameters(
            'performance.test.savepoint2_enabled_repos',
            RAW_SYNC_FILE_NAME,
            STAT_SYNC_FILE_NAME,
        )

        # get enabled repositories information
        cls.map_repo_name_id = Pulp.get_enabled_repos(cls.org_id)
        cls.logger.debug(cls.map_repo_name_id)

        # get number of iterations of syncs that each thread would do
        cls.sync_iterations = int(conf.properties.get(
            'performance.test.num_syncs',
            '3'
        ))

        # get whether start initial sync or resync test
        sync_parameter = conf.properties.get(
            'performance.test.sync_type',
            'sync'
        )
        cls.is_initial_sync = sync_parameter == 'sync'
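In this older variant, the test knobs come from ``conf.properties`` with string defaults. A minimal sketch of that lookup pattern, using a plain dict as a stand-in for ``conf.properties`` (the key names and defaults are taken from the code above; the stored value is hypothetical):

    # Stand-in for conf.properties; only the keys and defaults mirror the
    # setUpClass above, the value placed here is hypothetical.
    properties = {'performance.test.sync_type': 'resync'}

    sync_iterations = int(properties.get('performance.test.num_syncs', '3'))
    sync_parameter = properties.get('performance.test.sync_type', 'sync')
    is_initial_sync = sync_parameter == 'sync'

    print(sync_iterations, is_initial_sync)  # 3 False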
Example #4
    def run(self):
        LOGGER.debug(
            "{0}: synchronize repository {1} attempt {2}"
            .format(self.thread_name, self.repository_name, self.iteration)
        )

        time_point = Pulp.repository_single_sync(
            self.repository_id,
            self.repository_name,
            self.thread_id,
        )

        # append sync timing to this thread's result list
        self.time_result_dict.get(self.thread_name).append(time_point)
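This ``run`` method belongs to a sync worker whose surrounding class is not shown in the excerpt. A minimal sketch of how such a worker could be wired up, assuming a ``threading.Thread`` subclass and a shared ``time_result_dict`` keyed by thread name (both assumptions inferred from the attribute names above; ``fake_single_sync`` is a hypothetical stand-in for ``Pulp.repository_single_sync``):

    import threading

    def fake_single_sync(repository_id, repository_name, thread_id):
        """Hypothetical stand-in: the real call runs one sync and returns
        the elapsed time for that attempt."""
        return 1.23

    class SyncWorker(threading.Thread):
        """Sketch of a worker; attribute names mirror the run() above."""

        def __init__(self, thread_id, repository_id, repository_name,
                     iteration, time_result_dict):
            super(SyncWorker, self).__init__()
            self.thread_id = thread_id
            self.thread_name = 'thread-{0}'.format(thread_id)
            self.repository_id = repository_id
            self.repository_name = repository_name
            self.iteration = iteration
            self.time_result_dict = time_result_dict  # shared across threads

        def run(self):
            time_point = fake_single_sync(
                self.repository_id, self.repository_name, self.thread_id)
            # append sync timing to this thread's result list
            self.time_result_dict.get(self.thread_name).append(time_point)

    # usage: one list per thread, filled in as each worker finishes a sync
    time_result_dict = {'thread-0': [], 'thread-1': []}
    workers = [
        SyncWorker(i, 'repo-id-{0}'.format(i), 'repo-{0}'.format(i), 0,
                   time_result_dict)
        for i in range(2)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()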
Example #5
    def setUpClass(cls):
        super(ConcurrentSyncTestCase, cls).setUpClass()

        # note: may need to change savepoint in config file
        cls._set_testcase_parameters("enabled_repos", RAW_SYNC_FILE_NAME, STAT_SYNC_FILE_NAME)

        # get enabled repositories information
        cls.map_repo_name_id = Pulp.get_enabled_repos(cls.org_id)
        cls.logger.debug(cls.map_repo_name_id)

        # get number of iterations of syncs that each thread would do
        cls.sync_iterations = settings.performance.sync_count

        # get whether start initial sync or resync test
        sync_parameter = settings.performance.sync_type
        cls.is_initial_sync = sync_parameter == "sync"
Example #6
    def setUpClass(cls):
        super(ConcurrentSyncTestCase, cls).setUpClass()

        # note: may need to change savepoint in config file
        cls._set_testcase_parameters(
            'enabled_repos',
            RAW_SYNC_FILE_NAME,
            STAT_SYNC_FILE_NAME,
        )

        # get enabled repositories information
        cls.map_repo_name_id = Pulp.get_enabled_repos(cls.org_id)
        cls.logger.debug(cls.map_repo_name_id)

        # get number of iterations of syncs that each thread would do
        cls.sync_iterations = settings.performance.sync_count

        # get whether start initial sync or resync test
        sync_parameter = settings.performance.sync_type
        cls.is_initial_sync = sync_parameter == 'sync'
Example #7
    def test_sequential_synchronization(self):
        """Synchronize two repos sequentially

        :id: 78ec0c73-d29e-4b11-b58d-7de473b16f61

        :Steps:

            1. get list of all enabled repositories (setUpClass)
            2. Synchronize from the first to last repo sequentially
            3. produce result of timing, delegated to
               ``robottelo.tests.kick_off_sync_test``

        :expectedresults: Target repositories are enabled
        """
        time_result_dict_sync = Pulp.repositories_sequential_sync(
            self.repo_names_list, self.map_repo_name_id, self.sync_iterations,
            self.savepoint)
        self._write_raw_csv_chart_pulp(self.raw_file_name,
                                       time_result_dict_sync, 1,
                                       'raw-sync-sequential')
        self._write_stat_pulp_linear(time_result_dict_sync)
Example #8
    def test_concurrent_synchronization(self):
        """Synchronize two repos concurrently

        :id: ba0e8b76-f24d-433b-b14f-5bc7a7fefd95

        :Steps:

            1. get list of all enabled repositories (setUpClass)
            2. sync 2, 3, ..., X repositories as consecutive test cases
            3. for each test case, delegate synchronization to
                ``robottelo.tests.kick_off_sync_test``
            4. in each test case, get the max timing value on each iteration
                and store it into max-timing-dict. For example, for the 2-repo
                test case, it syncs 2 repositories and repeats 3 times::

                             1       2       3
                    repo-1   21.48   13.87   33.16
                    repo-2   95.33   81.77   21.69

                Then it would extract the max only and return the dictionary:
                ``{2: [95.33, 81.77, 33.16]}``. Repeat from 2-repo test case
                to 10-repo case.
        """
        total_max_timing = {}
        for current_num_threads in range(2, self.max_num_tests + 1):
            # kick off N-repo sync test case
            self.logger.debug(
                'Kick off {0}-repo test case:'.format(current_num_threads))
            total_max_timing[current_num_threads] = []

            # if resync test, sequentially sync all repos for first time
            if not self.is_initial_sync:
                self.logger.debug(
                    'Initial sync prior to {0}-repo Resync test case:'.format(
                        current_num_threads))
                Pulp.repositories_sequential_sync(self.repo_names_list,
                                                  self.map_repo_name_id, 1)
                self.logger.debug(
                    'Initial sync prior to {0}-repo Resync test finished.'.
                    format(current_num_threads))

            subtest_dict = self.kick_off_concurrent_sync_test(
                current_num_threads, self.is_initial_sync)

            # generate csv and charts for raw data of Pulp tests
            self._write_raw_csv_chart_pulp(
                self.raw_file_name, subtest_dict, current_num_threads,
                'raw-sync-{0}-clients'.format(current_num_threads))

            # get max for each iteration
            for iteration in range(self.sync_iterations):
                total_max_timing[current_num_threads].append(
                    max([
                        subtest_dict.get(
                            'thread-{0}'.format(thread))[iteration]
                        for thread in range(current_num_threads)
                    ]))

        self.logger.debug(
            'Total Results for all tests from 2 threads to 10 threads: {0}'.
            format(total_max_timing))
        self._write_stat_pulp_concurrent(total_max_timing)