def _write_stat_per_client_bucketized(
        self, stat_file_name, time_result_dict, current_num_threads):
    """Write bucketized stat of per-client results to csv file.

    Each bucket is just a consecutive split of a single client's
    timing results. For example::

        Input: # of clients = 1
        {thread-i: [(50 data) | (50 data)|...]}
        Output: line chart of statistics on these buckets.
    """
    test_category = self._get_output_filename(stat_file_name)
    for client_idx in range(current_num_threads):
        # Timing samples recorded by this client's worker thread.
        samples = time_result_dict.get('thread-{0}'.format(client_idx))
        per_client_stat = generate_stat_for_concurrent_thread(
            'client-{0}'.format(client_idx),
            samples,
            stat_file_name,
            self.bucket_size,
            self.num_buckets,
        )
        chart_title = (
            'Concurrent Subscription Statistics - per client bucketized: '
            'Client-{0} by {1}-{2}-clients'.format(
                client_idx, test_category, current_num_threads)
        )
        chart_file = '{0}-client-{1}-bucketized-{2}-clients.svg'.format(
            test_category, client_idx, current_num_threads)
        # One line chart per client, with that client's data grouped
        # into its buckets.
        generate_line_chart_stat_bucketized_candlepin(
            per_client_stat,
            chart_title,
            chart_file,
            self.bucket_size,
            self.num_buckets,
        )
def _write_stat_per_test_bucketized(
        self, stat_file_name, time_result_dict):
    """Write bucketized stat of per-test results to csv file.

    The i-th bucket of every client is merged into a single chunk and
    a stat is generated for each such chunk. For example::

        Input: # of clients = 10
        thread-0: [(50 data) | (50 data)|...]
        thread-1: [(50 data) | (50 data)|...]
        ...
        thread-9: [(50 data) | (50 data)|...]
        Output:
        sublist [500 data grouped from all clients' first buckets],
        [500 data grouped from all clients' next buckets], ...
        [500 data grouped from all clients' last buckets];
        line chart of statistics on these chunks.
    """
    num_clients = len(time_result_dict)
    test_category = self._get_output_filename(stat_file_name)
    # Maps bucket index -> stat tuple for the merged chunk.
    chunk_stats = {}
    for bucket_idx in range(self.num_buckets):
        lo = bucket_idx * self.bucket_size
        hi = lo + self.bucket_size
        # Merge the bucket_idx-th slice from every client's results.
        merged = []
        for client_idx in range(num_clients):
            client_times = time_result_dict.get(
                'thread-{}'.format(client_idx))
            merged.extend(client_times[lo:hi])
        # Compute and output the stat for this merged chunk; the chunk
        # is treated as a single bucket of its own length.
        bucket_stat = generate_stat_for_concurrent_thread(
            'bucket-{}'.format(bucket_idx),
            merged,
            stat_file_name,
            len(merged),
            1,
        )
        chunk_stats[bucket_idx] = bucket_stat.get(0, (0, 0, 0, 0))
    # Line chart over all chunks (all clients grouped per bucket).
    generate_line_chart_stat_bucketized_candlepin(
        chunk_stats,
        'Concurrent Subscription Statistics - per test bucketized: '
        '({0}-{1}-clients)'.format(test_category, num_clients),
        '{0}-test-bucketized-{1}-clients.svg'.format(
            test_category, num_clients),
        self.bucket_size,
        self.num_buckets,
    )
def _write_stat_per_test_bucketized(
        self, stat_file_name, time_result_dict):
    """Write bucketized stat of per-test results to csv file.

    For every bucket position, the corresponding slice from each
    client is merged into one chunk, and a stat is produced per chunk.
    For example::

        Input: # of clients = 10
        thread-0: [(50 data) | (50 data)|...]
        thread-1: [(50 data) | (50 data)|...]
        ...
        thread-9: [(50 data) | (50 data)|...]
        Output:
        sublist [500 data grouped from all clients' first buckets],
        [500 data grouped from all clients' next buckets], ...
        [500 data grouped from all clients' last buckets];
        line chart of statistics on these chunks.
    """
    current_num_threads = len(time_result_dict)
    test_category = self._get_output_filename(stat_file_name)
    stat_dict = {}
    for i in range(self.num_buckets):
        start = i * self.bucket_size
        end = start + self.bucket_size
        # Collect the i-th bucket-size slice from every client.
        chunk = [
            sample
            for j in range(current_num_threads)
            for sample in time_result_dict.get(
                'thread-{0}'.format(j))[start:end]
        ]
        # Stat for this chunk: the whole chunk acts as one bucket.
        chunk_stat = generate_stat_for_concurrent_thread(
            'bucket-{0}'.format(i),
            chunk,
            stat_file_name,
            len(chunk),
            1,
        )
        stat_dict[i] = chunk_stat.get(0, (0, 0, 0, 0))
    # Single line chart covering all chunks of buckets.
    generate_line_chart_stat_bucketized_candlepin(
        stat_dict,
        'Concurrent Subscription Statistics - per test bucketized: '
        '({0}-{1}-clients)'.format(test_category, current_num_threads),
        '{0}-test-bucketized-{1}-clients.svg'.format(
            test_category, current_num_threads),
        self.bucket_size,
        self.num_buckets,
    )
def _write_stat_per_client_bucketized(
        self, stat_file_name, time_result_dict, current_num_threads):
    """Write bucketized stat of per-client results to csv file.

    Each bucket is just a consecutive split of a single client's
    timing results. For example::

        Input: # of clients = 1
        {thread-i: [(50 data) | (50 data)|...]}
        Output: line chart of statistics on these buckets.
    """
    test_category = self._get_output_filename(stat_file_name)
    for idx in range(current_num_threads):
        thread_key = 'thread-{}'.format(idx)
        label = 'client-{}'.format(idx)
        # Per-client stat over that client's buckets.
        stat_dict = generate_stat_for_concurrent_thread(
            label,
            time_result_dict.get(thread_key),
            stat_file_name,
            self.bucket_size,
            self.num_buckets,
        )
        title = (
            'Concurrent Subscription Statistics - per client bucketized: '
            'Client-{0} by {1}-{2}-clients'
        ).format(idx, test_category, current_num_threads)
        filename = (
            '{0}-client-{1}-bucketized-{2}-clients.svg'
        ).format(test_category, idx, current_num_threads)
        # Emit one line chart for this client, grouped by buckets.
        generate_line_chart_stat_bucketized_candlepin(
            stat_dict,
            title,
            filename,
            self.bucket_size,
            self.num_buckets,
        )