def test_course_listing_performance(self, store, courses_list_from_group_calls, courses_list_calls):
    """
    Create a large number of courses, grant the user access to a random
    subset, then compare the time to fetch the user's accessible courses
    by iterating over all courses versus reversing django groups, and
    finally pin the expected number of db queries for each strategy.
    """
    # Random course numbers that will be accessible to the user.
    accessible_numbers = set(random.sample(range(TOTAL_COURSES_COUNT), USER_COURSES_COUNT))

    # Create every course; only the sampled ones get the user attached.
    with self.store.default_store(store):
        for index in range(TOTAL_COURSES_COUNT):
            course_key = self.store.make_course_key(
                'Org{0}'.format(index),
                'Course{0}'.format(index),
                'Run{0}'.format(index),
            )
            if index in accessible_numbers:
                self._create_course_with_access_groups(course_key, self.user, store=store)
            else:
                self._create_course_with_access_groups(course_key, store=store)

    # Time the listing obtained by iterating through all courses.
    with Timer() as iteration_over_courses_time_1:
        accessible, __ = _accessible_courses_iter(self.request)
        self.assertEqual(len(list(accessible)), USER_COURSES_COUNT)

    # Time it a second time.
    with Timer() as iteration_over_courses_time_2:
        accessible, __ = _accessible_courses_iter(self.request)
        self.assertEqual(len(list(accessible)), USER_COURSES_COUNT)

    # Time the listing obtained by reversing django groups.
    with Timer() as iteration_over_groups_time_1:
        from_groups, __ = _accessible_courses_list_from_groups(self.request)
        self.assertEqual(len(from_groups), USER_COURSES_COUNT)

    # Time it a second time.
    with Timer() as iteration_over_groups_time_2:
        from_groups, __ = _accessible_courses_list_from_groups(self.request)
        self.assertEqual(len(from_groups), USER_COURSES_COUNT)

    # TODO (cdyer): iteration over courses was optimized, and is now
    # sometimes faster than iteration over groups. One of the following
    # should be done to resolve this:
    # * Iteration over groups should be sped up.
    # * Iteration over groups should be removed, as it no longer saves time.
    # * Or this part of the test should be removed.
    # The disabled assertions below checked that reversing django groups
    # beats traversing all courses (when accessible courses are few).
    #self.assertGreaterEqual(iteration_over_courses_time_1.elapsed, iteration_over_groups_time_1.elapsed)
    #self.assertGreaterEqual(iteration_over_courses_time_2.elapsed, iteration_over_groups_time_2.elapsed)

    # Now count the db queries.
    with check_mongo_calls(courses_list_from_group_calls):
        _accessible_courses_list_from_groups(self.request)
    with check_mongo_calls(courses_list_calls):
        list(_accessible_courses_iter(self.request))
def test_course_listing_performance(self):
    """
    Create a large number of courses, give the user access to a subset of
    them, and compare the time needed to fetch the user's accessible
    courses by traversing all courses versus reversing django groups.
    Also pins the number of mongo queries each strategy issues.
    """
    # Random course numbers that will be accessible to the user.
    user_course_ids = random.sample(range(TOTAL_COURSES_COUNT), USER_COURSES_COUNT)

    # Create the courses; only those sampled above include the user in
    # their access groups.
    for number in range(TOTAL_COURSES_COUNT):
        org = 'Org{0}'.format(number)
        course = 'Course{0}'.format(number)
        run = 'Run{0}'.format(number)
        course_location = SlashSeparatedCourseKey(org, course, run)
        if number in user_course_ids:
            self._create_course_with_access_groups(course_location, self.user)
        else:
            self._create_course_with_access_groups(course_location)

    # Time fetching the courses by iterating through all courses.
    with Timer() as iteration_over_courses_time_1:
        courses_list, __ = _accessible_courses_list(self.request)
        self.assertEqual(len(courses_list), USER_COURSES_COUNT)

    # Time it a second time.
    with Timer() as iteration_over_courses_time_2:
        courses_list, __ = _accessible_courses_list(self.request)
        self.assertEqual(len(courses_list), USER_COURSES_COUNT)

    # Time fetching the courses by reversing django groups.
    with Timer() as iteration_over_groups_time_1:
        courses_list, __ = _accessible_courses_list_from_groups(self.request)
        self.assertEqual(len(courses_list), USER_COURSES_COUNT)

    # Time it a second time.
    with Timer() as iteration_over_groups_time_2:
        courses_list, __ = _accessible_courses_list_from_groups(self.request)
        self.assertEqual(len(courses_list), USER_COURSES_COUNT)

    # Reversing django groups should be at least as fast as traversing all
    # courses (accessible courses are relatively few).
    self.assertGreaterEqual(iteration_over_courses_time_1.elapsed, iteration_over_groups_time_1.elapsed)
    self.assertGreaterEqual(iteration_over_courses_time_2.elapsed, iteration_over_groups_time_2.elapsed)

    # Now count the db queries. (A previously-fetched `store` local was
    # removed: it was never used by the checks below.)
    with check_mongo_calls(USER_COURSES_COUNT):
        _accessible_courses_list_from_groups(self.request)
    # TODO: LMS-11220: Document why this takes 6 calls
    with check_mongo_calls(6):
        _accessible_courses_list(self.request)
def test_course_listing_performance(self):
    """
    Create a large number of courses, grant a fresh non-staff user access
    to some of them, and compare the time to fetch that user's accessible
    courses by traversing all courses versus reversing django groups.
    """
    # Create and log in a non-staff user.
    self.user = UserFactory()
    request = self.factory.get('/course')
    request.user = self.user
    self.client.login(username=self.user.username, password='******')

    # Random course numbers that will be accessible to the user.
    chosen_numbers = set(random.sample(range(TOTAL_COURSES_COUNT), USER_COURSES_COUNT))

    # Create the courses, attaching the user only to the chosen ones.
    for idx in range(TOTAL_COURSES_COUNT):
        location = Location([
            'i4x',
            'Org{0}'.format(idx),
            'Course{0}'.format(idx),
            'course',
            'Run{0}'.format(idx),
        ])
        if idx in chosen_numbers:
            self._create_course_with_access_groups(location, 'group_name_with_dots', self.user)
        else:
            self._create_course_with_access_groups(location, 'group_name_with_dots')

    # Time fetching via iteration over all courses, twice.
    with Timer() as iteration_over_courses_time_1:
        courses_list = _accessible_courses_list(request)
        self.assertEqual(len(courses_list), USER_COURSES_COUNT)
    with Timer() as iteration_over_courses_time_2:
        courses_list = _accessible_courses_list(request)
        self.assertEqual(len(courses_list), USER_COURSES_COUNT)

    # Time fetching via reversed django groups, twice.
    with Timer() as iteration_over_groups_time_1:
        courses_list = _accessible_courses_list_from_groups(request)
        self.assertEqual(len(courses_list), USER_COURSES_COUNT)
    with Timer() as iteration_over_groups_time_2:
        courses_list = _accessible_courses_list_from_groups(request)
        self.assertEqual(len(courses_list), USER_COURSES_COUNT)

    # Reversing groups should never be slower than traversing all courses
    # (accessible courses are relatively few).
    self.assertGreaterEqual(iteration_over_courses_time_1.elapsed, iteration_over_groups_time_1.elapsed)
    self.assertGreaterEqual(iteration_over_courses_time_2.elapsed, iteration_over_groups_time_2.elapsed)
def test_course_listing_performance(self, store, courses_list_from_group_calls, courses_list_calls):
    """
    Create a large number of courses, grant the user access to a random
    subset, then compare the time to fetch the user's accessible courses
    by traversing all courses versus reversing django groups, and pin the
    db-query count of each strategy.
    """
    # Random course numbers that will be accessible to the user.
    accessible_numbers = set(random.sample(range(TOTAL_COURSES_COUNT), USER_COURSES_COUNT))

    # Create every course; only the sampled ones get the user attached.
    with self.store.default_store(store):
        for index in range(TOTAL_COURSES_COUNT):
            course_key = self.store.make_course_key(
                'Org{0}'.format(index),
                'Course{0}'.format(index),
                'Run{0}'.format(index),
            )
            if index in accessible_numbers:
                self._create_course_with_access_groups(course_key, self.user, store=store)
            else:
                self._create_course_with_access_groups(course_key, store=store)

    # Time fetching via iteration over all courses, twice.
    with Timer() as iteration_over_courses_time_1:
        listing, __ = _accessible_courses_list(self.request)
        self.assertEqual(len(listing), USER_COURSES_COUNT)
    with Timer() as iteration_over_courses_time_2:
        listing, __ = _accessible_courses_list(self.request)
        self.assertEqual(len(listing), USER_COURSES_COUNT)

    # Time fetching via reversed django groups, twice.
    with Timer() as iteration_over_groups_time_1:
        listing, __ = _accessible_courses_list_from_groups(self.request)
        self.assertEqual(len(listing), USER_COURSES_COUNT)
    with Timer() as iteration_over_groups_time_2:
        listing, __ = _accessible_courses_list_from_groups(self.request)
        self.assertEqual(len(listing), USER_COURSES_COUNT)

    # Reversing groups should never be slower than traversing all courses
    # (accessible courses are relatively few).
    self.assertGreaterEqual(iteration_over_courses_time_1.elapsed, iteration_over_groups_time_1.elapsed)
    self.assertGreaterEqual(iteration_over_courses_time_2.elapsed, iteration_over_groups_time_2.elapsed)

    # Now count the db queries.
    with check_mongo_calls(courses_list_from_group_calls):
        _accessible_courses_list_from_groups(self.request)
    with check_mongo_calls(courses_list_calls):
        _accessible_courses_list(self.request)
def cooccurrence(
    corpus,
    execnet_hub,
    targets,
    context,
    paths_progress_iter,
    output=('o', 'space.h5', 'The output space file.'),
):
    """Build the co-occurrence matrix."""
    # Sort multi-level indexes up front; presumably required for the
    # index-aligned merges performed by the workers — TODO confirm.
    if targets.index.nlevels > 1:
        targets.sortlevel(inplace=True)
    if context.index.nlevels > 1:
        context.sortlevel(inplace=True)

    def init(channel):
        # Initialization payload for a remote worker: the pickled corpus
        # instance plus the target/context frames it needs to count.
        channel.send(
            (
                'data',
                pickle.dumps(
                    {
                        'kwargs': {
                            'targets': targets,
                            'context': context,
                        },
                        'instance': corpus,
                        'folder_name': 'cooccurrence',
                    },
                )
            )
        )

    # Fan the corpus paths out to the execnet workers.
    results = execnet_hub.run(
        remote_func=sum_folder,
        iterable=paths_progress_iter,
        init_func=init,
    )

    # Drop empty worker results and wrap each in a one-element list so the
    # `[0]` unwrapping below is uniform; seed the accumulator with the
    # first available result.
    results = ([r] for r in results if r is not None)
    result = next(results)[0]

    # Fold the remaining results into the accumulator 100 at a time:
    # concat the chunk with the running result, then re-sum by index.
    for i, chunk in enumerate(chunked(results, 100)):
        logger.info('Received result chunk #%s.', i)
        chunked_result = [c[0] for c in chunk]
        with Timer() as timed:
            result = pd.concat(
                chunked_result + [result],
                copy=False,
            ).groupby(level=result.index.names).sum()
        logger.info(
            'Computed the result by merging a chunk of received results and the result in %.2f seconds.',
            timed.elapsed,
        )

    # Materialize the counts as a flat frame and persist the space.
    result = result.to_frame('count')
    result.reset_index(inplace=True)
    write_space(output, context, targets, result)
def test_exception(self):
    """An exception raised inside the Timer context must propagate out."""
    with self.assertRaises(ValueError):
        with Timer() as timed:
            calc = 1 + 1
            raise ValueError("nopenopenope")
            # Anything after the raise is unreachable by design.
            print("Should never get here.")
def test_typical(self):
    """Sleep half a second under the timer and sanity-check the reading."""
    with Timer() as stopwatch:
        time.sleep(0.5)
    # Ballpark the measured duration around the sleep length.
    self.assertTrue(stopwatch.elapsed > 0.5)
    self.assertTrue(stopwatch.elapsed < 0.75)
    # Unless time suddenly starts flowing backwards...
    self.assertTrue(stopwatch.end > stopwatch.start)
def merge(pairs, what_frame, what_name, time, suffixes=None):
    """Inner-join `what_frame` onto `pairs` and log how long it took."""
    kwargs = {}
    if suffixes:
        kwargs = {'suffixes': suffixes}
    with Timer() as clock:
        merged = pairs.merge(
            what_frame,
            left_on=join_columns(what_frame, what_name),
            right_index=True,
            how='inner',
            **kwargs
        )
    logger.debug(
        '%s merge (%s): %.2f seconds',
        time,
        what_name,
        clock.elapsed,
    )
    return merged
def test_autocomplete_query(self, query, expected_result):
    """Query the autocomplete endpoint and check the expected tags come back."""
    # given (data)
    query_string = {"q": query}

    # when
    with Timer() as stopwatch:
        response = self.app.get(
            API_BASE + "/tags/autocomplete/",
            query_string=query_string,
            **TestTagsearch.post_args)

    # then
    self.assertIsNotNone(response)
    self.assertEqual(200, response.status_code)
    payload = json.loads(response.data)
    logging.info("Queryed tag:" + query)
    logging.info("Fetched tags:" + ", ".join([tag for tag in payload]))
    logging.info("Time spent: {} ms".format(stopwatch.elapsed * 1000))
    for tag in expected_result:
        self.assertTrue(tag in payload)
def test_autocomplete_query(self, query, expected_result):
    """Query the autocomplete endpoint as a regular user and check the
    expected tags come back, logging the request latency in ms."""
    # given (data)
    query_string = {"q": query}

    # when
    with Timer() as timed:
        response = self.response(
            self.app.get(
                self.TAGS_GET,
                query_string=query_string,
                **self.post_args,
                **self.create_user_header(BaseTest.REGULAR_USER)))

    # then
    logging.info("Queried tag:" + query)
    logging.info("Fetched tags:" + ", ".join([tag for tag in response]))
    # Fixed: previously logged str(type(timed.elapsed)) — the *type* of the
    # measurement — instead of the elapsed time in milliseconds.
    logging.info("Time spent: %s ms" % (timed.elapsed * 1000))
    for tag in expected_result:
        self.assertTrue(tag in response)
def cooccurrence(self, path, targets, context):
    """Count word co-occurrence in a corpus file.

    Streams (target, context) pairs out of the file at `path` in chunks of
    `self.chunk_size`, keeps only pairs whose target and context appear in
    the `targets` / `context` frames, and yields per-chunk count Series
    indexed by (id_target, id_context).
    """
    logger.debug('Processing %s', path)

    def join_columns(frame, prefix):
        # Targets or contexts might be just words, not (word, POS) pairs.
        if isinstance(frame.index[0], tuple):
            return prefix, '{}_tag'.format(prefix)
        return (prefix, )

    columns = 'target', 'target_tag', 'context', 'context_tag'

    # Lazily chain co-occurrence pairs from every document in the file.
    # peekable() lets `while target_contexts:` test for exhaustion.
    target_contexts = peekable(
        chain.from_iterable(
            co_occurrences(
                document_words,
                window_size_before=self.window_size_before,
                window_size_after=self.window_size_after,
            )
            for document_words in self.words_by_document(path)
        )
    )

    # NOTE: two unused helper lambdas (`T`, `C`) that normalized tuple vs.
    # plain-word items were removed here — they were never referenced.

    # Merge against the smaller frame first to shrink the pair table early.
    first_frame, first_name = targets, 'target'
    second_frame, second_name = context, 'context'
    if len(context) < len(targets):
        first_frame, first_name, second_frame, second_name = (
            second_frame, second_name, first_frame, first_name)

    while target_contexts:
        some_target_contexts = islice(target_contexts, self.chunk_size)

        with Timer() as timed:
            co_occurrence_pairs = list(some_target_contexts)

        if not co_occurrence_pairs:
            continue

        logger.debug(
            '%s co-occurrence pairs: %.2f seconds',
            len(co_occurrence_pairs),
            timed.elapsed,
        )

        pairs = pd.DataFrame(
            co_occurrence_pairs,
            columns=columns,
        )

        def merge(pairs, what_frame, what_name, time, suffixes=None):
            # Inner-join `what_frame` onto `pairs`, logging the duration.
            kwargs = {'suffixes': suffixes} if suffixes else {}
            with Timer() as timed:
                result = pairs.merge(
                    what_frame,
                    left_on=join_columns(what_frame, what_name),
                    right_index=True,
                    how='inner',
                    **kwargs
                )
            logger.debug(
                '%s merge (%s): %.2f seconds',
                time,
                what_name,
                timed.elapsed,
            )
            return result

        pairs = merge(pairs, first_frame, first_name, 'First')
        pairs = merge(
            pairs,
            second_frame,
            second_name,
            'Second',
            suffixes=('_' + first_name, '_' + second_name),
        )

        with Timer() as timed:
            counts = pairs.groupby(['id_target', 'id_context']).size()
        logger.debug(
            'Summing up: %.2f seconds',
            timed.elapsed,
        )
        logger.debug(
            '%s unique co-occurrence pairs are collected. %s in total.',
            len(counts),
            counts.sum(),
        )

        yield counts
# Demo: capture frames from camera 0, annotate detected markers, and show
# per-frame capture/annotate timings (in ms) on one console line.
from chrono import Timer
from cv2 import imshow, waitKey
from zoloto import assert_has_gui_components
from zoloto.cameras.camera import Camera
from zoloto.marker_dict import MarkerDict

# Fail fast if OpenCV was built without GUI support.
assert_has_gui_components()


class TestCamera(Camera):
    # Concrete subclass; no customization needed for the demo.
    pass


camera = TestCamera(0, marker_dict=MarkerDict.DICT_6X6_50)

# Endless preview loop; terminate the process to stop.
while True:
    with Timer() as capture_timer:
        frame = camera.capture_frame()
    with Timer() as annotate_timer:
        # NOTE(review): uses the private _annotate_frame API — may break on
        # zoloto upgrades.
        camera._annotate_frame(frame)
    imshow("demo", frame)
    waitKey(1)
    print(  # noqa: T001
        round(capture_timer.elapsed * 1000),
        round(annotate_timer.elapsed * 1000),
        end="\r",
    )
# NOTE(review): this dangling close belongs to a function whose start is
# outside this view.
conn.close()


def updatePriorityForPath(path, priority, user="******"):
    # Ask the metaserver to update the priority for `path` and echo the
    # single-line reply.
    conn = Connection(METASERVER)
    conn.write("updatePriorityForPath", path, priority, user)
    print(conn.readline())
    conn.close()


# Ad-hoc benchmark: upload the same small file 100 times concurrently.
from chrono import Timer
from concurrent.futures import *

ex = ThreadPoolExecutor(max_workers=50)

with Timer() as timer:
    def f(i):
        sendFile("small_file.txt", "test/" + str(i))
    # NOTE(review): map() is lazy for results, but shutdown() below waits
    # for all submitted tasks before the timer stops.
    ex.map(f, range(100))
    ex.shutdown()

print("elapsed", timer.elapsed)

exit(0)

# Unreachable: everything after exit(0) never runs.
sendFile("small_file.txt", "ale/file1")
sendFile("small_file.txt", "ale/file2")
sendFile("small_file.txt", "ale/file3")
def on_frame(self, frame: ndarray) -> ndarray:
    """Annotate `frame`, print the annotation time in ms, and return the frame."""
    with Timer() as annotate_timer:
        # NOTE(review): calls the module-level `camera`, not `self` —
        # presumably intentional for this demo, but verify.
        camera._annotate_frame(frame)
    print(round(annotate_timer.elapsed * 1000), end="\r")  # noqa: T001
    return frame
    # NOTE(review): tail of a placeholder-replacement table whose start is
    # outside this view.
    ('%%AD_INTERSTITIAL_ID_ANDROID%%', ad_interstitial_id_android),
    ('%%ONE_LINK%%', one_link),
)

print('initing app')

# Walk the project tree and substitute every %%PLACEHOLDER%% in place.
for dirpath, dirnames, filenames in os.walk('.'):
    # Skip VCS/editor/tooling directories.
    if '.git' in dirpath or '.vscode' in dirpath or '.dart_tool' in dirpath:
        continue
    for file_name in filenames:
        # Don't rewrite this init script itself.
        if 'init.py' == file_name:
            continue
        try:
            with open(os.path.join(dirpath, file_name), 'r+') as file:
                filedata = file.read()
                for pair in to_change:
                    if pair[0] in filedata:
                        filedata = filedata.replace(pair[0], pair[1])
                # Rewrite the file in place with the substituted content.
                file.seek(0)
                file.write(filedata)
                file.truncate()
        except UnicodeDecodeError:
            # Binary file — leave it untouched.
            pass

print('resizing icons')
resize()

print('scrapping content')
with Timer() as scrap_time:
    scrap(scrap_tag, firebase_credentials)
print('scrapping took {0}'.format(scrap_time.elapsed))
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
from chrono import Timer

dur = 31  # 168  # hardcoded: should be d[2]["track"]["duration"]
# Random 30-second window start within the track.
offs = float(np.random.choice(dur - 30))

# Load a mono 30 s excerpt of the sample track, timing the decode.
with Timer() as timed:
    y, sr = librosa.load("/home/cobalt/datasets/fma/fma_small/000/000002.mp3",
                         mono=True, duration=30., offset=offs)
print(f"[TIME] loading: {timed.elapsed}")
print(f"offset: {offs}; length of y: {len(y)}")

hop_length = 2 * 11

print("### computing mel spectrogram ###")
with Timer() as timed:
    S = librosa.feature.melspectrogram(y, sr=sr, n_mels=64, n_fft=2**12,
                                       hop_length=hop_length)
print(f"[TIME] mel: {timed.elapsed}")

# Convert to log scale (dB). We'll use the peak power (max) as reference.
log_S = librosa.power_to_db(S, ref=np.max)

print("### computing onset envelope ###")