def test_get_template_path(self):
    """
    Tests to make sure the get_template_path function works as expected.
    """
    # When the current site has an associated SiteTheme, get_template_path
    # should return the argument unchanged, even inside a microsite.
    with patch(
        "openedx.core.djangoapps.theming.helpers.current_request_has_associated_site_theme",
        Mock(return_value=True),
    ):
        with patch(
            "openedx.core.djangoapps.theming.helpers.microsite.is_request_in_microsite",
            Mock(return_value=True),
        ):
            with patch("microsite_configuration.microsite.TEMPLATES_BACKEND") as mock_microsite_backend:
                mock_microsite_backend.get_template = Mock(return_value="/microsite/about.html")
                self.assertEqual(theming_helpers.get_template_path("about.html"), "about.html")

    RequestCache.clear_all_namespaces()

    # When the current site has no associated SiteTheme, get_template_path
    # should return the microsite override instead.
    with patch(
        "openedx.core.djangoapps.theming.helpers.current_request_has_associated_site_theme",
        Mock(return_value=False),
    ):
        with patch(
            "openedx.core.djangoapps.theming.helpers.microsite.is_request_in_microsite",
            Mock(return_value=True),
        ):
            with patch("microsite_configuration.microsite.TEMPLATES_BACKEND") as mock_microsite_backend:
                mock_microsite_backend.get_template_path = Mock(return_value="/microsite/about.html")
                self.assertEqual(theming_helpers.get_template_path("about.html"), "/microsite/about.html")
def instrument_course_progress_render(
        self, course_width, enable_ccx, view_as_ccx,
        sql_queries, mongo_reads,
):
    """
    Renders the progress page, instrumenting Mongo reads and SQL queries.
    """
    course_key = self.setup_course(course_width, enable_ccx, view_as_ccx)

    # Switch to published-only mode to simulate the LMS
    with self.settings(MODULESTORE_BRANCH='published-only'):
        # Clear all caches before measuring
        for cache in settings.CACHES:
            caches[cache].clear()

        # Refill the metadata inheritance cache
        get_course_in_cache(course_key)

        # We clear the request cache to simulate a new request in the LMS.
        RequestCache.clear_all_namespaces()

        # Reset the list of provider classes, so that our django settings
        # changes can actually take effect.
        OverrideFieldData.provider_classes = None

        with self.assertNumQueries(sql_queries, using='default', table_blacklist=QUERY_COUNT_TABLE_BLACKLIST):
            with self.assertNumQueries(0, using='student_module_history'):
                with self.assertMongoCallCount(mongo_reads):
                    with self.assertXBlockInstantiations(1):
                        self.grade_course(course_key)
def test_request_context_caching(self):
    """
    Test that the RequestContext is cached in the RequestCache.
    """
    with patch(
            'common.djangoapps.edxmako.request_context.get_current_request',
            return_value=None):
        # requestcontext should be None, because the cache isn't filled
        assert get_template_request_context() is None

    with patch(
            'common.djangoapps.edxmako.request_context.get_current_request',
            return_value=self.request):
        # requestcontext should not be None, and should fill the cache
        assert get_template_request_context() is not None

    mock_get_current_request = Mock()
    # FIX: the mock must actually be installed as the patch replacement;
    # previously it was created but never wired in, which made the
    # assert_not_called() below vacuously true.
    with patch(
            'common.djangoapps.edxmako.request_context.get_current_request',
            mock_get_current_request):
        with patch(
                'common.djangoapps.edxmako.request_context.RequestContext.__init__'
        ) as mock_context_init:
            # requestcontext should not be None, because the cache is filled
            assert get_template_request_context() is not None
            mock_context_init.assert_not_called()
    mock_get_current_request.assert_not_called()

    RequestCache.clear_all_namespaces()

    with patch(
            'common.djangoapps.edxmako.request_context.get_current_request',
            return_value=None):
        # requestcontext should be None, because the cache isn't filled
        assert get_template_request_context() is None
def get_group_info_for_cohort(cohort, use_cached=False):
    """
    Get the ids of the group and partition to which this cohort has been linked
    as a tuple of (int, int).

    If the cohort has not been linked to any group/partition, both values in the
    tuple will be None.

    The partition group info is cached for the duration of a request. Pass
    use_cached=True to use the cached value instead of fetching from the
    database.
    """
    cache = RequestCache(u"cohorts.get_group_info_for_cohort").data
    cache_key = six.text_type(cohort.id)

    if use_cached and cache_key in cache:
        return cache[cache_key]

    # Drop any stale entry before refreshing from the database.
    cache.pop(cache_key, None)

    try:
        partition_group = CourseUserGroupPartitionGroup.objects.get(course_user_group=cohort)
        return cache.setdefault(cache_key, (partition_group.group_id, partition_group.partition_id))
    except CourseUserGroupPartitionGroup.DoesNotExist:
        pass

    return cache.setdefault(cache_key, (None, None))
def render(self, context=None, request=None):
    """
    This takes a render call with a context (from Django) and translates
    it to a render call on the mako template.

    When rendering a large sequence of XBlocks, we may end up rendering
    hundreds of small templates. Even if context processors aren't very
    expensive individually, they will quickly add up in that situation. To
    help guard against this, we do context processing once for a given
    request and then cache it.
    """
    context_object = self._get_context_object(request)

    request_cache = RequestCache('context_processors')
    cache_response = request_cache.get_cached_response('cp_output')
    if cache_response.is_found:
        context_dictionary = dict(cache_response.value)
    else:
        context_dictionary = self._get_context_processors_output_dict(context_object)
        # The context_dictionary is later updated with template specific
        # variables. There are potentially hundreds of calls to templates
        # rendering and we don't want them to interfere with each other, so
        # we make a copy from the output of the context processors and then
        # recreate a new dict every time we pull from the cache.
        request_cache.set('cp_output', dict(context_dictionary))

    if isinstance(context, Context):
        context_dictionary.update(context.flatten())
    elif context is not None:
        context_dictionary.update(context)

    self._add_core_context(context_dictionary)
    self._evaluate_lazy_csrf_tokens(context_dictionary)

    return self.mako_template.render_unicode(**context_dictionary)
def _clear_request_cache(**kwargs):
    """
    Once a celery task completes, clear the request cache to
    prevent memory leaks.
    """
    # Opt-out toggle: clearing happens unless explicitly disabled in settings.
    if getattr(settings, 'CLEAR_REQUEST_CACHE_ON_TASK_COMPLETION', True):
        RequestCache.clear_all_namespaces()
def test_request_cached_with_request_cache_getter(self):
    """
    Ensure that calling a decorated function uses request_cache_getter
    if supplied.
    """
    to_be_wrapped = Mock()
    to_be_wrapped.side_effect = [1, 2, 3]
    self.assertEqual(to_be_wrapped.call_count, 0)

    def mock_wrapper(*args, **kwargs):
        """Simple wrapper to let us decorate our mock."""
        return to_be_wrapped(*args, **kwargs)

    request_cache_getter = lambda args, kwargs: RequestCache('test')
    wrapped = request_cached(request_cache_getter=request_cache_getter)(mock_wrapper)

    # This will be a miss, and make an underlying call.
    result = wrapped(1)
    self.assertEqual(result, 1)
    self.assertEqual(to_be_wrapped.call_count, 1)

    # This will be a miss, and make an underlying call.
    result = wrapped(2)
    self.assertEqual(result, 2)
    self.assertEqual(to_be_wrapped.call_count, 2)

    # These will be a hits, and not make an underlying call.
    result = wrapped(1)
    self.assertEqual(result, 1)
    self.assertEqual(to_be_wrapped.call_count, 2)

    # Ensure the appropriate request cache was used
    self.assertFalse(RequestCache().data)
    self.assertTrue(RequestCache('test').data)
def test_setting_override(self, is_enabled, override_choice, expected_result):
    # Start from a clean request cache so the override model is re-read.
    RequestCache.clear_all_namespaces()
    self.set_waffle_course_override(override_choice, is_enabled)
    override_value = WaffleFlagCourseOverrideModel.override_value(
        self.WAFFLE_TEST_NAME, self.TEST_COURSE_KEY)
    self.assertEqual(override_value, expected_result)
def test_request_cached_with_request_cache_getter(self):
    """
    Ensure that calling a decorated function uses request_cache_getter
    if supplied.
    """
    to_be_wrapped = Mock()
    to_be_wrapped.side_effect = [1, 2, 3]
    assert to_be_wrapped.call_count == 0

    def mock_wrapper(*args, **kwargs):
        """Simple wrapper to let us decorate our mock."""
        return to_be_wrapped(*args, **kwargs)

    request_cache_getter = lambda args, kwargs: RequestCache('test')
    wrapped = request_cached(request_cache_getter=request_cache_getter)(
        mock_wrapper)  # lint-amnesty, pylint: disable=no-value-for-parameter

    # This will be a miss, and make an underlying call.
    result = wrapped(1)
    assert result == 1
    assert to_be_wrapped.call_count == 1

    # This will be a miss, and make an underlying call.
    result = wrapped(2)
    assert result == 2
    assert to_be_wrapped.call_count == 2

    # These will be a hits, and not make an underlying call.
    result = wrapped(1)
    assert result == 1
    assert to_be_wrapped.call_count == 2

    # Ensure the appropriate request cache was used
    assert not RequestCache().data
    assert RequestCache('test').data
def test_setting_override_multiple_times(self):
    # The most recent override should win.
    RequestCache.clear_all_namespaces()
    self.set_waffle_course_override(self.OVERRIDE_CHOICES.on)
    self.set_waffle_course_override(self.OVERRIDE_CHOICES.off)
    override_value = WaffleFlagCourseOverrideModel.override_value(
        self.WAFFLE_TEST_NAME, self.TEST_COURSE_KEY)
    self.assertEqual(override_value, self.OVERRIDE_CHOICES.off)
def generate_course_expired_fragment_from_key(user, course_key):
    """
    Like `generate_course_expired_fragment`, but using a CourseKey instead of
    a CourseOverview and using request-level caching.

    Either returns WebFragment to inject XBlock content into, or None if we
    shouldn't show a course expired message for this user.
    """
    request_cache = RequestCache('generate_course_expired_fragment_from_key')
    cache_key = u'message:{},{}'.format(user.id, course_key)
    cache_response = request_cache.get_cached_response(cache_key)
    if cache_response.is_found:
        cached_message = cache_response.value
        # A cached None means we already decided there is nothing to display.
        if cached_message is None:
            return None
        return generate_fragment_from_message(cached_message)

    course = CourseOverview.get_from_id(course_key)
    message = generate_course_expired_message(user, course)
    request_cache.set(cache_key, message)

    if message is None:
        return None
    return generate_fragment_from_message(message)
def test_setting_override(self, is_enabled, override_choice, expected_result):
    # Start from a clean request cache so the override model is re-read.
    RequestCache.clear_all_namespaces()
    self.set_waffle_course_override(override_choice, is_enabled)
    override_value = WaffleFlagCourseOverrideModel.override_value(
        self.WAFFLE_TEST_NAME, self.TEST_COURSE_KEY
    )
    self.assertEqual(override_value, expected_result)
def _decorator(*args, **kwargs):
    """
    Arguments:
        args, kwargs: values passed into the wrapped function
    """
    # Check to see if we have a result in cache.  If not, invoke our wrapped
    # function.  Cache and return the result to the caller.
    if request_cache_getter:
        request_cache = request_cache_getter(args, kwargs)
    else:
        request_cache = RequestCache(namespace)

    if request_cache:
        cache_key = _func_call_cache_key(f, arg_map_function, *args, **kwargs)
        cached_response = request_cache.get_cached_response(cache_key)
        if cached_response.is_found:
            return cached_response.value

    result = f(*args, **kwargs)

    if request_cache:
        request_cache.set(cache_key, result)

    return result
def test_request_cached_with_caches_despite_changing_wrapped_result(self):
    """
    Ensure that after caching a result, we always send it back, even if the
    underlying result changes.
    """
    RequestCache.clear_all_namespaces()
    to_be_wrapped = Mock()
    to_be_wrapped.side_effect = [1, 2, 3]
    self.assertEqual(to_be_wrapped.call_count, 0)

    def mock_wrapper(*args, **kwargs):
        """Simple wrapper to let us decorate our mock."""
        return to_be_wrapped(*args, **kwargs)

    wrapped = request_cached(mock_wrapper)

    # Miss: underlying call made and result cached.
    result = wrapped()
    self.assertEqual(result, 1)
    self.assertEqual(to_be_wrapped.call_count, 1)

    # Hit: cached result returned, no underlying call.
    result = wrapped()
    self.assertEqual(result, 1)
    self.assertEqual(to_be_wrapped.call_count, 1)

    # Calling the undecorated wrapper advances the side_effect sequence.
    direct_result = mock_wrapper()
    self.assertEqual(direct_result, 2)
    self.assertEqual(to_be_wrapped.call_count, 2)

    # The decorated function still returns the originally cached value.
    result = wrapped()
    self.assertEqual(result, 1)
    self.assertEqual(to_be_wrapped.call_count, 2)

    direct_result = mock_wrapper()
    self.assertEqual(direct_result, 3)
    self.assertEqual(to_be_wrapped.call_count, 3)
def test_request_context_caching(self):
    """
    Test that the RequestContext is cached in the RequestCache.
    """
    with patch('edxmako.request_context.get_current_request', return_value=None):
        # requestcontext should be None, because the cache isn't filled
        self.assertIsNone(get_template_request_context())

    with patch('edxmako.request_context.get_current_request', return_value=self.request):
        # requestcontext should not be None, and should fill the cache
        self.assertIsNotNone(get_template_request_context())

    mock_get_current_request = Mock()
    with patch('edxmako.request_context.get_current_request', mock_get_current_request):
        # requestcontext should not be None, because the cache is filled
        self.assertIsNotNone(get_template_request_context())
    mock_get_current_request.assert_not_called()

    RequestCache.clear_all_namespaces()

    with patch('edxmako.request_context.get_current_request', return_value=None):
        # requestcontext should be None, because the cache isn't filled
        self.assertIsNone(get_template_request_context())
def bulk_cache_cohorts(course_key, users):
    """
    Pre-fetches and caches the cohort assignments for the given users, for
    later fast retrieval by get_cohort.
    """
    # before populating the cache with another bulk set of data,
    # remove previously cached entries to keep memory usage low.
    RequestCache(COHORT_CACHE_NAMESPACE).clear()

    cache = RequestCache(COHORT_CACHE_NAMESPACE).data
    if is_course_cohorted(course_key):
        cohorts_by_user = {
            membership.user: membership
            for membership in
            CohortMembership.objects.filter(user__in=users, course_id=course_key).select_related('user')
        }
        for user, membership in six.iteritems(cohorts_by_user):
            cache[_cohort_cache_key(user.id, course_key)] = membership.course_user_group
        uncohorted_users = [u for u in users if u not in cohorts_by_user]
    else:
        uncohorted_users = users

    # Cache an explicit None for users without a cohort (or when the course
    # is not cohorted at all) so get_cohort doesn't re-query for them.
    for user in uncohorted_users:
        cache[_cohort_cache_key(user.id, course_key)] = None
def test_caching_org(self):
    course = CourseOverviewFactory.create(org='test-org')
    site_cfg = SiteConfigurationFactory.create(
        site_values={'course_org_filter': course.org},
        values={'course_org_filter': course.org})
    org_config = CourseDurationLimitConfig(
        org=course.org, enabled=True, enabled_as_of=datetime(2018, 1, 1))
    org_config.save()
    RequestCache.clear_all_namespaces()

    # Check that the org value is not retrieved from cache after save
    with self.assertNumQueries(2):
        self.assertTrue(CourseDurationLimitConfig.current(org=course.org).enabled)

    RequestCache.clear_all_namespaces()

    # Check that the org value can be retrieved from cache after read
    with self.assertNumQueries(0):
        self.assertTrue(CourseDurationLimitConfig.current(org=course.org).enabled)

    org_config.enabled = False
    org_config.save()
    RequestCache.clear_all_namespaces()

    # Check that the org value in cache was deleted on save
    with self.assertNumQueries(2):
        self.assertFalse(CourseDurationLimitConfig.current(org=course.org).enabled)

    global_config = CourseDurationLimitConfig(
        enabled=True, enabled_as_of=datetime(2018, 1, 1))
    global_config.save()
    RequestCache.clear_all_namespaces()

    # Check that the org value is not updated in cache by changing the global value
    with self.assertNumQueries(0):
        self.assertFalse(CourseDurationLimitConfig.current(org=course.org).enabled)

    site_config = CourseDurationLimitConfig(
        site=site_cfg.site, enabled=True, enabled_as_of=datetime(2018, 1, 1))
    site_config.save()
    RequestCache.clear_all_namespaces()

    # Check that the org value is not updated in cache by changing the site value
    with self.assertNumQueries(0):
        self.assertFalse(CourseDurationLimitConfig.current(org=course.org).enabled)
def test_setting_override_multiple_times(self):
    # The most recent override should win.
    RequestCache.clear_all_namespaces()
    self.set_waffle_course_override(self.OVERRIDE_CHOICES.on)
    self.set_waffle_course_override(self.OVERRIDE_CHOICES.off)
    override_value = WaffleFlagCourseOverrideModel.override_value(
        self.WAFFLE_TEST_NAME, self.TEST_COURSE_KEY
    )
    self.assertEqual(override_value, self.OVERRIDE_CHOICES.off)
def test_too_many_courses(self):
    """
    Test that search results are limited to 100 courses, and that they don't
    blow up the database.
    """
    ContentTypeGatingConfig.objects.create(
        enabled=True,
        enabled_as_of=datetime(2018, 1, 1),
    )
    CourseDurationLimitConfig.objects.create(
        enabled=True,
        enabled_as_of=datetime(2018, 1, 1),
    )

    course_ids = []

    # Create 300 courses across 30 organizations
    for org_num in range(10):
        org_id = f'org{org_num}'
        for course_num in range(30):
            course_name = f'course{org_num}.{course_num}'
            course_run_name = f'run{org_num}.{course_num}'
            course = CourseFactory.create(
                org=org_id, number=course_name, run=course_run_name, emit_signals=True)
            CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.AUDIT)
            CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.VERIFIED)
            course_ids.append(course.id)

    self.setup_user(self.audit_user)

    # These query counts were found empirically
    query_counts = [50, 46, 46, 46, 46, 46, 46, 46, 46, 46, 16]
    ordered_course_ids = sorted(
        [str(cid) for cid in (course_ids + [c.id for c in self.courses])])

    self.clear_caches()

    for page in range(1, 12):
        RequestCache.clear_all_namespaces()
        with self.assertNumQueries(query_counts[page - 1], table_ignorelist=WAFFLE_TABLES):
            response = self.verify_response(params={'page': page, 'page_size': 30})

            assert 'results' in response.data
            assert response.data['pagination']['count'] == 303
            assert len(response.data['results']) == (30 if (page < 11) else 3)
            assert [c['id'] for c in response.data['results']] == \
                ordered_course_ids[((page - 1) * 30):(page * 30)]
def test_too_many_courses(self):
    """
    Test that search results are limited to 100 courses, and that they don't
    blow up the database.
    """
    ContentTypeGatingConfig.objects.create(
        enabled=True,
        enabled_as_of=datetime(2018, 1, 1),
    )
    CourseDurationLimitConfig.objects.create(
        enabled=True,
        enabled_as_of=datetime(2018, 1, 1),
    )

    course_ids = []

    # Create 300 courses across 30 organizations
    for org_num in range(10):
        org_id = 'org{}'.format(org_num)
        for course_num in range(30):
            course_name = 'course{}.{}'.format(org_num, course_num)
            course_run_name = 'run{}.{}'.format(org_num, course_num)
            course = CourseFactory.create(
                org=org_id, number=course_name, run=course_run_name, emit_signals=True)
            CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.AUDIT)
            CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.VERIFIED)
            course_ids.append(course.id)

    self.setup_user(self.audit_user)

    # These query counts were found empirically
    query_counts = [63, 50, 50, 50, 50, 50, 50, 50, 50, 50, 20]
    ordered_course_ids = sorted(
        [str(cid) for cid in (course_ids + [c.id for c in self.courses])])

    self.clear_caches()

    for page in range(1, 12):
        RequestCache.clear_all_namespaces()
        with self.assertNumQueries(query_counts[page - 1]):
            response = self.verify_response(params={'page': page, 'page_size': 30})

            self.assertIn('results', response.data)
            self.assertEqual(response.data['pagination']['count'], 303)
            self.assertEqual(len(response.data['results']), 30 if page < 11 else 3)
            self.assertEqual(
                [c['id'] for c in response.data['results']],
                ordered_course_ids[(page - 1) * 30:page * 30])
def lti_consumer_fields_editing_flag(course_id, enabled_for_course=False):
    """
    Yields CourseEditLTIFieldsEnabledFlag record for unit tests

    Arguments:
        course_id (CourseLocator): course locator to control this feature for.
        enabled_for_course (bool): whether feature is enabled for 'course_id'
    """
    # Flush request-scoped caches so the new flag row is actually consulted.
    RequestCache.clear_all_namespaces()
    CourseEditLTIFieldsEnabledFlag.objects.create(course_id=course_id, enabled=enabled_for_course)
    yield
def setUp(self):
    super().setUp()
    flag_name = "test_namespace.test_flag"
    self.waffle_flag = WaffleFlag(flag_name, __name__)

    # Install a fake current request so flag evaluation has request scope.
    request = RequestFactory().request()
    crum.set_current_request(request)
    RequestCache.clear_all_namespaces()
    self.addCleanup(crum.set_current_request, None)
    self.addCleanup(RequestCache.clear_all_namespaces)
def assert_access_to_gated_content(self, user):
    """
    Verifies access to gated content for the given user is as expected.
    """
    # clear the request cache to flush any cached access results
    RequestCache.clear_all_namespaces()

    # access to gating content (seq1) remains constant
    self.assertTrue(bool(has_access(user, 'load', self.seq1, self.course.id)))

    # access to gated content (seq2) remains constant, access is prevented in SeqModule loading
    self.assertTrue(bool(has_access(user, 'load', self.seq2, self.course.id)))
def assert_access_to_gated_content(self, user):
    """
    Verifies access to gated content for the given user is as expected.
    """
    # clear the request cache to flush any cached access results
    RequestCache.clear_all_namespaces()

    # access to gating content (seq1) remains constant
    assert bool(has_access(user, 'load', self.seq1, self.course.id))

    # access to gated content (seq2) remains constant, access is prevented in SeqModule loading
    assert bool(has_access(user, 'load', self.seq2, self.course.id))
def dump_courses_to_neo4j(self, connection_overrides=None, override_cache=False):
    """
    Method that iterates through a list of courses in a modulestore,
    serializes them, then submits tasks to write them to neo4j.

    Arguments:
        connection_overrides (dict): overrides to Neo4j connection
            parameters specified in `settings.COURSEGRAPH_CONNECTION`.
        override_cache: serialize the courses even if they'be been recently
            serialized

    Returns: two lists--one of the courses that were successfully written
        to neo4j and one of courses that were not.
    """
    total_number_of_courses = len(self.course_keys)

    submitted_courses = []
    skipped_courses = []

    graph = authenticate_and_create_graph(connection_overrides)

    for index, course_key in enumerate(self.course_keys):
        # first, clear the request cache to prevent memory leaks
        RequestCache.clear_all_namespaces()

        (needs_dump, reason) = should_dump_course(course_key, graph)
        if not (override_cache or needs_dump):
            log.info("skipping submitting %s, since it hasn't changed", course_key)
            skipped_courses.append(str(course_key))
            continue

        if override_cache:
            reason = "override_cache is True"

        log.info(
            "Now submitting %s for export to neo4j, because %s: course %d of %d total courses",
            course_key,
            reason,
            index + 1,
            total_number_of_courses,
        )

        dump_course_to_neo4j.apply_async(kwargs=dict(
            course_key_string=str(course_key),
            connection_overrides=connection_overrides,
        ))
        submitted_courses.append(str(course_key))

    return submitted_courses, skipped_courses
def setUp(self):
    super().setUp()
    flag_name = "test_namespace.test_flag"
    self.waffle_flag = WaffleFlag(  # lint-amnesty, pylint: disable=toggle-missing-annotation
        flag_name, __name__
    )

    # Install a fake current request so flag evaluation has request scope.
    request = RequestFactory().request()
    crum.set_current_request(request)
    RequestCache.clear_all_namespaces()
    self.addCleanup(crum.set_current_request, None)
    self.addCleanup(RequestCache.clear_all_namespaces)
def test_caching_site(self):
    site_cfg = SiteConfigurationFactory()
    site_config = ContentTypeGatingConfig(
        site=site_cfg.site, enabled=True, enabled_as_of=datetime(2018, 1, 1))
    site_config.save()
    RequestCache.clear_all_namespaces()

    # Check that the site value is not retrieved from cache after save
    with self.assertNumQueries(1):
        self.assertTrue(ContentTypeGatingConfig.current(site=site_cfg.site).enabled)

    RequestCache.clear_all_namespaces()

    # Check that the site value can be retrieved from cache after read
    with self.assertNumQueries(0):
        self.assertTrue(ContentTypeGatingConfig.current(site=site_cfg.site).enabled)

    site_config.enabled = False
    site_config.save()
    RequestCache.clear_all_namespaces()

    # Check that the site value in cache was deleted on save
    with self.assertNumQueries(1):
        self.assertFalse(ContentTypeGatingConfig.current(site=site_cfg.site).enabled)

    global_config = ContentTypeGatingConfig(enabled=True, enabled_as_of=datetime(2018, 1, 1))
    global_config.save()
    RequestCache.clear_all_namespaces()

    # Check that the site value is not updated in cache by changing the global value
    with self.assertNumQueries(0):
        self.assertFalse(ContentTypeGatingConfig.current(site=site_cfg.site).enabled)
def test_caching_site(self):
    site_cfg = SiteConfigurationFactory()
    site_config = CourseDurationLimitConfig(
        site=site_cfg.site, enabled=True, enabled_as_of=datetime(2018, 1, 1))
    site_config.save()
    RequestCache.clear_all_namespaces()

    # Check that the site value is not retrieved from cache after save
    with self.assertNumQueries(1):
        self.assertTrue(CourseDurationLimitConfig.current(site=site_cfg.site).enabled)

    RequestCache.clear_all_namespaces()

    # Check that the site value can be retrieved from cache after read
    with self.assertNumQueries(0):
        self.assertTrue(CourseDurationLimitConfig.current(site=site_cfg.site).enabled)

    site_config.enabled = False
    site_config.save()
    RequestCache.clear_all_namespaces()

    # Check that the site value in cache was deleted on save
    with self.assertNumQueries(1):
        self.assertFalse(CourseDurationLimitConfig.current(site=site_cfg.site).enabled)

    global_config = CourseDurationLimitConfig(enabled=True, enabled_as_of=datetime(2018, 1, 1))
    global_config.save()
    RequestCache.clear_all_namespaces()

    # Check that the site value is not updated in cache by changing the global value
    with self.assertNumQueries(0):
        self.assertFalse(CourseDurationLimitConfig.current(site=site_cfg.site).enabled)
def test_too_many_courses(self):
    """
    Test that search results are limited to 100 courses, and that they don't
    blow up the database.
    """
    ContentTypeGatingConfig.objects.create(
        enabled=True,
        enabled_as_of=datetime(2018, 1, 1),
    )
    CourseDurationLimitConfig.objects.create(
        enabled=True,
        enabled_as_of=datetime(2018, 1, 1),
    )

    course_ids = []

    # Create 300 courses across 30 organizations
    for org_num in range(10):
        org_id = 'org{}'.format(org_num)
        for course_num in range(30):
            course_name = 'course{}.{}'.format(org_num, course_num)
            course_run_name = 'run{}.{}'.format(org_num, course_num)
            course = CourseFactory.create(
                org=org_id, number=course_name, run=course_run_name, emit_signals=True)
            CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.AUDIT)
            CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.VERIFIED)
            course_ids.append(course.id)

    self.setup_user(self.audit_user)

    # These query counts were found empirically
    query_counts = [63, 50, 50, 50, 50, 50, 50, 50, 50, 50, 20]
    ordered_course_ids = sorted([str(cid) for cid in (course_ids + [c.id for c in self.courses])])

    self.clear_caches()

    for page in range(1, 12):
        RequestCache.clear_all_namespaces()
        with self.assertNumQueries(query_counts[page - 1]):
            response = self.verify_response(params={'page': page, 'page_size': 30})

            self.assertIn('results', response.data)
            self.assertEqual(response.data['pagination']['count'], 303)
            self.assertEqual(len(response.data['results']), 30 if page < 11 else 3)
            self.assertEqual(
                [c['id'] for c in response.data['results']],
                ordered_course_ids[(page - 1) * 30:page * 30]
            )
def setUp(self):
    """
    Set up test data
    """
    super(ProgramCourseEnrollmentModelTests, self).setUp()
    RequestCache.clear_all_namespaces()
    self.user = UserFactory.create()
    self.program_uuid = uuid4()
    self.program_enrollment = ProgramEnrollment.objects.create(
        user=self.user,
        external_user_key='abc',
        program_uuid=self.program_uuid,
        curriculum_uuid=uuid4(),
        status='enrolled')
    self.course_key = CourseKey.from_string(generate_course_run_key())
    CourseOverviewFactory(id=self.course_key)
def persistent_grades_feature_flags(global_flag, enabled_for_all_courses=False,
                                    course_id=None, enabled_for_course=False):
    """
    Most test cases will use a single call to this manager,
    as they need to set the global setting and the course-specific
    setting for a single course.
    """
    # Flush request-scoped caches so the new flag rows are actually consulted.
    RequestCache.clear_all_namespaces()
    PersistentGradesEnabledFlag.objects.create(
        enabled=global_flag, enabled_for_all_courses=enabled_for_all_courses)
    if course_id:
        CoursePersistentGradesFlag.objects.create(course_id=course_id, enabled=enabled_for_course)
    yield
def _drop_database(self, database=True, collections=True, connections=True):
    """
    A destructive operation to drop the underlying database and close all connections.
    Intended to be used by test code for cleanup.

    If database is True, then this should drop the entire database.
    Otherwise, if collections is True, then this should drop all of the
    collections used by this modulestore.
    Otherwise, the modulestore should remove all data from the collections.

    If connections is True, then close the connection to the database as well.
    """
    # Invalidate the request-scoped course index cache before destroying data.
    RequestCache(namespace="course_index_cache").clear()

    connection = self.database.client

    if database:
        connection.drop_database(self.database.name)
    elif collections:
        self.course_index.drop()
        self.structures.drop()
        self.definitions.drop()
    else:
        self.course_index.remove({})
        self.structures.remove({})
        self.definitions.remove({})

    if connections:
        connection.close()
def __init__(
    self, db, collection, host, port=27017, tz_aware=True, user=None, password=None,
    asset_collection=None, retry_wait_time=0.1, **kwargs  # lint-amnesty, pylint: disable=unused-argument
):
    """
    Create & open the connection, authenticate, and provide pointers to the collections
    """
    # Set a write concern of 1, which makes writes complete successfully to the primary
    # only before returning. Also makes pymongo report write errors.
    kwargs['w'] = 1

    # make sure the course index cache is fresh.
    RequestCache(namespace="course_index_cache").clear()

    self.database = connect_to_mongodb(
        db, host,
        port=port, tz_aware=tz_aware, user=user, password=password,
        retry_wait_time=retry_wait_time, **kwargs
    )

    self.course_index = self.database[collection + '.active_versions']
    self.structures = self.database[collection + '.structures']
    self.definitions = self.database[collection + '.definitions']
def inner_wrapper(*args, **kwargs):
    """
    Wrapper function to decorate with.
    """
    # Check to see if we have a result in cache.  If not, invoke our wrapped
    # function.  Cache and return the result to the caller.
    request_cache = RequestCache(namespace)
    cache_key = _func_call_cache_key(f, *args, **kwargs)
    cached_response = request_cache.get_cached_response(cache_key)
    if cached_response.is_found:
        return cached_response.value

    result = f(*args, **kwargs)
    request_cache.set(cache_key, result)
    return result
def clear_caches(cls):
    """
    Clear all of the caches defined in settings.CACHES.
    """
    # N.B. As of 2016-04-20, Django won't return any caches
    # from django.core.cache.caches.all() that haven't been
    # accessed using caches[name] previously, so we loop
    # over our list of overridden caches, instead.
    for cache in settings.CACHES:
        caches[cache].clear()

    # The sites framework caches in a module-level dictionary.
    # Clear that.
    sites.models.SITE_CACHE.clear()

    RequestCache.clear_all_namespaces()
def get_template_request_context(request=None):
    """
    Returns the template processing context to use for the current request,
    or returns None if there is not a current request.
    """
    if request is None:
        request = get_current_request()

    if request is None:
        return None

    request_cache_dict = RequestCache('edxmako').data
    cache_key = "request_context"
    if cache_key in request_cache_dict:
        return request_cache_dict[cache_key]

    context = RequestContext(request)

    context['is_secure'] = request.is_secure()
    context['site'] = safe_get_host(request)

    request_cache_dict[cache_key] = context

    return context
def persistent_grades_feature_flags(
        global_flag,
        enabled_for_all_courses=False,
        course_id=None,
        enabled_for_course=False
):
    """
    Most test cases will use a single call to this manager,
    as they need to set the global setting and the course-specific
    setting for a single course.
    """
    # Flush request-scoped caches so the new flag rows are actually consulted.
    RequestCache.clear_all_namespaces()
    PersistentGradesEnabledFlag.objects.create(
        enabled=global_flag, enabled_for_all_courses=enabled_for_all_courses)
    if course_id:
        CoursePersistentGradesFlag.objects.create(course_id=course_id, enabled=enabled_for_course)
    yield
def setUp(self):
    """
    Set up test data
    """
    super(ProgramCourseEnrollmentModelTests, self).setUp()
    RequestCache.clear_all_namespaces()
    self.user = UserFactory.create()
    self.program_uuid = uuid4()
    self.program_enrollment = ProgramEnrollment.objects.create(
        user=self.user,
        external_user_key='abc',
        program_uuid=self.program_uuid,
        curriculum_uuid=uuid4(),
        status='enrolled'
    )
    self.course_key = CourseKey.from_string(generate_course_run_key())
    CourseOverviewFactory(id=self.course_key)
def test_caching_org(self):
    """
    Verify request-cache behavior of org-scoped ContentTypeGatingConfig:
    saves invalidate the cached value, reads repopulate it, and changes at
    other scopes (global, site) do not disturb the cached org value.
    """
    overview = CourseOverviewFactory.create(org='test-org')
    site_configuration = SiteConfigurationFactory.create(values={'course_org_filter': overview.org})

    per_org_config = ContentTypeGatingConfig(org=overview.org, enabled=True, enabled_as_of=datetime(2018, 1, 1))
    per_org_config.save()
    RequestCache.clear_all_namespaces()

    # After a save the org value must come from the database, not the cache.
    with self.assertNumQueries(2):
        self.assertTrue(ContentTypeGatingConfig.current(org=overview.org).enabled)
    RequestCache.clear_all_namespaces()

    # A second read within the same request is served from cache.
    with self.assertNumQueries(0):
        self.assertTrue(ContentTypeGatingConfig.current(org=overview.org).enabled)

    per_org_config.enabled = False
    per_org_config.save()
    RequestCache.clear_all_namespaces()

    # Saving again evicts the stale cached org value.
    with self.assertNumQueries(2):
        self.assertFalse(ContentTypeGatingConfig.current(org=overview.org).enabled)

    global_flag_config = ContentTypeGatingConfig(enabled=True, enabled_as_of=datetime(2018, 1, 1))
    global_flag_config.save()
    RequestCache.clear_all_namespaces()

    # Changing the global value must not touch the cached org value.
    with self.assertNumQueries(0):
        self.assertFalse(ContentTypeGatingConfig.current(org=overview.org).enabled)

    per_site_config = ContentTypeGatingConfig(site=site_configuration.site, enabled=True, enabled_as_of=datetime(2018, 1, 1))
    per_site_config.save()
    RequestCache.clear_all_namespaces()

    # Changing the site value must not touch the cached org value either.
    with self.assertNumQueries(0):
        self.assertFalse(ContentTypeGatingConfig.current(org=overview.org).enabled)
def dump_courses_to_neo4j(self, credentials, override_cache=False):
    """
    Iterate through the modulestore courses and submit a serialization task
    to write each one to neo4j.

    Arguments:
        credentials (dict): the necessary credentials to connect to neo4j
            and create a py2neo `Graph` object
        override_cache: serialize the courses even if they've been recently
            serialized

    Returns:
        two lists--one of the courses that were successfully written to
        neo4j and one of courses that were not.
    """
    course_total = len(self.course_keys)
    submitted = []
    skipped = []

    graph = authenticate_and_create_graph(credentials)

    for position, course_key in enumerate(self.course_keys, start=1):
        # Clear the request cache each iteration to prevent memory leaks.
        RequestCache.clear_all_namespaces()

        log.info(
            "Now submitting %s for export to neo4j: course %d of %d total courses",
            course_key,
            position,
            course_total,
        )

        # Skip unchanged courses unless the caller forces a re-dump.
        if not override_cache and not should_dump_course(course_key, graph):
            log.info("skipping submitting %s, since it hasn't changed", course_key)
            skipped.append(six.text_type(course_key))
            continue

        dump_course_to_neo4j.apply_async(
            args=[six.text_type(course_key), credentials],
        )
        submitted.append(six.text_type(course_key))

    return submitted, skipped
def test_caching_global(self):
    """
    Verify request-cache behavior of the global ContentTypeGatingConfig:
    a save invalidates the cached value and a read repopulates it.
    """
    global_flag_config = ContentTypeGatingConfig(enabled=True, enabled_as_of=datetime(2018, 1, 1))
    global_flag_config.save()
    RequestCache.clear_all_namespaces()

    # After a save the global value must come from the database, not the cache.
    with self.assertNumQueries(1):
        self.assertTrue(ContentTypeGatingConfig.current().enabled)
    RequestCache.clear_all_namespaces()

    # A second read within the same request is served from cache.
    with self.assertNumQueries(0):
        self.assertTrue(ContentTypeGatingConfig.current().enabled)

    global_flag_config.enabled = False
    global_flag_config.save()
    RequestCache.clear_all_namespaces()

    # Saving again evicts the stale cached global value.
    with self.assertNumQueries(1):
        self.assertFalse(ContentTypeGatingConfig.current().enabled)
def setUp(self):
    """
    Reset request caches and create the catalog program fixture.
    """
    super(SocialAuthEnrollmentCompletionSignalTest, self).setUp()
    RequestCache.clear_all_namespaces()

    catalog_organization = CatalogOrganizationFactory.create(key=self.organization.short_name)
    self.program_uuid = self._create_catalog_program(catalog_organization)['uuid']
def get_cohort(user, course_key, assign=True, use_cached=False):
    """
    Returns the user's cohort for the specified course.

    The cohort for the user is cached for the duration of a request. Pass
    use_cached=True to use the cached value instead of fetching from the
    database.

    Arguments:
        user: a Django User object.
        course_key: CourseKey
        assign (bool): if False then we don't assign a group to user
        use_cached (bool): Whether to use the cached value or fetch from database.

    Returns:
        A CourseUserGroup object if the course is cohorted and the User has a
        cohort, else None.

    Raises:
        ValueError if the CourseKey doesn't exist.
    """
    # Anonymous users can never belong to a cohort.
    if user.is_anonymous:
        return None

    cache = RequestCache(COHORT_CACHE_NAMESPACE).data
    cache_key = _cohort_cache_key(user.id, course_key)

    if use_cached and cache_key in cache:
        return cache[cache_key]

    # Not using (or not finding) a cached value: drop any stale entry before
    # recomputing, so setdefault below stores the fresh result.
    cache.pop(cache_key, None)

    # First check whether the course is cohorted (users shouldn't be in a cohort
    # in non-cohorted courses, but settings can change after course starts)
    if not is_course_cohorted(course_key):
        return cache.setdefault(cache_key, None)

    # If course is cohorted, check if the user already has a cohort.
    try:
        membership = CohortMembership.objects.get(
            course_id=course_key,
            user_id=user.id,
        )
        return cache.setdefault(cache_key, membership.course_user_group)
    except CohortMembership.DoesNotExist:
        # Didn't find the group. If we do not want to assign, return here.
        if not assign:
            # Do not cache the cohort here, because in the next call assign
            # may be True, and we will have to assign the user a cohort.
            return None

        # Otherwise assign the user a cohort.
        try:
            # If learner has been pre-registered in a cohort, get that cohort. Otherwise assign to a random cohort.
            course_user_group = None
            # Consume at most one pre-registration; the for/else falls through
            # to a random cohort only when no assignment row exists.
            for assignment in UnregisteredLearnerCohortAssignments.objects.filter(email=user.email, course_id=course_key):
                course_user_group = assignment.course_user_group
                assignment.delete()
                break
            else:
                course_user_group = get_random_cohort(course_key)
            add_user_to_cohort(course_user_group, user)
            return course_user_group
        except ValueError:
            # user already in cohort
            return course_user_group
        except IntegrityError as integrity_error:
            # An IntegrityError is raised when multiple workers attempt to
            # create the same row in one of the cohort model entries:
            # CourseCohort, CohortMembership.
            log.info(
                u"HANDLING_INTEGRITY_ERROR: IntegrityError encountered for course '%s' and user '%s': %s",
                course_key, user.id, six.text_type(integrity_error)
            )
            # Another worker won the race; retry the lookup (the membership
            # should now exist).
            return get_cohort(user, course_key, assign, use_cached)
def setUp(self):
    """
    Attach a fresh request to crum and start from an empty request cache.
    """
    super(TestCourseWaffleFlag, self).setUp()

    fake_request = RequestFactory().request()
    # Make sure the request is detached again once the test finishes.
    self.addCleanup(crum.set_current_request, None)
    crum.set_current_request(fake_request)

    RequestCache.clear_all_namespaces()
def setUp(self):
    """
    Start every test from an empty request cache.
    """
    RequestCache.clear_all_namespaces()