def test_remove_content_xml(self):
    """Ensure ?remove_content_xml=true strips content_xml from the payload."""
    detail_url = "{repo_base}{repo_slug}/learning_resources/{lr_id}/".format(
        repo_base=REPO_BASE,
        repo_slug=self.repo.slug,
        lr_id=self.resource.id,
    )
    full_dict = as_json(self.client.get(detail_url))
    self.assertTrue("content_xml" in full_dict)

    stripped_dict = as_json(
        self.client.get(detail_url + "?remove_content_xml=true")
    )
    self.assertFalse("content_xml" in stripped_dict)

    # Aside from the content_xml key, both responses must be identical.
    del full_dict["content_xml"]
    self.assertEqual(stripped_dict, full_dict)
def test_vocabulary_pagination(self):
    """Test pagination for collections.

    The API makes no guarantee about the order vocabularies are returned
    in, so both the expected data and the API results are sorted by id
    before comparison to keep this test deterministic.
    """
    expected = [
        VocabularySerializer(Vocabulary.objects.create(
            repository=self.repo,
            name="name{i}".format(i=i),
            description="description",
            required=True,
            vocabulary_type=Vocabulary.FREE_TAGGING,
            weight=1000,
        )).data
        for i in range(40)
    ]
    expected.sort(key=lambda x: x["id"])
    page_size = 20

    resp = self.client.get(
        '{repo_base}{repo_slug}/vocabularies/'.format(
            repo_slug=self.repo.slug,
            repo_base=REPO_BASE,
        ))
    self.assertEqual(HTTP_200_OK, resp.status_code)
    vocabularies = as_json(resp)
    self.assertEqual(40, vocabularies['count'])
    # Sort the page's results by id so the comparison does not depend on
    # whatever ordering the backend happened to use.
    from_api = sorted(vocabularies['results'], key=lambda x: x["id"])
    self.assertEqual(page_size, len(from_api))
    self.assertEqual(expected[:page_size], from_api)

    resp = self.client.get(
        '{repo_base}{repo_slug}/vocabularies/?page=2'.format(
            repo_slug=self.repo.slug,
            repo_base=REPO_BASE,
        ))
    self.assertEqual(HTTP_200_OK, resp.status_code)
    vocabularies = as_json(resp)
    self.assertEqual(40, vocabularies['count'])
    from_api = sorted(vocabularies['results'], key=lambda x: x["id"])
    self.assertEqual(page_size, len(from_api))
    self.assertEqual(expected[page_size:page_size * 2], from_api)
def test_vocabulary_pagination(self):
    """Test pagination for collections.

    Expected data and API results are both sorted by id before comparing,
    since the API's ordering is not guaranteed.
    """
    # Sanity check: the repository must start with no vocabularies.
    vocabs = Vocabulary.objects.filter(
        repository__id=self.repo.id).order_by('id')
    self.assertEqual(vocabs.count(), 0)
    expected = [
        VocabularySerializer(Vocabulary.objects.create(
            repository=self.repo,
            name="name{i}".format(i=i),
            description="description",
            required=True,
            vocabulary_type=Vocabulary.FREE_TAGGING,
            weight=1000,
        )).data
        for i in range(40)
    ]
    # Ordering by ID is required, because due to the ORM's laziness the
    # API may hand results back in a different order. Sort once here
    # (the original sorted the same list twice with the same key).
    expected.sort(key=lambda x: x["id"])
    expected_count = 20

    resp = self.client.get(
        '{repo_base}{repo_slug}/vocabularies/'.format(
            repo_slug=self.repo.slug,
            repo_base=REPO_BASE,
        ))
    self.assertEqual(HTTP_200_OK, resp.status_code)
    vocabularies = as_json(resp)
    self.assertEqual(40, vocabularies['count'])
    from_api = sorted(vocabularies['results'], key=lambda x: x["id"])
    self.assertEqual(expected_count, len(from_api))
    self.assertEqual(
        expected[:expected_count],
        from_api,
    )

    resp = self.client.get(
        '{repo_base}{repo_slug}/vocabularies/?page=2'.format(
            repo_slug=self.repo.slug,
            repo_base=REPO_BASE,
        ))
    self.assertEqual(HTTP_200_OK, resp.status_code)
    vocabularies = as_json(resp)
    from_api = sorted(vocabularies['results'], key=lambda x: x["id"])
    self.assertEqual(expected_count, len(from_api))
    self.assertEqual(40, vocabularies['count'])
    self.assertEqual(
        from_api,
        expected[expected_count:expected_count * 2],
    )
def test_term_pagination(self):
    """Test pagination for collections"""
    vocab_slug = self.create_vocabulary(self.repo.slug)['slug']
    created = []
    for i in range(40):
        created.append(self.create_term(
            self.repo.slug,
            vocab_slug,
            {"label": "name{i}".format(i=i), "weight": 1000},
        ))

    # First page comes back from the helper.
    first_page = self.get_terms(self.repo.slug, vocab_slug)
    self.assertEqual(40, first_page['count'])
    self.assertEqual(
        [TermSerializer(term).data for term in created[:20]],
        first_page['results'])

    # Second page is fetched directly via the API.
    resp = self.client.get(
        '{repo_base}{repo_slug}/vocabularies/'
        '{vocab_slug}/terms/?page=2'.format(
            repo_slug=self.repo.slug,
            repo_base=REPO_BASE,
            vocab_slug=vocab_slug,
        ))
    self.assertEqual(HTTP_200_OK, resp.status_code)
    second_page = as_json(resp)
    self.assertEqual(40, second_page['count'])
    self.assertEqual(
        [TermSerializer(term).data for term in created[20:40]],
        second_page['results'])
def test_repository_pagination(self):
    """Test pagination for collections"""
    created = [
        self.create_repository({
            'name': "name{i}".format(i=i),
            "description": "description",
        })
        for i in range(40)
    ]

    repositories = self.get_repositories()
    # 40 we created + self.repo
    self.assertEqual(41, repositories['count'])
    # self.repo occupies slot 0, so slots 1-19 hold the first 19 created.
    self.assertEqual(
        [RepositorySerializer(Repository.objects.get(id=entry['id'])).data
         for entry in created[:19]],
        repositories['results'][1:20])

    resp = self.client.get(
        '{repo_base}?page=2'.format(
            repo_base=REPO_BASE,
        ))
    self.assertEqual(HTTP_200_OK, resp.status_code)
    repositories = as_json(resp)
    self.assertEqual(41, repositories['count'])
    self.assertEqual(
        [RepositorySerializer(Repository.objects.get(id=entry['id'])).data
         for entry in created[19:39]],
        repositories['results'])
def get_filtered(ids=None, vocab_slug=None, types=None,
                 expected_status=HTTP_200_OK):
    """Return list of LearningResources in shopping cart."""
    url_base = "{repo_base}{repo_slug}/learning_resources/".format(
        repo_base=REPO_BASE,
        repo_slug=self.repo.slug,
    )
    # Build the query string piecewise from whichever filters were given.
    query_parts = []
    if ids is not None:
        query_parts.append(
            "id={ids}".format(ids=",".join(str(item) for item in ids)))
    if vocab_slug is not None:
        query_parts.append("vocab_slug={slug}".format(slug=vocab_slug))
    if types is not None:
        query_parts.extend(
            "type_name={name}".format(name=type_name)
            for type_name in types)
    query = "?" + "&".join(query_parts) if query_parts else ""

    resp = self.client.get(url_base + query)
    self.assertEqual(expected_status, resp.status_code)
    if expected_status == HTTP_200_OK:
        return sorted(item['id'] for item in as_json(resp)['results'])
def test_repository_pagination(self):
    """Test pagination for collections"""
    def serialize(repo_dict):
        """Serialize the stored repository matching repo_dict's id."""
        return RepositorySerializer(
            Repository.objects.get(id=repo_dict['id'])).data

    made = [
        self.create_repository(
            {'name': "name{i}".format(i=i), "description": "description"})
        for i in range(40)
    ]

    first_page = self.get_repositories()
    # 40 we created + self.repo
    self.assertEqual(41, first_page['count'])
    self.assertEqual(
        [serialize(entry) for entry in made[:19]],
        first_page['results'][1:20])

    resp = self.client.get('{repo_base}?page=2'.format(
        repo_base=REPO_BASE,
    ))
    self.assertEqual(HTTP_200_OK, resp.status_code)
    second_page = as_json(resp)
    self.assertEqual(41, second_page['count'])
    self.assertEqual(
        [serialize(entry) for entry in made[19:39]],
        second_page['results'])
def test_learning_resource_types(self):
    """
    Get from learning_resource_types.

    Verifies the listing contents, that only safe methods are allowed,
    and that the endpoint requires login but not repository membership.
    """
    base_url = "{}learning_resource_types/".format(API_BASE)

    def assert_type_names(response):
        """Assert the response lists every LearningResourceType name."""
        types = as_json(response)
        self.assertEqual(
            sorted([lrt.name for lrt in
                    LearningResourceType.objects.all()]),
            sorted([t['name'] for t in types['results']]))

    resp = self.client.get(base_url)
    self.assertEqual(HTTP_200_OK, resp.status_code)
    assert_type_names(resp)

    # nothing besides GET, OPTION, HEAD allowed
    resp = self.client.options(base_url)
    self.assertEqual(HTTP_200_OK, resp.status_code)
    resp = self.client.head(base_url)
    self.assertEqual(HTTP_200_OK, resp.status_code)
    resp = self.client.post(base_url, {})
    self.assertEqual(HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
    resp = self.client.patch(base_url, {})
    self.assertEqual(HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
    resp = self.client.put(base_url, {})
    self.assertEqual(HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)

    # restricted to logged in users
    self.logout()
    resp = self.client.get(base_url)
    self.assertEqual(HTTP_403_FORBIDDEN, resp.status_code)

    # but otherwise unrestricted
    self.login(self.user_norepo)
    resp = self.client.get(base_url)
    self.assertEqual(HTTP_200_OK, resp.status_code)
    assert_type_names(resp)
def get_filtered(ids, expected_status=HTTP_200_OK):
    """Return list of LearningResources in shopping cart."""
    id_csv = ",".join(str(item) for item in ids)
    listing_url = "{repo_base}{repo_slug}/learning_resources/".format(
        repo_base=REPO_BASE,
        repo_slug=self.repo.slug,
    )
    resp = self.client.get("{url_base}?id={ids}".format(
        url_base=listing_url,
        ids=id_csv,
    ))
    self.assertEqual(expected_status, resp.status_code)
    # Only successful responses carry a result body worth parsing.
    if expected_status == HTTP_200_OK:
        return sorted(entry['id'] for entry in as_json(resp)['results'])
def test_vocabulary_filter_type(self):
    """Test filtering learning resource types for vocabularies"""
    self.create_vocabulary(self.repo.slug)
    resource_type = LearningResourceType.objects.first()
    # in the future this should be handled within the API
    Vocabulary.objects.first().learning_resource_types.add(resource_type)

    def assert_vocab_count(url, expected_count):
        """GET url and verify both the status code and the result count."""
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, HTTP_200_OK)
        self.assertEqual(expected_count, as_json(resp)['count'])

    # Unfiltered listing sees the single vocabulary.
    assert_vocab_count(
        "{repo_base}{repo_slug}/vocabularies/".format(
            repo_base=REPO_BASE,
            repo_slug=self.repo.slug,
        ), 1)
    # Filtering on the linked type still matches it.
    assert_vocab_count(
        "{repo_base}{repo_slug}"
        "/vocabularies/?type_name={name}".format(
            repo_base=REPO_BASE,
            repo_slug=self.repo.slug,
            name=resource_type.name,
        ), 1)
    # An unknown type name matches nothing.
    assert_vocab_count(
        "{repo_base}{repo_slug}"
        "/vocabularies/?type_name={name}".format(
            repo_base=REPO_BASE,
            repo_slug=self.repo.slug,
            name="missing",
        ), 0)
def test_blank(self):
    """
    Verify that a blank search will retrieve all results.
    """
    self.import_course_tarball(self.repo)
    search_url = "{repo_base}{repo_slug}/search/".format(
        repo_base=REPO_BASE,
        repo_slug=self.repo.slug,
    )
    # Empty-query search via the helper must see every resource.
    total = self.get_results()['count']
    self.assertEqual(LearningResource.objects.count(), total)
    # Hitting the endpoint with no query parameters at all is equivalent.
    resp = self.client.get(search_url)
    self.assertEqual(resp.status_code, HTTP_200_OK)
    self.assertEqual(total, as_json(resp)['count'])
def assert_page_size(self, page_size, expected_num_results):
    """
    Helper function to assert len(results) == expected_num_results.
    """
    total_types = LearningResourceType.objects.filter().count()
    # No page_size means no query string at all.
    suffix = ""
    if page_size is not None:
        suffix = "?{query_param}={page_size}".format(
            query_param=LorePagination.page_size_query_param,
            page_size=page_size)
    url = "{api_base}learning_resource_types/{page_size_param}".format(
        api_base=API_BASE, page_size_param=suffix)
    resp = self.client.get(url)
    self.assertEqual(resp.status_code, HTTP_200_OK)
    payload = as_json(resp)
    self.assertEqual(len(payload['results']), expected_num_results)
    self.assertEqual(payload['count'], total_types)
def get_results(self, query="", selected_facets=None, sortby=""):
    """Helper method to get search results."""
    facets = [] if selected_facets is None else selected_facets
    # Each facet becomes its own &selected_facets=... query component.
    selected_facets_arg = "".join(
        "&selected_facets={facet}".format(
            facet=urllib_parse.quote_plus(facet))
        for facet in facets
    )
    resp = self.client.get(
        "{repo_base}{repo_slug}/search/?q={query}{facets}"
        "&sortby={sortby}".format(
            repo_base=REPO_BASE,
            repo_slug=self.repo.slug,
            query=urllib_parse.quote_plus(query),
            facets=selected_facets_arg,
            sortby=sortby,
        )
    )
    self.assertEqual(HTTP_200_OK, resp.status_code)
    return as_json(resp)
def assert_page_size(self, page_size, expected_num_results):
    """
    Helper function to assert len(results) == expected_num_results.
    """
    expected_count = LearningResourceType.objects.filter().count()
    query = (
        "" if page_size is None
        else "?{query_param}={page_size}".format(
            query_param=LorePagination.page_size_query_param,
            page_size=page_size,
        )
    )
    resp = self.client.get(
        "{api_base}learning_resource_types/{page_size_param}".format(
            api_base=API_BASE,
            page_size_param=query,
        )
    )
    self.assertEqual(resp.status_code, HTTP_200_OK)
    body = as_json(resp)
    # The page honours the requested size while count reflects the total.
    self.assertEqual(len(body['results']), expected_num_results)
    self.assertEqual(body['count'], expected_count)
def test_create_export_task(self):
    """Test a basic export."""
    self.import_course_tarball(self.repo)
    resources = LearningResource.objects.filter(
        course__repository__id=self.repo.id).all()
    # Mark every resource for export.
    for resource in resources:
        self.create_learning_resource_export(self.repo.slug, {
            "id": resource.id
        })
    # Skip first one to test that it's excluded from export.
    resource_ids = [r.id for r in resources[1:]]

    # Exercise request validation: each malformed payload must be
    # rejected before a valid export task is started below.
    # Missing task_info.
    self.create_task(
        {
            "ids": resource_ids,
            "task_type": EXPORT_TASK_TYPE
        },
        expected_status=HTTP_400_BAD_REQUEST
    )
    # Missing repo slug.
    self.create_task(
        {
            "ids": resource_ids,
            "task_type": EXPORT_TASK_TYPE,
            "task_info": {}
        },
        expected_status=HTTP_400_BAD_REQUEST
    )
    # Missing ids.
    self.create_task(
        {
            "task_type": EXPORT_TASK_TYPE,
            "task_info": {
                "repo_slug": self.repo.slug,
            },
        },
        expected_status=HTTP_400_BAD_REQUEST
    )
    # Missing task_type.
    self.create_task(
        {
            "ids": resource_ids,
            "task_info": {
                "repo_slug": self.repo.slug,
            },
        },
        expected_status=HTTP_400_BAD_REQUEST
    )
    # Invalid task type.
    self.create_task(
        {
            "ids": resource_ids,
            "task_type": "missing",
            "task_info": {
                "repo_slug": self.repo.slug,
            },
        },
        expected_status=HTTP_400_BAD_REQUEST
    )
    # Invalid repo.
    self.create_task(
        {
            "ids": resource_ids,
            "task_type": EXPORT_TASK_TYPE,
            "task_info": {
                "repo_slug": "missing",
            },
        },
        expected_status=HTTP_404_NOT_FOUND
    )
    # User doesn't own repo.
    client2 = Client()
    client2.login(
        username=self.user_norepo.username,
        password=self.PASSWORD
    )
    resp = client2.post(
        "{base}tasks/".format(base=API_BASE),
        json.dumps({
            "ids": resource_ids,
            "task_type": EXPORT_TASK_TYPE,
            "task_info": {
                "repo_slug": self.repo.slug
            }
        }),
        content_type='application/json'
    )
    self.assertEqual(resp.status_code, HTTP_403_FORBIDDEN)
    # Start export task. Due to CELERY_ALWAYS_EAGER setting this will
    # block until it completes in testing.
    task_id = self.create_task(
        {
            "task_info": {
                "repo_slug": self.repo.slug,
                # Skip first one to test that it's excluded from export.
                "ids": [r.id for r in resources[1:]],
            },
            "task_type": EXPORT_TASK_TYPE
        }
    )['id']
    # Before we move on, confirm that other user can't see these tasks.
    resp = client2.get("{base}tasks/".format(base=API_BASE))
    self.assertEqual(resp.status_code, HTTP_200_OK)
    self.assertEqual(as_json(resp)['count'], 0)

    # Inspect the completed task's metadata as the owning user.
    result = self.get_tasks()['results'][0]
    self.assertEqual(task_id, result['id'])
    self.assertEqual("success", result['status'])
    self.assertEqual(result['task_type'], EXPORT_TASK_TYPE)
    self.assertTrue(result['result']['url'].startswith(
        "/media/resource_exports/test_exports.tar"))
    # webGLDemo.css shows up twice
    self.assertTrue(result['result']['collision'])

    with self.settings(
        DEFAULT_FILE_STORAGE='storages.backends.s3boto.S3BotoStorage'
    ):
        # change the default file storage to S3
        reload_module(ui.urls)
        # the view is not available any more
        resp = self.client.get(result['result']['url'])
        self.assertEqual(resp.status_code, HTTP_404_NOT_FOUND)
    # Update for change in file storage.
    reload_module(ui.urls)
    resp = self.client.get(result['result']['url'])
    self.assertEqual(HTTP_200_OK, resp.status_code)
    tempdir = mkdtemp()

    def make_path(resource):
        """Create a path that should exist for a resource."""
        type_name = resource.learning_resource_type.name
        return os.path.join(
            tempdir,
            type_name,
            "{id}_{url_name}.xml".format(
                id=resource.id,
                url_name=slugify(resource.url_name)[:200],
            )
        )
    try:
        # Unpack the streamed tarball and verify the skipped resource is
        # absent while every exported one is present on disk.
        fakefile = BytesIO(b"".join(resp.streaming_content))
        with tarfile.open(fileobj=fakefile, mode="r:gz") as tar:
            tar.extractall(path=tempdir)
        self.assertFalse(os.path.isfile(make_path(resources[0])))
        assert_resource_directory(self, resources[1:], tempdir)
    finally:
        rmtree(tempdir)
def test_create_export_task(self):
    """Test a basic export."""
    self.import_course_tarball(self.repo)
    resources = LearningResource.objects.filter(
        course__repository__id=self.repo.id).all()
    # Mark every resource for export.
    for resource in resources:
        self.create_learning_resource_export(self.repo.slug,
                                             {"id": resource.id})
    # Skip first one to test that it's excluded from export.
    resource_ids = [r.id for r in resources[1:]]

    # Exercise request validation: each malformed payload must be
    # rejected before a valid export task is started below.
    # Missing task_info.
    self.create_task({
        "ids": resource_ids,
        "task_type": EXPORT_TASK_TYPE
    }, expected_status=HTTP_400_BAD_REQUEST)
    # Missing repo slug.
    self.create_task(
        {
            "ids": resource_ids,
            "task_type": EXPORT_TASK_TYPE,
            "task_info": {}
        }, expected_status=HTTP_400_BAD_REQUEST)
    # Missing ids.
    self.create_task(
        {
            "task_type": EXPORT_TASK_TYPE,
            "task_info": {
                "repo_slug": self.repo.slug,
            },
        }, expected_status=HTTP_400_BAD_REQUEST)
    # Missing task_type.
    self.create_task(
        {
            "ids": resource_ids,
            "task_info": {
                "repo_slug": self.repo.slug,
            },
        }, expected_status=HTTP_400_BAD_REQUEST)
    # Invalid task type.
    self.create_task(
        {
            "ids": resource_ids,
            "task_type": "missing",
            "task_info": {
                "repo_slug": self.repo.slug,
            },
        }, expected_status=HTTP_400_BAD_REQUEST)
    # Invalid repo.
    self.create_task(
        {
            "ids": resource_ids,
            "task_type": EXPORT_TASK_TYPE,
            "task_info": {
                "repo_slug": "missing",
            },
        }, expected_status=HTTP_404_NOT_FOUND)
    # User doesn't own repo.
    client2 = Client()
    client2.login(username=self.user_norepo.username,
                  password=self.PASSWORD)
    resp = client2.post("{base}tasks/".format(base=API_BASE), json.dumps({
        "ids": resource_ids,
        "task_type": EXPORT_TASK_TYPE,
        "task_info": {
            "repo_slug": self.repo.slug
        }
    }), content_type='application/json')
    self.assertEqual(resp.status_code, HTTP_403_FORBIDDEN)
    # Start export task. Due to CELERY_ALWAYS_EAGER setting this will
    # block until it completes in testing.
    task_id = self.create_task({
        "task_info": {
            "repo_slug": self.repo.slug,
            # Skip first one to test that it's excluded from export.
            "ids": [r.id for r in resources[1:]],
        },
        "task_type": EXPORT_TASK_TYPE
    })['id']
    # Before we move on, confirm that other user can't see these tasks.
    resp = client2.get("{base}tasks/".format(base=API_BASE))
    self.assertEqual(resp.status_code, HTTP_200_OK)
    self.assertEqual(as_json(resp)['count'], 0)

    # Inspect the completed task's metadata as the owning user.
    result = self.get_tasks()['results'][0]
    self.assertEqual(task_id, result['id'])
    self.assertEqual("success", result['status'])
    self.assertEqual(result['task_type'], EXPORT_TASK_TYPE)
    self.assertTrue(result['result']['url'].startswith(
        "/media/resource_exports/test_exports.tar"))
    # webGLDemo.css shows up twice
    self.assertTrue(result['result']['collision'])

    with self.settings(
            DEFAULT_FILE_STORAGE='storages.backends.s3boto.S3BotoStorage'):
        # change the default file storage to S3
        reload_module(ui.urls)
        # the view is not available any more
        resp = self.client.get(result['result']['url'])
        self.assertEqual(resp.status_code, HTTP_404_NOT_FOUND)
    # Update for change in file storage.
    reload_module(ui.urls)
    resp = self.client.get(result['result']['url'])
    self.assertEqual(HTTP_200_OK, resp.status_code)
    tempdir = mkdtemp()

    def make_path(resource):
        """Create a path that should exist for a resource."""
        type_name = resource.learning_resource_type.name
        return os.path.join(
            tempdir,
            type_name,
            "{id}_{url_name}.xml".format(
                id=resource.id,
                url_name=slugify(resource.url_name)[:200],
            ))
    try:
        # Unpack the streamed tarball and verify the skipped resource is
        # absent while every exported one is present on disk.
        fakefile = BytesIO(b"".join(resp.streaming_content))
        with tarfile.open(fileobj=fakefile, mode="r:gz") as tar:
            tar.extractall(path=tempdir)
        self.assertFalse(os.path.isfile(make_path(resources[0])))
        assert_resource_directory(self, resources[1:], tempdir)
    finally:
        rmtree(tempdir)