def test_hugo_menu_yaml_serialize(omnibus_config):
    """HugoMenuYamlFileSerializer.serialize should create the expected file contents"""
    nav_menu_config_item = omnibus_config.find_item_by_name("navmenu")
    assert nav_menu_config_item is not None
    # Create page object referred to in the menu data
    WebsiteContentFactory.create(
        text_id=EXAMPLE_UUIDS[0],
        is_page_content=True,
        dirpath="path/to",
        filename="myfile",
    )
    example_menu_data = get_example_menu_data()
    content = WebsiteContentFactory.build(
        is_page_content=False,
        type=nav_menu_config_item.name,
        metadata={"mainmenu": example_menu_data},
    )
    serialized_data = HugoMenuYamlFileSerializer(omnibus_config).serialize(content)
    parsed_serialized_data = yaml.load(serialized_data, Loader=yaml.SafeLoader)
    # The first menu entry references the page created above, so serialization
    # should add its resolved url; the second entry passes through unchanged.
    assert parsed_serialized_data == {
        "mainmenu": [
            {**example_menu_data[0], "url": "/path/to/myfile"},
            example_menu_data[1],
        ]
    }
def permission_groups():
    """Set up groups, users and websites for permission testing"""
    # Five users covering the distinct permission roles under test
    (
        global_admin,
        global_author,
        site_owner,
        site_admin,
        site_editor,
    ) = UserFactory.create_batch(5)
    websites = WebsiteFactory.create_batch(2, owner=site_owner)
    global_admin.groups.add(Group.objects.get(name=constants.GLOBAL_ADMIN))
    global_author.groups.add(Group.objects.get(name=constants.GLOBAL_AUTHOR))
    # Site-scoped groups are granted only on the first website; the second
    # website has no admins/editors besides its owner.
    site_admin.groups.add(websites[0].admin_group)
    site_editor.groups.add(websites[0].editor_group)
    website = websites[0]
    owner_content = WebsiteContentFactory.create(website=website, owner=website.owner)
    editor_content = WebsiteContentFactory.create(website=website, owner=site_editor)
    yield SimpleNamespace(
        global_admin=global_admin,
        global_author=global_author,
        site_admin=site_admin,
        site_editor=site_editor,
        websites=websites,
        owner_content=owner_content,
        editor_content=editor_content,
    )
def test_websites_content_publish_sorting(drf_client, global_admin_user, published):
    """should be able to filter to just published or not"""
    drf_client.force_login(global_admin_user)
    website = WebsiteFactory.create(published=True)
    unpublished = WebsiteContentFactory.create_batch(
        3,
        website=website,
        # they were created after the publish date
        created_on=website.publish_date + datetime.timedelta(days=2),
    )
    # BUGFIX: this batch used to be assigned to a local named `published`,
    # shadowing the parametrized `published` flag. The filter below then sent a
    # list (always truthy) instead of the boolean, and the
    # `published if published else unpublished` branch could never pick the
    # unpublished batch. Renamed to avoid the shadowing.
    published_content = WebsiteContentFactory.create_batch(
        3,
        website=website,
    )
    for content in published_content:
        # backdate to before the publish date so these count as published
        # (presumably created_on is auto-set on create — TODO confirm)
        content.created_on = website.publish_date - datetime.timedelta(days=2)
        content.save()
    api_url = reverse(
        "websites_content_api-list",
        kwargs={
            "parent_lookup_website": website.name,
        },
    )
    resp = drf_client.get(api_url, {"published": published})
    content = published_content if published else unpublished
    expected_ids = sorted([c.text_id for c in content])
    assert resp.data["count"] == 3
    assert expected_ids == sorted([c["text_id"] for c in resp.data["results"]])
def test_baseurl_replacer_replaces_content_in_same_course(
    website_uuid, should_markdown_change
):
    """
    Double check that if the dirpath + filename match multiple times, the
    content chosen is from the same course as the markdown being edited
    """
    markdown = R"""
    Kittens [meow]({{< baseurl >}}/resources/pets/cat) meow.
    """
    w1 = WebsiteFactory.build(uuid="website-uuid-111")
    w2 = WebsiteFactory.build(uuid="website-uuid-222")
    websites = {w.uuid: w for w in [w1, w2]}
    # The content being cleaned always belongs to w1; the linkable target's
    # site is parametrized, so only the w1 case should trigger a replacement.
    target_content = WebsiteContentFactory.build(markdown=markdown, website=w1)
    linkable = WebsiteContentFactory.build(
        website=websites[website_uuid],
        dirpath="content/resources/pets",
        filename="cat",
        text_id="uuid-111",
    )
    cleaner = get_markdown_cleaner([linkable])
    cleaner.update_website_content(target_content)
    is_markdown_changed = target_content.markdown != markdown
    assert is_markdown_changed == should_markdown_change
def get_updated_content_and_parent(update_field):
    """Run update_content_from_s3_data with test data and return content, parent"""
    website = WebsiteFactory.build()
    content = WebsiteContentFactory.build(
        markdown="original markdown",
        metadata={"title": "original title"},
        website=website,
    )
    # stub out save so the unsaved (built) object never touches the DB
    content.save = Mock()
    # prepare the parent, but do not set content.parent_id.
    # that's one of the things we'll test
    parent = WebsiteContentFactory.build(id=123)
    s3_content_data = {
        "markdown": "s3 markdown",
        "metadata": {
            "title": "s3 title",
            "author": "s3 author",
            "parent_uid": "s3_parent_uid",
        },
        "parent": parent,
    }
    # Patch the model manager so lookups inside update_content_from_s3_data
    # resolve to our in-memory content object.
    with patch("websites.models.WebsiteContent.objects") as mock:
        mock.filter.return_value.first.return_value = content
        website = content.website
        text_id = content.text_id
        update_content_from_s3_data(website, text_id, s3_content_data, update_field)
    return content, parent
def test_update_sync_status(file_errors, site_errors, status):
    """update_sync_status should update the website sync_status field as expected"""
    now = now_in_utc()
    website = WebsiteFactory.create(
        synced_on=now,
        sync_status=WebsiteSyncStatus.PROCESSING,
        sync_errors=site_errors,
    )
    # One drive file per parametrized error: errored files are FAILED with no
    # resource, error-free files are COMPLETE with a resource attached.
    for error in file_errors:
        DriveFileFactory.create(
            website=website,
            sync_error=error,
            sync_dt=now,
            resource=(
                WebsiteContentFactory.create(
                    type=CONTENT_TYPE_RESOURCE, website=website
                )
                if not error
                else None
            ),
            status=(
                DriveFileStatus.COMPLETE if error is None else DriveFileStatus.FAILED
            ),
        )
    # This file was synced after `now`, so it should not factor into the status
    DriveFileFactory.create(
        website=website,
        sync_dt=now_in_utc() + timedelta(seconds=10),
        resource=WebsiteContentFactory.create(
            type=CONTENT_TYPE_RESOURCE, website=website
        ),
    )
    update_sync_status(website, now)
    website.refresh_from_db()
    assert website.sync_status == status
    # sync_errors should aggregate file-level and site-level errors
    assert sorted(website.sync_errors) == sorted(
        [error for error in file_errors if error] + (site_errors or [])
    )
def test_websites_content_list_multiple_type(drf_client, global_admin_user):
    """The list view of WebsiteContent should be able to filter by multiple type values"""
    drf_client.force_login(global_admin_user)
    site = WebsiteFactory.create()
    # One content object each of type "page", "resource", and "other"
    WebsiteContentFactory.create_batch(
        3,
        website=site,
        type=factory.Iterator(["page", "resource", "other"]),
    )
    listing_url = reverse(
        "websites_content_api-list",
        kwargs={"parent_lookup_website": site.name},
    )
    query_params = {"type[0]": "page", "type[1]": "resource"}
    response = drf_client.get(listing_url, query_params)
    # Only the two requested types should come back; "other" is filtered out
    assert response.data["count"] == 2
    returned_types = {item["type"] for item in response.data["results"]}
    assert returned_types == {"page", "resource"}
def test_baseurl_replacer_handle_specific_url_replacements(
    url, content_relative_dirpath, filename
):
    """
    Test specific replacements

    This test could perhaps be dropped. It was written before ContentLookup was
    moved to a separate module, and the functionality is tested there, now, too.
    """
    website_uuid = "website-uuid"
    website = WebsiteFactory.build(uuid=website_uuid)
    # baseurl links to the parametrized url should become resource_link shortcodes
    markdown = f"my [pets]({{{{< baseurl >}}}}{url}) are legion"
    expected_markdown = 'my {{% resource_link content-uuid "pets" %}} are legion'
    target_content = WebsiteContentFactory.build(markdown=markdown, website=website)
    linkable = WebsiteContentFactory.build(
        website=website,
        dirpath=f"content{content_relative_dirpath}",
        filename=filename,
        text_id="content-uuid",
    )
    cleaner = get_markdown_cleaner([linkable])
    cleaner.update_website_content(target_content)
    assert target_content.markdown == expected_markdown
def test_resolveuid_conversion_within_same_site(markdown, expected):
    """Check shortcodes are used within same site."""
    site = WebsiteFactory.build()
    page = WebsiteContentFactory.build(markdown=markdown, website=site)
    # Content on the same site with the uuid that the markdown references
    referenced = WebsiteContentFactory.build(
        text_id="5cf754b2-b97b-4ac1-8dab-deed1201de94", website=site
    )
    cleaner = get_markdown_cleaner([page, referenced])
    cleaner.update_website_content(page)
    assert page.markdown == expected
def test_updates_multiple_metadata_fields():
    """
    Check that a single call to update_website_content modifies multiple fields
    for rules that have multiple fields associated.
    """
    assert len(MetadataRelativeUrlsFix.fields) > 1
    website = WebsiteFactory.build(name="site-1")
    wc1 = WebsiteContentFactory.build(
        filename="thing1",
        dirpath="content/resources",
        website=website,
    )
    wc2 = WebsiteContentFactory.build(
        filename="thing2", dirpath="content/pages/two", website=website
    )
    # NOTE(review): line breaks inside these multiline strings are reconstructed;
    # the expected strings below mirror the same structure, so the comparison
    # semantics are unchanged.
    content_to_clean = WebsiteContentFactory.build(
        metadata={
            "related_resources_text": """Hello
Change this: [to thing1](resources/thing1#fragment "And a title!") cool'
Leave this alone: [wiki](https://wikipedia.org) same
And this [course link](/courses/8-02/pages/jigawatts)
""",
            "image_metadata": {"caption": "And now [thing2](pages/two/thing2)"},
        },
        website=website,
    )
    cleaner = get_markdown_cleaner([wc1, wc2])
    cleaner.update_website_content(content_to_clean)
    # relative links to known content get site-prefixed; absolute and external
    # links are left alone
    expected_related_resources = """Hello
Change this: [to thing1](/courses/site-1/resources/thing1#fragment) cool'
Leave this alone: [wiki](https://wikipedia.org) same
And this [course link](/courses/8-02/pages/jigawatts)
"""
    expected_caption = "And now [thing2](/courses/site-1/pages/two/thing2)"
    assert (
        content_to_clean.metadata["related_resources_text"]
        == expected_related_resources
    )
    assert content_to_clean.metadata["image_metadata"]["caption"] == expected_caption
def test_hugo_file_serialize(markdown, exp_sections):
    """HugoMarkdownFileSerializer.serialize should create the expected file contents"""
    metadata = {"metadata1": "dummy value 1", "metadata2": "dummy value 2"}
    content = WebsiteContentFactory.create(
        text_id="abcdefg",
        title="Content Title",
        type="sometype",
        markdown=markdown,
        metadata=metadata,
    )
    site_config = SiteConfig(content.website.starter.config)
    file_content = HugoMarkdownFileSerializer(site_config).serialize(
        website_content=content
    )
    # Split the serialized file on the "---" front matter delimiter lines
    md_file_sections = [
        part
        for part in re.split(re.compile(r"^---\n", re.MULTILINE), file_content)
        # re.split returns a blank string as the first item here even though the file contents begin with the given
        # pattern.
        if part
    ]
    assert len(md_file_sections) == exp_sections
    front_matter = md_file_sections[0]
    # Compare front matter line-by-line, order-insensitively
    front_matter_lines = list(filter(None, sorted(front_matter.split("\n"))))
    assert front_matter_lines == sorted(
        [
            f"title: {content.title}",
            f"content_type: {content.type}",
            f"uid: {content.text_id}",
        ]
        + [f"{k}: {v}" for k, v in metadata.items()]
    )
    # The second section, when present, is the markdown body
    if exp_sections > 1:
        assert md_file_sections[1] == markdown
def test_websitecontent_autogen_filename_unique(mocker, filename_base, existing_filenames, exp_result_filename, exclude_content): """ get_valid_new_filename should return a filename that obeys uniqueness constraints, adding a suffix and removing characters from the end of the string as necessary. """ # Set a lower limit for max filename length to test that filenames are truncated appropriately mocker.patch("websites.api.CONTENT_FILENAME_MAX_LEN", 14) content_type = "page" dirpath = "path/to" website = WebsiteFactory.create() contents = WebsiteContentFactory.create_batch( len(existing_filenames), website=website, type=content_type, dirpath=dirpath, filename=factory.Iterator(existing_filenames), ) exclude_text_id = contents[ 0].text_id if exclude_content and contents else None assert (get_valid_new_filename( website_pk=website.pk, dirpath=dirpath, filename_base=filename_base, exclude_text_id=exclude_text_id, ) == (exp_result_filename if not exclude_content else filename_base))
def test_websites_content_edit_with_upload(
    mocker, drf_client, global_admin_user, file_upload
):
    """Uploading a file when editing a new WebsiteContent object should work"""
    mime_type = "text/doof"
    mocker.patch("websites.serializers.detect_mime_type", return_value=mime_type)
    drf_client.force_login(global_admin_user)
    content = WebsiteContentFactory.create(
        type=constants.CONTENT_TYPE_RESOURCE, metadata={"title": "test"}
    )
    payload = {"file": file_upload, "title": "New Title"}
    resp = drf_client.patch(
        reverse(
            "websites_content_api-detail",
            kwargs={
                "parent_lookup_website": content.website.name,
                "text_id": str(content.text_id),
            },
        ),
        data=payload,
        format="multipart",
    )
    assert resp.status_code == 200
    # Re-fetch to see the persisted changes
    content = WebsiteContent.objects.get(id=content.id)
    assert content.title == payload["title"]
    # Uploaded files are stored under the site dir, keyed by the text_id with
    # dashes stripped, plus the original upload name
    assert (
        content.file.name
        == f"sites/{content.website.name}/{content.text_id.replace('-', '')}_{file_upload.name}"
    )
    # The detected mime type is recorded in metadata
    assert content.metadata["file_type"] == mime_type
    assert resp.data["text_id"] == str(content.text_id)
def test_get_destination_filepath_errors(mocker, has_missing_name, is_bad_config_item):
    """
    get_destination_filepath should log an error and return None if the site
    config is missing the given name, or if the config item does not have a
    properly configured destination.
    """
    patched_log = mocker.patch("content_sync.utils.log")
    # From basic-site-config.yml
    config_item_name = "blog"
    if is_bad_config_item:
        # Force the lookup to return a config item with no usable destination
        mocker.patch.object(
            SiteConfig,
            "find_item_by_name",
            return_value=ConfigItem(
                item={"name": config_item_name, "poorly": "configured"}
            ),
        )
    starter = WebsiteStarterFactory.build()
    content = WebsiteContentFactory.build(
        is_page_content=False,
        type="non-existent-config-name" if has_missing_name else config_item_name,
    )
    return_value = get_destination_filepath(
        content=content, site_config=SiteConfig(starter.config)
    )
    # Both failure modes log exactly one error and yield None
    patched_log.error.assert_called_once()
    assert return_value is None
def test_get_destination_url(is_page_content, dirpath, filename, expected):
    """get_destination_url should create a url for a piece of content"""
    content = WebsiteContentFactory.create(
        is_page_content=is_page_content,
        dirpath=dirpath,
        filename=filename,
    )
    site_config = SiteConfig(content.website.starter.config)
    result = get_destination_url(content, site_config)
    assert result == expected
def test_videos_with_truncatable_text(mocker, is_ocw):
    """Videos with titles or descriptions that are too long should be returned"""
    mocker.patch("websites.api.is_ocw_site", return_value=is_ocw)
    website = WebsiteFactory.create()
    # (title, description) pairs: too-long title, too-long description, both OK
    title_descs = (
        (" ".join(["TooLongTitle" for _ in range(10)]), "desc"),
        ("title", " ".join(["TooLongDescription" for _ in range(500)])),
        ("title", "desc"),
    )
    resources = []
    for title, desc in title_descs:
        resources.append(
            WebsiteContentFactory.create(
                website=website,
                title=title,
                metadata={
                    "description": desc,
                    "resourcetype": RESOURCE_TYPE_VIDEO,
                    "video_files": {"video_captions_file": "abc123"},
                },
            )
        )
    truncatable_content = videos_with_truncatable_text(website)
    # 5000 presumably mirrors the YouTube description length limit — TODO
    # confirm against the constant used by videos_with_truncatable_text
    assert len(resources[1].metadata["description"]) > 5000
    if is_ocw:
        # both the long-title and long-description videos are flagged
        assert len(truncatable_content) == 2
        for content in resources[0:2]:
            assert content in truncatable_content
    else:
        # non-OCW sites are not checked at all
        assert truncatable_content == []
def test_resolveuid_conversion_cross_site(markdown, expected):
    """Check that resolveuid links to content on a different site are converted."""
    target_content = WebsiteContentFactory.build(
        markdown=markdown, website=WebsiteFactory.build()
    )
    # The referenced content lives on a different website than the markdown
    linked_content = WebsiteContentFactory.build(
        text_id="5cf754b2-b97b-4ac1-8dab-deed1201de94",
        dirpath="content/pages/path/to",
        filename="thing",
        website=WebsiteFactory.build(name="other-site-name"),
    )
    cleaner = get_markdown_cleaner([target_content, linked_content])
    cleaner.update_website_content(target_content)
    assert target_content.markdown == expected
def test_incomplete_content_warnings(
    mocker, has_missing_ids, has_missing_captions, has_truncatable_text
):
    """incomplete_content_warnings should return expected warning messages"""
    website = WebsiteFactory.create()
    video_content = WebsiteContentFactory.create_batch(3, website=website)
    # Each parametrized flag selects which videos the patched helpers report
    no_yt_ids = video_content[0:2] if has_missing_ids else []
    no_caps = video_content[1:3] if has_missing_captions else []
    truncatable_vids = [video_content[2]] if has_truncatable_text else []
    mocker.patch(
        "websites.api.videos_with_truncatable_text", return_value=truncatable_vids
    )
    mocker.patch(
        "websites.api.videos_with_unassigned_youtube_ids",
        return_value=no_yt_ids,
    )
    mocker.patch(
        "websites.api.videos_missing_captions",
        return_value=no_caps,
    )
    warnings = incomplete_content_warnings(website)
    # One warning per category with offenders, each mentioning the video titles
    warnings_len = 0
    if has_missing_ids:
        warnings_len += 1
        for content in no_yt_ids:
            assert content.title in warnings[0]
    if has_missing_captions:
        warnings_len += 1
        for content in no_caps:
            # caption warnings come after the missing-id warning when both exist
            assert content.title in warnings[1 if has_missing_ids else 0]
    if has_truncatable_text:
        warnings_len += 1
        assert len(warnings) == warnings_len
        assert video_content[2].title in warnings[warnings_len - 1]
    if not has_missing_ids and not has_missing_captions and not has_truncatable_text:
        assert warnings == []
def test_baseurl_replacer_specific_title_replacements(markdown, expected_markdown):
    """Test specific replacements"""
    site = WebsiteFactory.build(uuid="website-uuid")
    page = WebsiteContentFactory.build(markdown=markdown, website=site)
    # Linkable content on the same site that the markdown may reference
    linkable = WebsiteContentFactory.build(
        website=site,
        dirpath="content/resources/path/to",
        filename="file1",
        text_id="content-uuid-1",
    )
    cleaner = get_markdown_cleaner([linkable])
    cleaner.update_website_content(page)
    assert page.markdown == expected_markdown
def test_rootrel_rule_handles_site_homeages_correctly(
    markdown, site_name, expected_markdown
):
    """Root-relative links pointing at a site homepage should be handled correctly."""
    w1 = WebsiteFactory.build(name="site_one")
    w2 = WebsiteFactory.build(name="site_two")
    websites = {w.name: w for w in [w1, w2]}
    # sitemetadata content with empty dirpath/filename stands in for w1's homepage
    c1 = WebsiteContentFactory.build(
        website=w1, type="sitemetadata", filename="", dirpath="", text_id="uuid-1"
    )
    # The content being cleaned belongs to the parametrized site
    content_to_clean = WebsiteContentFactory.build(
        website=websites[site_name], markdown=markdown
    )
    cleaner = get_markdown_cleaner([w1], [c1])
    cleaner.update_website_content(content_to_clean)
    assert content_to_clean.markdown == expected_markdown
def test_website_content_unpublished():
    """Website should set has_unpublished_live and has_unpublished_draft if any related content is updated"""
    website = WebsiteFactory.create()
    content = WebsiteContentFactory.create(website=website)
    # Clear both flags so we can observe them being set by content saves
    website.has_unpublished_live = False
    website.has_unpublished_draft = False
    website.save()
    # Content belonging to a different website should not touch our flags
    other_content = WebsiteContentFactory.create()
    other_content.save()
    website.refresh_from_db()
    # website should not have changed since the content is for a different website
    assert website.has_unpublished_live is False
    assert website.has_unpublished_draft is False
    # Saving this website's own content flips both flags back on
    content.save()
    website.refresh_from_db()
    assert website.has_unpublished_live is True
    assert website.has_unpublished_draft is True
def test_legacy_shortcode_fix_one(markdown, expected_markdown):
    """Test specific replacements"""
    page = WebsiteContentFactory.build(
        markdown=markdown, website=WebsiteFactory.build()
    )
    # Run only the LegacyShortcodeFixOne rule against the content
    Cleaner(LegacyShortcodeFixOne()).update_website_content(page)
    assert page.markdown == expected_markdown
def test_rootrel_rule_only_uses_resource_lines_for_same_site(
    markdown, site_name, expected_markdown
):
    """Root-relative replacements should only use content from the same site."""
    w1 = WebsiteFactory.build(name="site_one")
    w2 = WebsiteFactory.build(name="site_two")
    websites = {w.name: w for w in [w1, w2]}
    # Known content only exists on w1; cleaning w2's content should not use it
    c1 = WebsiteContentFactory.build(
        website=w1, filename="page1", dirpath="content/pages/stuff", text_id="uuid-1"
    )
    content_to_clean = WebsiteContentFactory.build(
        website=websites[site_name], markdown=markdown
    )
    cleaner = get_markdown_cleaner([w1], [c1])
    cleaner.update_website_content(content_to_clean)
    assert content_to_clean.markdown == expected_markdown
def test_legacy_file_lookup_raises_nonunique_for_multiple_matches():
    """find should raise MultipleMatchError when two files on a site share a base name"""
    # Two content items on the same site whose files differ only by uuid prefix
    duplicates = [
        WebsiteContentFactory.build(
            website_id="site-uuid-one",
            file=f"/courses/site_one/{string_uuid()}_some_file_name.jpg",
            text_id=text_id,
        )
        for text_id in ("content-uuid-1", "content-uuid-2")
    ]
    with patch_website_contents_all(duplicates):
        legacy_file_lookup = LegacyFileLookup()
        with pytest.raises(legacy_file_lookup.MultipleMatchError):
            legacy_file_lookup.find("site-uuid-one", "some_file_name.jpg")
def test_content_finder_returns_metadata_for_site(site_uuid, content_index):
    """find_within_site should return the sitemetadata content for the given site"""
    # One sitemetadata content object per website
    site_metadata_items = [
        WebsiteContentFactory.build(
            website=WebsiteFactory.build(uuid=uuid),
            type="sitemetadata",
            text_id=text_id,
        )
        for uuid, text_id in (
            ("website_one", "content-1"),
            ("website_two", "content-2"),
        )
    ]
    with patch_website_contents_all(site_metadata_items):
        content_lookup = ContentLookup()
        found = content_lookup.find_within_site(site_uuid, "/")
        assert found == site_metadata_items[content_index]
def github(settings, mocker, mock_branches):
    """Create a github backend for a website"""
    settings.GIT_TOKEN = "faketoken"
    settings.GIT_ORGANIZATION = "fake_org"
    # Replace the real API wrapper so no network calls are made
    mock_github_api = mocker.patch(
        "content_sync.backends.github.GithubApiWrapper",
    )
    mock_repo = mock_github_api.get_repo.return_value
    mock_repo.default_branch = settings.GIT_BRANCH_MAIN
    mock_repo.get_branches.return_value = [mock_branches[0]]
    website = WebsiteFactory.create()
    WebsiteContentFactory.create_batch(5, website=website)
    backend = GithubBackend(website)
    # Swap the backend's api for the mock so tests can inspect calls
    backend.api = mock_github_api
    yield SimpleNamespace(
        backend=backend, api=mock_github_api, repo=mock_repo, branches=mock_branches
    )
def test_shortcode_standardizer(text, expected):
    """Check that it removes extra args from resource shortcodes"""
    page = WebsiteContentFactory.build(
        markdown=text, website=WebsiteFactory.build()
    )
    get_markdown_cleaner().update_website_content(page)
    assert page.markdown == expected
def test_website_content_detail_with_file_serializer():
    """WebsiteContentDetailSerializer should include its file url in metadata"""
    content = WebsiteContentFactory.create(type="resource", metadata={"title": "Test"})
    content.file = SimpleUploadedFile("test.txt", b"content")
    serialized = WebsiteContentDetailSerializer(instance=content).data
    # The file's url is surfaced as "image", and metadata passes through
    assert serialized["image"] == content.file.url
    assert serialized["metadata"]["title"] == content.metadata["title"]
def test_update_video(settings, mocker, youtube_mocker, privacy):
    """update_video should send the correct data in a request to update youtube metadata"""
    speakers = "speaker1, speaker2"
    tags = "tag1, tag2"
    # NOTE(review): value reads like a description, but it's only used as an
    # opaque video id throughout this test
    youtube_id = "test video description"
    title = "TitleLngt>"
    description = "DescLngth>"
    content = WebsiteContentFactory.create(
        title=" ".join([title for i in range(11)]),
        metadata={
            "resourcetype": RESOURCE_TYPE_VIDEO,
            "description": " ".join([description for _ in range(501)]),
            "video_metadata": {
                "youtube_id": youtube_id,
                "video_tags": tags,
                "video_speakers": speakers,
            },
        },
    )
    # Expect truncation to YouTube limits: ">" stripped, "..." appended
    expected_title = f'{" ".join([title.replace(">", "") for _ in range(9)])}...'
    expected_desc = f'{" ".join([description.replace(">", "") for _ in range(499)])}...'
    # Sanity-check the fixtures actually exceed/obey the limits
    assert len(content.title) > YT_MAX_LENGTH_TITLE
    assert len(content.metadata["description"]) > YT_MAX_LENGTH_DESCRIPTION
    assert len(expected_title) <= YT_MAX_LENGTH_TITLE
    assert len(expected_desc) <= YT_MAX_LENGTH_DESCRIPTION
    mock_update_caption = mocker.patch("videos.youtube.YouTubeApi.update_captions")
    YouTubeApi().update_video(content, privacy=privacy)
    # Snippet update carries the truncated title/description
    youtube_mocker().videos.return_value.update.assert_any_call(
        part="snippet",
        body={
            "id": youtube_id,
            "snippet": {
                "title": expected_title,
                "description": expected_desc,
                "tags": tags,
                "categoryId": settings.YT_CATEGORY_ID,
            },
        },
    )
    # Privacy status is only updated when a privacy value is supplied
    if privacy is not None:
        youtube_mocker().videos.return_value.update.assert_any_call(
            part="status",
            body={
                "id": youtube_id,
                "status": {"privacyStatus": privacy, "embeddable": True},
            },
        )
    mock_update_caption.assert_called_once_with(content, youtube_id)
def test_create_content_sync_state(mocker):
    """
    Test that the create_content_sync_state signal makes the correct call
    """
    mock_api = mocker.patch("content_sync.signals.api", autospec=True)
    content = WebsiteContentFactory.create()
    # Creating the content should trigger the signal once...
    mock_api.upsert_content_sync_state.assert_called_once_with(content)
    # ...and every subsequent save should trigger it again
    content.save()
    assert mock_api.upsert_content_sync_state.call_count == 2
    mock_api.upsert_content_sync_state.assert_has_calls(
        [mocker.call(content), mocker.call(content)]
    )