def test_rpm_bulk_download_rpm_file_exists(mock_download_file):
    """ Test if we download each rpm file only once. If the file exists we skip it.

    ``rpm_bulk_download`` is invoked twice against the same directory; the
    second run must skip every file created by the first run, so the download
    mock is called exactly once per rpm.
    """
    # Use the context-manager form so the temporary directory is removed even
    # when an assertion fails (the original leaked it until interpreter exit).
    with tempfile.TemporaryDirectory() as tmp_dir:
        config = Config("koji_fake_url", "koji_fake_storage", "x86_64", ".")
        build = load_test_data("pki_core_build")
        tags = load_test_data("pki_core_tags")
        build_tag_md = load_test_data("pki_core_build_tag")

        mock_session = mock.Mock()
        mock_session.getBuild.return_value = build
        mock_session.listTags.return_value = tags
        mock_session.listTaggedRPMS.return_value = build_tag_md

        def download_file(url, target_pkg_dir, filename):
            """ Mock function which fakes rpm downloads """
            abs_file_path = "/".join([target_pkg_dir, filename])
            # 'with' guarantees the handle is closed even if open() raises later
            with open(abs_file_path, "w"):
                pass

        mock_download_file.side_effect = download_file

        pkgs = get_buildrequire_pkgs_from_build("1234", mock_session, config)
        pkgs, rpm_num = add_rpm_urls(pkgs, config)

        rpm_bulk_download(pkgs, rpm_num, tmp_dir)
        rpm_bulk_download(pkgs, rpm_num, tmp_dir)

        # the call_count should be the same as the number of rpms in the metadata.
        # we called the rpm_bulk_download function twice, but the second time the
        # download part should be skipped for the same dir.
        assert mock_download_file.call_count == rpm_num
def test_add_rpm_urls():
    """ Test for adding rpm urls to the pkgs dict for each package """
    config = Config("koji_fake_url", "koji_fake_storage", "x86_64", ".")
    build = load_test_data("pki_core_build")
    tags = load_test_data("pki_core_tags")
    build_tag_md = load_test_data("pki_core_build_tag")

    mock_session = mock.Mock()
    mock_session.getBuild.return_value = build
    mock_session.listTags.return_value = tags
    mock_session.listTaggedRPMS.return_value = build_tag_md

    pkgs = get_buildrequire_pkgs_from_build("1234", mock_session, config)
    pkgs, rpm_num = add_rpm_urls(pkgs, config)

    total_rpms = 0
    for pkg in pkgs:
        # every rpm entry must have a matching url entry
        assert len(pkg["rpms"]) == len(pkg["rpm_urls"])
        total_rpms += len(pkg["rpms"])
        pkg_md = pkg["package"]
        for rpm in pkg["rpms"]:
            nvr = "-".join([rpm["name"], rpm["version"], rpm["release"]])
            rpm_filename = nvr + "." + rpm["arch"]
            # expected layout: <host>/vol/<volume>/packages/<name>/<ver>/<rel>/<arch>/<file>.rpm
            expected_url = "/".join([
                config.koji_storage_host + "/vol",
                pkg_md["volume_name"],
                "packages",
                pkg_md["package_name"],
                pkg_md["version"],
                pkg_md["release"],
                rpm["arch"],
                rpm_filename + ".rpm",
            ])
            assert expected_url in pkg["rpm_urls"]

    # the reported rpm count must agree with what we counted ourselves
    assert total_rpms == rpm_num
def test_create_repo(mock_download_file):
    """ Test to create a rpm repository of out a dir.

    After downloading the (faked) rpm files, ``create_repo`` must generate
    the repository metadata directory ``repodata`` inside the target dir.
    """
    # Context-manager form cleans the temporary directory up even on failure
    # (the original leaked it until interpreter exit).
    with tempfile.TemporaryDirectory() as tmp_dir:
        config = Config("koji_fake_url", "koji_fake_storage", "x86_64", ".")
        build = load_test_data("pki_core_build")
        tags = load_test_data("pki_core_tags")
        build_tag_md = load_test_data("pki_core_build_tag")

        mock_session = mock.Mock()
        mock_session.getBuild.return_value = build
        mock_session.listTags.return_value = tags
        mock_session.listTaggedRPMS.return_value = build_tag_md

        def download_file(url, target_pkg_dir, filename):
            """ Mock function which fakes rpm downloads """
            abs_file_path = "/".join([target_pkg_dir, filename])
            with open(abs_file_path, "w"):
                pass

        mock_download_file.side_effect = download_file

        pkgs = get_buildrequire_pkgs_from_build("1234", mock_session, config)
        pkgs, rpm_num = add_rpm_urls(pkgs, config)

        rpm_bulk_download(pkgs, rpm_num, tmp_dir)
        create_repo(tmp_dir)

        # os.path.join instead of string concatenation for the path check
        assert os.path.exists(os.path.join(tmp_dir, "repodata"))
def test_rpm_bulk_download_pkg_exist(mock_download_file):
    """ Test if we create each pkg dir only once.

    After a first full download pass, a second pass over the same directory
    must not call ``os.makedirs`` again.
    """
    # Context-manager form cleans the temporary directory up even on failure
    # (the original leaked it until interpreter exit).
    with tempfile.TemporaryDirectory() as tmp_dir:
        config = Config("koji_fake_url", "koji_fake_storage", "x86_64", ".")
        build = load_test_data("pki_core_build")
        tags = load_test_data("pki_core_tags")
        build_tag_md = load_test_data("pki_core_build_tag")

        mock_session = mock.Mock()
        mock_session.getBuild.return_value = build
        mock_session.listTags.return_value = tags
        mock_session.listTaggedRPMS.return_value = build_tag_md

        def download_file(url, target_pkg_dir, filename):
            """ Mock function which fakes rpm downloads """
            abs_file_path = "/".join([target_pkg_dir, filename])
            with open(abs_file_path, "w"):
                pass

        mock_download_file.side_effect = download_file

        pkgs = get_buildrequire_pkgs_from_build("1234", mock_session, config)
        pkgs, rpm_num = add_rpm_urls(pkgs, config)

        rpm_bulk_download(pkgs, rpm_num, tmp_dir)

        # second pass: the package dirs already exist, so makedirs must be skipped
        with mock.patch("bld2repo.os.makedirs") as mock_makedirs:
            rpm_bulk_download(pkgs, rpm_num, tmp_dir)
            assert not mock_makedirs.call_count
def test_get_credits_data(self):
    """Credits fetched for a title must match the stored fixture data."""
    fetched = self.imdb._get_credits_data("tt0111161")
    expected = load_test_data("get_credits_tt0111161.json")
    assert len(expected) == len(fetched)
    # order inside each credit item is not guaranteed; compare sorted
    sort_key = itemgetter(1)
    for expected_item, fetched_item in zip(expected, fetched):
        assert sorted(expected_item, key=sort_key) == sorted(fetched_item, key=sort_key)
def test_extract_credits():
    """People extracted from the first three credit groups expose the expected fields."""
    credits_data = load_test_data("title_maindetails.json")["credits"]
    title = Title(data={"credits": credits_data[:3]})
    people = title._extract_credits()
    assert len(people) == 6

    # index -> (roles, label, token, name, imdb_id) spot checks
    expected = {
        0: ([], "Directed by", "directors", "Aleksey Popogrebskiy", "nm1403225"),
        5: (["Golos po ratsii - Volodya"], "Cast", "cast", "Ilya Sobolev", "nm3777825"),
        2: (["Sergey"], "Cast", "cast", "Sergey Puskepalis", "nm1655234"),
    }
    for idx, (roles, label, token, name, imdb_id) in expected.items():
        person = people[idx]
        assert person.roles == roles
        assert person.label == label
        assert person.token == token
        assert person.name == name
        assert person.imdb_id == imdb_id
def test_extract_credits():
    """People extracted from the first three credit groups expose the expected fields."""
    credits_data = load_test_data('title_maindetails.json')['credits']
    data = {'credits': credits_data[:3]}
    title = Title(data=data)
    # NOTE: this variant passes the data dict explicitly to _extract_credits
    people = title._extract_credits(data)
    assert len(people) == 6

    # index -> (roles, label, token, name, imdb_id) spot checks
    expected = {
        0: ([], 'Directed by', 'directors', 'Aleksey Popogrebskiy', 'nm1403225'),
        5: (['Golos po ratsii - Volodya'], 'Cast', 'cast', 'Ilya Sobolev', 'nm3777825'),
        2: (['Sergey'], 'Cast', 'cast', 'Sergey Puskepalis', 'nm1655234'),
    }
    for idx, (roles, label, token, name, imdb_id) in expected.items():
        person = people[idx]
        assert person.roles == roles
        assert person.label == label
        assert person.token == token
        assert person.name == name
        assert person.imdb_id == imdb_id
def set_up():
    """Build the common fixtures: the raw title data and a Title made from it."""
    data = load_test_data('title_maindetails.json')
    return {'title_data': data, 'title': Title(data=data)}
def set_up():
    """Return the first episode of the first season from the episodes fixture."""
    seasons = load_test_data('title_episodes.json').get('data').get('seasons')
    return {'data': seasons[0].get('list')[0]}
def test_get_credits_data(self):
    """Credits fetched for a title must match the stored fixture data."""
    fetched = self.imdb._get_credits_data('tt0111161')
    expected = load_test_data('get_credits_tt0111161.json')
    assert len(expected) == len(fetched)
    # order inside each credit item is not guaranteed; compare sorted
    sort_key = itemgetter(1)
    for expected_item, fetched_item in zip(expected, fetched):
        assert sorted(expected_item, key=sort_key) == sorted(fetched_item, key=sort_key)
def test_get_title_by_id(self):
    """End-to-end check of a Title fetched by imdb id against known fixture values."""
    title = self.imdb.get_title_by_id('tt0111161')
    # basic title attributes
    assert title.title == 'The Shawshank Redemption'
    assert title.year == 1994
    assert title.type == 'feature'
    assert title.tagline == ('Fear can hold you prisoner. '
                             'Hope can set you free.')
    assert isinstance(title.plots, list) is True
    assert len(title.plots) == 6
    assert isinstance(title.rating, float) is True
    assert sorted(title.genres) == sorted(['Crime', 'Drama'])
    assert isinstance(title.votes, int) is True
    assert title.runtime == 8520
    # artwork urls
    assert title.poster_url == (
        'http://ia.media-imdb.com/images/M/MV5BODU4MjU4NjIwNl5BMl5BanBnX'
        'kFtZTgwMDU2MjEyMDE@._V1_.jpg'
    )
    assert title.cover_url == (
        'http://ia.media-imdb.com/images/M/MV5BODU4MjU4NjIwNl5BMl5BanBnX'
        'kFtZTgwMDU2MjEyMDE@._V1__SX214_.jpg'
    )
    assert title.release_date == '1994-10-14'
    assert title.certification == 'R'
    # trailer images: only check the host prefix, the exact urls may change
    assert 'http://ia.media-imdb.com/images' in title.trailer_image_urls[0]
    expected_plot_outline = (
        'Two imprisoned men bond over a number '
        'of years, finding solace and eventual redemption through acts '
        'of common decency.'
    )
    assert title.plot_outline == expected_plot_outline
    # people attached to the title
    assert isinstance(title.directors_summary[0], Person)
    assert len(title.directors_summary) == 1
    assert len(title.creators) == 0
    assert len(title.cast_summary) == 4
    expected_cast_names = ['Tim Robbins', 'Morgan Freeman', 'Bob Gunton',
                           'William Sadler']
    for name in expected_cast_names:
        assert name in [p.name for p in title.cast_summary]
    expected_writers = ['Stephen King', 'Frank Darabont']
    for name in expected_writers:
        assert name in [p.name for p in title.writers_summary]
    # full credit list: lower bound only, plus exact id-set match vs fixture
    assert len(title.credits) >= 327
    assert (
        sorted(load_test_data('expected_credits.json')) ==
        sorted([p.imdb_id for p in title.credits])
    )
    assert isinstance(title.credits[10], Person)
    assert len(title.trailers) == 3
def test_not_module_exception():
    """ Test raise when the build does not contain a build tag and is not a module. """
    mock_session = mock.Mock()
    config = Config("koji_fake_url", "koji_fake_storage", "x86_64", ".")
    build = load_test_data("pki_core_build")
    tags = load_test_data("pki_core_tags")
    mock_session.getBuild.return_value = build

    # drop the build tag so the build looks untagged
    del tags[1]
    mock_session.listTags.return_value = tags

    with pytest.raises(Exception) as ex:
        get_buildrequire_pkgs_from_build("1234", mock_session, config)

    message = ex.value.args[0]
    # the error must identify the build and explain what is missing
    for fragment in ("1234", "not tagged", "'build' tag"):
        assert fragment in message
def test_get_title_by_id(self):
    """End-to-end check of a Title fetched by imdb id against known fixture values."""
    title = self.imdb.get_title_by_id('tt0111161')
    # basic title attributes
    assert title.title == 'The Shawshank Redemption'
    assert title.year == 1994
    assert title.type == 'feature'
    assert title.tagline == ('Fear can hold you prisoner. '
                             'Hope can set you free.')
    assert isinstance(title.plots, list) is True
    assert len(title.plots) == 5
    assert isinstance(title.rating, float) is True
    assert sorted(title.genres) == sorted(['Crime', 'Drama'])
    assert isinstance(title.votes, int) is True
    assert title.runtime == 8520
    # artwork urls pinned exactly
    assert title.poster_url == (
        'http://ia.media-imdb.com/images/M/MV5BODU4MjU4NjIwNl5BMl5BanBnX'
        'kFtZTgwMDU2MjEyMDE@._V1_.jpg')
    assert title.cover_url == (
        'http://ia.media-imdb.com/images/M/MV5BODU4MjU4NjIwNl5BMl5BanBnX'
        'kFtZTgwMDU2MjEyMDE@._V1__SX214_.jpg')
    assert title.release_date == '1994-10-14'
    assert title.certification == 'R'
    assert title.trailer_image_urls == [
        'http://ia.media-imdb.com/images/M/MV5BMzAzMDI1MTE0MF5BMl5BanBnX'
        'kFtZTgwNjMxNTMzMzE@._V1_.jpg'
    ]
    expected_plot_outline = (
        'Two imprisoned men bond over a number '
        'of years, finding solace and eventual redemption through acts '
        'of common decency.')
    assert title.plot_outline == expected_plot_outline
    # people attached to the title
    assert isinstance(title.directors_summary[0], Person)
    assert len(title.directors_summary) == 1
    assert len(title.creators) == 0
    assert len(title.cast_summary) == 4
    expected_cast_names = [
        'Tim Robbins', 'Morgan Freeman', 'Bob Gunton', 'William Sadler'
    ]
    for name in expected_cast_names:
        assert name in [p.name for p in title.cast_summary]
    expected_writers = ['Stephen King', 'Frank Darabont']
    for name in expected_writers:
        assert name in [p.name for p in title.writers_summary]
    # full credit list: exact count, plus exact id-set match vs fixture
    assert len(title.credits) == 326
    assert (sorted(load_test_data('expected_credits.json')) == sorted(
        [p.imdb_id for p in title.credits]))
    assert isinstance(title.credits[10], Person)
    assert len(title.trailers) == 3
def set_up():
    """Return the first episode enriched with series/episode/season metadata."""
    data = load_test_data('title_episodes.json')
    first_episode = data.get('data').get('seasons')[0].get('list')[0]
    first_episode.update({
        'series_name': 'Some Series Name',
        'episode': 4,
        'season': 5,
    })
    return {'data': first_episode}
def test_rpm_bulk_download(mock_download_file):
    """ Test if the rpm files are downloaded.

    Every rpm listed in the metadata must end up on disk, with the filename
    taken from the last component of its url.
    """
    # Context-manager form cleans the temporary directory up even on failure
    # (the original leaked it until interpreter exit).
    with tempfile.TemporaryDirectory() as tmp_dir:
        config = Config("koji_fake_url", "koji_fake_storage", "x86_64", ".")
        build = load_test_data("pki_core_build")
        tags = load_test_data("pki_core_tags")
        build_tag_md = load_test_data("pki_core_build_tag")

        mock_session = mock.Mock()
        mock_session.getBuild.return_value = build
        mock_session.listTags.return_value = tags
        mock_session.listTaggedRPMS.return_value = build_tag_md

        def download_file(url, target_pkg_dir, filename):
            """ Mock function which fakes rpm downloads """
            abs_file_path = "/".join([target_pkg_dir, filename])
            with open(abs_file_path, "w"):
                pass

        mock_download_file.side_effect = download_file

        pkgs = get_buildrequire_pkgs_from_build("1234", mock_session, config)
        pkgs, rpm_num = add_rpm_urls(pkgs, config)

        rpm_bulk_download(pkgs, rpm_num, tmp_dir)

        # we gather all the files created on disk
        created_rpm_files = []
        for _, _, files in os.walk(tmp_dir):
            created_rpm_files += files

        # test if the number of created files is the same as provided by the metadata
        assert len(created_rpm_files) == rpm_num

        # test if the filenames are the same as described in the metadata
        for pkg in pkgs:
            for rpm_url in pkg["rpm_urls"]:
                rpm = rpm_url.split("/")[-1]
                assert rpm in created_rpm_files
def test_get_buildrequire_pkgs_from_build_aarch64():
    """ Test for gathering aarch64 build dependencies."""
    config = Config("koji_fake_url", "koji_fake_storage", "aarch64", ".")
    build = load_test_data("pki_core_build")
    tags = load_test_data("pki_core_tags")
    build_tag_md = load_test_data("pki_core_build_tag")

    mock_session = mock.Mock()
    mock_session.getBuild.return_value = build
    mock_session.listTags.return_value = tags
    mock_session.listTaggedRPMS.return_value = build_tag_md

    pkgs = get_buildrequire_pkgs_from_build("1234", mock_session, config)

    # isinstance is the idiomatic type check; type(...) == list needlessly
    # rejects subclasses and is flagged by linters (E721).
    assert isinstance(pkgs, list)
    assert len(pkgs) == 50
    # only rpms matching the configured arch (or noarch) may be collected
    for pkg in pkgs:
        for rpm in pkg["rpms"]:
            assert rpm["arch"] in ["aarch64", "noarch"]
class TestGenerator:
    """Tests for the riddle Generator driven by the German dictionary fixture."""

    @pytest.fixture(scope="class")
    def fxt_generator(self):
        # one Generator per test class — building it loads the whole dictionary
        return Generator(Path("banone/dict/de.yaml"))

    @pytest.mark.parametrize(
        ("base_str", "extra_str", "question"),
        load_test_data(["base", "extra", "question"]),
    )
    def test_generate_question(self, base_str, extra_str, question, fxt_generator):
        """The question generated for a base/extra pair must match the fixture."""
        base = fxt_generator.dict.lookup(base_str)
        extra = fxt_generator.dict.lookup(extra_str)
        assert fxt_generator.generate_question(base, extra) == question

    @pytest.mark.parametrize(
        ("base_str", "compound_str", "answer"),
        load_test_data(["base", "compound", "answer"]),
    )
    def test_generate_answer(self, base_str, compound_str, answer, fxt_generator,
                             fxt_dict):
        """The answer generated for a base/compound pair must match the fixture."""
        base = fxt_dict.lookup(base_str)
        compound = Lemma(compound_str)
        assert fxt_generator.generate_answer(base, compound) == answer

    @pytest.mark.parametrize(
        ("base_str", "extra_str", "question", "answer"),
        load_test_data(["base", "extra", "question", "answer"]),
    )
    def test_generate_riddle(self, base_str, extra_str, question, answer,
                             fxt_generator):
        """A full riddle is the question and the answer joined by a newline."""
        base = fxt_generator.dict.lookup(base_str)
        extra = fxt_generator.dict.lookup(extra_str)
        assert fxt_generator.generate_riddle(base, extra) == str.format(
            "{}\n{}", question, answer)
def test_get_title_by_id(self):
    """End-to-end check of a Title fetched by imdb id against known fixture values."""
    title = self.imdb.get_title_by_id("tt0111161")
    # basic title attributes
    assert title.title == "The Shawshank Redemption"
    assert title.year == 1994
    assert title.type == "feature"
    assert title.tagline == ("Fear can hold you prisoner. "
                             "Hope can set you free.")
    assert isinstance(title.plots, list) is True
    assert len(title.plots) == 5
    assert isinstance(title.rating, float) is True
    assert sorted(title.genres) == sorted(["Crime", "Drama"])
    assert isinstance(title.votes, int) is True
    assert title.runtime == 8520
    # artwork urls pinned exactly
    assert title.poster_url == (
        "http://ia.media-imdb.com/images/M/MV5BODU4MjU4NjIwNl5BMl5BanBnX"
        "kFtZTgwMDU2MjEyMDE@._V1_.jpg"
    )
    assert title.cover_url == (
        "http://ia.media-imdb.com/images/M/MV5BODU4MjU4NjIwNl5BMl5BanBnX"
        "kFtZTgwMDU2MjEyMDE@._V1__SX214_.jpg"
    )
    assert title.release_date == "1994-10-14"
    assert title.certification == "R"
    assert title.trailer_image_urls == [
        "http://ia.media-imdb.com/images/M/MV5BMzAzMDI1MTE0MF5BMl5BanBnX"
        "kFtZTgwNjMxNTMzMzE@._V1_.jpg"
    ]
    expected_plot_outline = (
        "Two imprisoned men bond over a number "
        "of years, finding solace and eventual redemption through acts "
        "of common decency."
    )
    assert title.plot_outline == expected_plot_outline
    # people attached to the title
    assert isinstance(title.directors_summary[0], Person)
    assert len(title.directors_summary) == 1
    assert len(title.creators) == 0
    assert len(title.cast_summary) == 4
    expected_cast_names = ["Tim Robbins", "Morgan Freeman", "Bob Gunton",
                           "William Sadler"]
    for name in expected_cast_names:
        assert name in [p.name for p in title.cast_summary]
    expected_writers = ["Stephen King", "Frank Darabont"]
    for name in expected_writers:
        assert name in [p.name for p in title.writers_summary]
    # full credit list: exact count, plus exact id-set match vs fixture
    assert len(title.credits) == 326
    assert sorted(load_test_data("expected_credits.json")) == sorted(
        [p.imdb_id for p in title.credits])
    assert isinstance(title.credits[10], Person)
    assert len(title.trailers) == 3
def test_get_title_by_id(self):
    """End-to-end check of a Title fetched by imdb id.

    Unlike the older variants of this test, urls are only validated for
    well-formedness (via ``is_valid_url``) and counts use lower bounds, so
    the test survives upstream data churn.
    """
    title = self.imdb.get_title_by_id("tt0111161")
    # basic title attributes
    assert title.title == "The Shawshank Redemption"
    assert title.year == 1994
    assert title.type == "feature"
    assert title.tagline == ("Fear can hold you prisoner. "
                             "Hope can set you free.")
    assert isinstance(title.plots, list) is True
    assert len(title.plots) >= 5
    assert isinstance(title.rating, float) is True
    assert sorted(title.genres) == sorted(["Crime", "Drama"])
    assert isinstance(title.votes, int) is True
    assert title.runtime == 8520
    # urls are checked for validity only, not pinned to exact values
    assert is_valid_url(title.poster_url) is True
    assert is_valid_url(title.cover_url) is True
    assert title.release_date == "1994-10-14"
    assert title.certification == "R"
    for trailer_url in title.trailer_image_urls:
        assert is_valid_url(trailer_url) is True
    expected_plot_outline = (
        "Two imprisoned men bond over a number "
        "of years, finding solace and eventual redemption through acts "
        "of common decency."
    )
    assert title.plot_outline == expected_plot_outline
    # people attached to the title
    assert isinstance(title.directors_summary[0], Person)
    assert len(title.directors_summary) == 1
    assert len(title.creators) == 0
    assert len(title.cast_summary) == 4
    expected_cast_names = ["Tim Robbins", "Morgan Freeman", "Bob Gunton",
                           "William Sadler"]
    for name in expected_cast_names:
        assert name in [p.name for p in title.cast_summary]
    expected_writers = ["Stephen King", "Frank Darabont"]
    for name in expected_writers:
        assert name in [p.name for p in title.writers_summary]
    # full credit list: lower bound only, plus exact id-set match vs fixture
    assert len(title.credits) >= 327
    assert sorted(load_test_data("expected_credits.json")) == sorted(
        [p.imdb_id for p in title.credits])
    assert isinstance(title.credits[10], Person)
    assert len(title.trailers) == 3
class TestLemma:
    """Tests for Lemma creation, stemming, and compound merging."""

    @pytest.mark.parametrize(
        ("orth", "lemma_dict"),
        [
            ("", {}),
            # None means "construct without a dict"
            ("Test", None),
            ("Ananas", {"pos": "NN", "determiner": "eine", "color": "gelb"}),
            (
                "Apfelsine",
                {
                    "pos": "NN",
                    "determiner": "eine",
                    "color": "orange",
                    "property": "rund",
                },
            ),
            (
                "Kaninchen",
                {
                    "pos": "NN",
                    "determiner": "ein",
                    "property": "niedlich",
                    "action": "hoppelt über die Wiese",
                },
            ),
            ("schweben", {"pos": "VB", "action": "berührt nicht den Boden"}),
        ],
    )
    def test_create(self, orth, lemma_dict):
        """A Lemma exposes exactly the attributes given in its dict (else None)."""
        lemma = Lemma(orth)
        if lemma_dict is not None:
            lemma = Lemma(orth, lemma_dict)
        # normalize so .get() below works for the no-dict case too
        lemma_dict = lemma_dict or {}
        assert lemma.orth == orth
        assert lemma.pos == lemma_dict.get("pos")
        assert lemma.determiner == lemma_dict.get("determiner")
        assert lemma.color == lemma_dict.get("color")
        assert lemma.property == lemma_dict.get("property")
        assert lemma.action == lemma_dict.get("action")

    @pytest.mark.parametrize(
        ("orth", "lemma_dict", "phon_stem", "orth_stem"),
        [
            ("Haus", {"phon": "haUs", "pos": "NN"}, "haUs", "Haus"),
            ("Magie", {"phon": "ma-gi:", "pos": "NN"}, "ma-gi:", "Magie"),
            ("Fahne", {"phon": "fa:-n@", "pos": "NN"}, "fa:n", "Fahn"),
            ("Banane", {"phon": "ba-na:-n@", "pos": "NN"}, "ba-na:n", "Banan"),
            ("bauen", {"phon": "baU-@n", "pos": "VB"}, "baU", "bau"),
            ("malen", {"phon": "ma:-l@n", "pos": "VB"}, "ma:l", "mal"),
            ("anmalen", {"phon": "an-ma:-l@n", "pos": "VB"}, "an-ma:l", "anmal"),
            ("zappeln", {"phon": "tsa-p@ln", "pos": "VB"}, "tsa-p@l", "zappel"),
            ("toll", {"phon": "tOl", "pos": "ADJ"}, "tOl", "toll"),
        ],
    )
    def test_get_stem(self, orth, lemma_dict, phon_stem, orth_stem):
        """Stemming returns the (phonetic stem, orthographic stem) pair."""
        lemma = Lemma(orth, lemma_dict)
        assert lemma.get_stem() == (phon_stem, orth_stem)

    @pytest.mark.parametrize(
        ("base_str", "extra_str", "compound_str"),
        load_test_data(["base", "extra", "compound"]),
    )
    def test_merge(self, base_str, extra_str, compound_str, fxt_dict):
        """Merging base and extra yields the expected compound orthography."""
        base = fxt_dict.lookup(base_str)
        extra = fxt_dict.lookup(extra_str)
        compound = Lemma(compound_str)
        exp = base.merge(extra)
        assert exp.orth == compound.orth

    @pytest.mark.parametrize(
        ("base_str", "extra_str"),
        [
            # The base word needs a minimum number of syllables to enable matching
            # of sounds with a distance > 1 (e.g. n/m).
            ("Kamin", "Fahne"),
            ("Kamin", "Schwan"),
            ("Kamin", "spannen"),
            # The base word should be longer than one syllable not counting an
            # unstressed end syllable.
            ("Fahne", "Schwan"),
            ("Fahne", "spannen"),
            # The base word should not have less syllables than the extra word.
            ("Pudel", "Nudelauflauf"),
            # If two stressed vowels shall be matched they must have the same length.
            ("Tannzapfen", "Fahne"),
        ],
    )
    def test_merge_fail(self, base_str, extra_str, fxt_dict):
        """Incompatible pairs must not merge (merge returns None)."""
        base = fxt_dict.lookup(base_str)
        extra = fxt_dict.lookup(extra_str)
        assert base.merge(extra) is None
def set_up():
    """Build the common fixtures: the raw title data and a Title made from it."""
    data = load_test_data("title_maindetails.json")
    title = Title(data=data)
    return {
        "title_data": data,
        "title": title,
    }
def set_up():
    """Load the raw main-details fixture."""
    return {'data': load_test_data('title_maindetails.json')}
def set_up():
    """Build the common fixtures: the raw title data and a Title made from it."""
    data = load_test_data('title_maindetails.json')
    title = Title(data=data)
    return {
        'title_data': data,
        'title': title,
    }
def set_up():
    """Load the raw main-details fixture."""
    return {"data": load_test_data("title_maindetails.json")}
def load_test_data(cls):
    # Loads the fixture tables and caches them on the class.
    # NOTE(review): this function shadows the name it calls — presumably the
    # `load_test_data(root, tables)` below resolves to a different, imported
    # helper in the enclosing scope; confirm, otherwise this recurses with
    # the wrong arity.
    cls.test_data = load_test_data(fixture_data_root, cls.test_tables)