import pickle  # needed for the pickle.dump calls below; other helpers come from the surrounding module


def make_language_pack(lang, version, sublangargs, filename, ka_domain,
                       no_assessment_items, no_subtitles,
                       no_assessment_resources, no_dubbed_videos):
    node_data, subtitle_data, content_catalog = retrieve_language_resources(
        version, sublangargs, ka_domain, no_subtitles, no_dubbed_videos)

    node_data = translate_nodes(node_data, content_catalog)
    node_data = list(node_data)
    node_data, dubbed_video_count = apply_dubbed_video_map(
        node_data, subtitle_data, sublangargs["video_lang"])

    # now include only the assessment item resources that we need
    all_assessment_data, all_assessment_files = retrieve_all_assessment_item_data(
        no_item_data=no_assessment_items,
        no_item_resources=no_assessment_resources,
        node_data=node_data,
        lang=lang,
        content_catalog=content_catalog,
    )
    all_assessment_data = list(
        remove_assessment_data_with_empty_widgets(all_assessment_data))
    node_data = remove_nonexistent_assessment_items_from_exercises(
        node_data, all_assessment_data)

    node_data = clean_node_data_items(node_data)
    node_data = remove_untranslated_exercises(
        node_data, all_assessment_data) if lang != "en" else node_data
    node_data = list(node_data)
    node_data = sorted(node_data, key=lambda x: x.get('sort_order'))

    with open('node_data_{0}.pickle'.format(lang), 'wb') as handle:
        pickle.dump(node_data, handle)

    with open('assessment_data_{0}.pickle'.format(lang), 'wb') as handle:
        pickle.dump(all_assessment_data, handle)
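
A minimal sketch, not part of the original snippet, of how the two pickle files written above could be read back later; the hypothetical load_language_pack_data helper simply mirrors the node_data_<lang>.pickle / assessment_data_<lang>.pickle naming used in the function.

import pickle


def load_language_pack_data(lang):
    # Hypothetical helper: load the node and assessment data that
    # make_language_pack dumped for the given language code.
    with open('node_data_{0}.pickle'.format(lang), 'rb') as handle:
        node_data = pickle.load(handle)
    with open('assessment_data_{0}.pickle'.format(lang), 'rb') as handle:
        assessment_data = pickle.load(handle)
    return node_data, assessment_data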
Example #2
def make_language_pack(lang, version, sublangargs, filename,
                       no_assessment_items, no_subtitles,
                       no_assessment_resources):
    node_data, subtitle_data, interface_catalog, content_catalog = retrieve_language_resources(
        version, sublangargs, no_subtitles)

    subtitles, subtitle_paths = subtitle_data.keys(), subtitle_data.values()

    node_data = translate_nodes(node_data, content_catalog)
    node_data = list(node_data)
    node_data, dubbed_video_count = apply_dubbed_video_map(node_data, subtitles, sublangargs["video_lang"])

    html_exercise_ids, assessment_exercise_ids, node_data = separate_exercise_types(node_data)
    html_exercise_path, translated_html_exercise_ids = retrieve_html_exercises(html_exercise_ids, lang)

    # now include only the assessment item resources that we need
    all_assessment_data, all_assessment_files = retrieve_all_assessment_item_data(
        no_item_data=no_assessment_items,
        no_item_resources=no_assessment_resources
    )

    assessment_data = list(translate_assessment_item_text(
        all_assessment_data, content_catalog)) if lang != "en" else all_assessment_data

    node_data = remove_untranslated_exercises(
        node_data, translated_html_exercise_ids,
        assessment_data) if lang != "en" else node_data

    pack_metadata = generate_kalite_language_pack_metadata(lang, version, interface_catalog, content_catalog, subtitles,
                                                           dubbed_video_count)

    bundle_language_pack(str(filename), node_data, interface_catalog, interface_catalog,
                         pack_metadata, assessment_data, all_assessment_files, subtitle_paths, html_exercise_path)
Example #3
def make_language_pack(lang, version, sublangargs, filename, ka_domain,
                       no_assessment_items, no_subtitles,
                       no_assessment_resources, no_dubbed_videos):
    node_data, subtitle_data, interface_catalog, content_catalog = retrieve_language_resources(
        version, sublangargs, ka_domain, no_subtitles, no_dubbed_videos)

    subtitles, subtitle_paths = subtitle_data.keys(), subtitle_data.values()

    node_data = translate_nodes(node_data, content_catalog)
    node_data = list(node_data)
    node_data, dubbed_video_count = apply_dubbed_video_map(node_data, subtitles, sublangargs["video_lang"])

    html_exercise_ids, assessment_exercise_ids, node_data = separate_exercise_types(node_data)
    html_exercise_path, translated_html_exercise_ids = retrieve_html_exercises(html_exercise_ids, lang)

    # now include only the assessment item resources that we need
    all_assessment_data, all_assessment_files = retrieve_all_assessment_item_data(
        no_item_data=no_assessment_items,
        no_item_resources=no_assessment_resources,
        node_data=node_data,
        lang=lang,
        content_catalog=content_catalog,
    )
    all_assessment_data = list(remove_assessment_data_with_empty_widgets(all_assessment_data))
    node_data = remove_nonexistent_assessment_items_from_exercises(node_data, all_assessment_data)

    node_data = clean_node_data_items(node_data)
    assessment_data = list(all_assessment_data)

    node_data = remove_untranslated_exercises(
        node_data, translated_html_exercise_ids,
        assessment_data) if lang != "en" else node_data

    pack_metadata = generate_kalite_language_pack_metadata(lang, version, sublangargs, interface_catalog, content_catalog, subtitles,
                                                           dubbed_video_count)

    bundle_language_pack(str(filename), node_data, interface_catalog, interface_catalog,
                         pack_metadata, assessment_data, all_assessment_files, subtitle_paths, html_exercise_path)
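
A hedged sketch of how this variant might be invoked; every value below (language code, version, domain, filename and the sublangargs keys other than "video_lang") is an illustrative placeholder, not something taken from the snippets above.

sublangargs = {
    "interface_lang": "es",   # hypothetical sub-language settings
    "content_lang": "es",
    "subtitle_lang": "es",
    "video_lang": "es",       # the only key the snippets above actually read
}
make_language_pack(
    lang="es",
    version="0.17",                    # placeholder version string
    sublangargs=sublangargs,
    filename="es.zip",                 # output bundle name passed to bundle_language_pack
    ka_domain="es.khanacademy.org",    # assumed Khan Academy domain
    no_assessment_items=False,
    no_subtitles=False,
    no_assessment_resources=False,
    no_dubbed_videos=False,
)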
Example #4
def test_retrieve_all_assessment_item_return_list_of_str(self):
    # record_mode="all" tells VCR to record the HTTP interactions into the
    # cassette on every run instead of replaying previously recorded ones
    with vcr.use_cassette(
            "tests/fixtures/cassettes/kalite/assessment_item_data.json.yml",
            record_mode="all"):
        data, paths = retrieve_all_assessment_item_data(
            node_data=[self.assess_node])
    # every returned resource path should be a plain string
    for path in paths:
        assert isinstance(path, str), "Path is not of type str"
Example #5
def test_retrieve_all_assessment_item_return_list_of_dicts(self):
    # same cassette as above; this test checks the assessment item data itself
    with vcr.use_cassette(
            "tests/fixtures/cassettes/kalite/assessment_item_data.json.yml",
            record_mode="all"):
        data, paths = retrieve_all_assessment_item_data(
            node_data=[self.assess_node])
    # every assessment item should come back as a dict
    for datum in data:
        assert isinstance(datum, dict), "Data is not of type dict"
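
Both tests above assume a self.assess_node fixture prepared on the test class; a minimal sketch of such a setup, with a purely hypothetical class name and node payload, might look like this.

import unittest

import vcr


class RetrieveAssessmentItemDataTests(unittest.TestCase):
    def setUp(self):
        # Hypothetical exercise node holding a single assessment item id;
        # the real fixture comes from the project's own test data.
        self.assess_node = {
            "kind": "Exercise",
            "id": "addition_1",
            "all_assessment_items": [{"id": "x1a2b3c4d5e6f7a8", "live": True}],
        }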