def make_language_pack(lang, version, sublangargs, filename, no_assessment_items, no_subtitles, no_assessment_resources):
    """Assemble a KA Lite language pack for *lang* and bundle it into *filename*.

    Pulls node, subtitle and catalog resources for *version*, translates the
    node and assessment data through the content catalog, strips untranslated
    exercises for non-English packs, and writes the final bundle.
    """
    nodes, subtitle_data, interface_catalog, content_catalog = \
        retrieve_language_resources(version, sublangargs, no_subtitles)
    video_ids = subtitle_data.keys()
    subtitle_paths = subtitle_data.values()

    nodes = list(translate_nodes(nodes, content_catalog))
    nodes, dubbed_video_count = apply_dubbed_video_map(
        nodes, video_ids, sublangargs["video_lang"])

    html_ids, _assessment_ids, nodes = separate_exercise_types(nodes)
    html_exercise_path, translated_html_ids = retrieve_html_exercises(html_ids, lang)

    # now include only the assessment item resources that we need
    all_assessment_data, all_assessment_files = retrieve_all_assessment_item_data(
        no_item_data=no_assessment_items,
        no_item_resources=no_assessment_resources,
    )

    if lang == "en":
        # English is the source language: nothing to translate or filter out.
        assessment_data = all_assessment_data
    else:
        assessment_data = list(
            translate_assessment_item_text(all_assessment_data, content_catalog))
        nodes = remove_untranslated_exercises(
            nodes, translated_html_ids, assessment_data)

    pack_metadata = generate_kalite_language_pack_metadata(
        lang, version, interface_catalog, content_catalog,
        video_ids, dubbed_video_count)
    # NOTE(review): interface_catalog is passed twice here — confirm intended.
    bundle_language_pack(
        str(filename), nodes, interface_catalog, interface_catalog,
        pack_metadata, assessment_data, all_assessment_files,
        subtitle_paths, html_exercise_path)
def make_language_pack(lang, version, sublangargs, filename, ka_domain, no_assessment_items, no_subtitles, no_assessment_resources, no_dubbed_videos):
    """Prepare translated node and assessment data for *lang* and pickle both.

    Retrieves node/subtitle/catalog resources, translates and cleans the node
    data, filters untranslated exercises for non-English packs, sorts nodes by
    'sort_order', and dumps node and assessment data to per-language pickle
    files in the working directory.

    NOTE(review): *filename* is accepted but never used in this variant —
    confirm whether bundling was intentionally removed.
    """
    node_data, subtitle_data, content_catalog = retrieve_language_resources(
        version, sublangargs, ka_domain, no_subtitles, no_dubbed_videos)

    node_data = list(translate_nodes(node_data, content_catalog))
    node_data, dubbed_video_count = apply_dubbed_video_map(
        node_data, subtitle_data, sublangargs["video_lang"])

    # now include only the assessment item resources that we need
    all_assessment_data, all_assessment_files = retrieve_all_assessment_item_data(
        no_item_data=no_assessment_items,
        no_item_resources=no_assessment_resources,
        node_data=node_data,
        lang=lang,
        content_catalog=content_catalog,
    )
    all_assessment_data = list(
        remove_assessment_data_with_empty_widgets(all_assessment_data))
    node_data = remove_nonexistent_assessment_items_from_exercises(
        node_data, all_assessment_data)
    node_data = clean_node_data_items(node_data)

    # English is the source language, so nothing needs filtering there.
    if lang != "en":
        node_data = remove_untranslated_exercises(node_data, all_assessment_data)

    # sorted() consumes any iterable, so the intermediate list(node_data)
    # materialization the original did here is unnecessary.
    # NOTE(review): nodes lacking 'sort_order' produce None keys, which raises
    # TypeError under Python 3 — confirm every node carries sort_order.
    node_data = sorted(node_data, key=lambda node: node.get('sort_order'))

    with open('node_data_{0}.pickle'.format(lang), 'wb') as handle:
        pickle.dump(node_data, handle)
    with open('assessment_data_{0}.pickle'.format(lang), 'wb') as handle:
        pickle.dump(all_assessment_data, handle)
def make_language_pack(lang, version, sublangargs, filename, ka_domain, no_assessment_items, no_subtitles, no_assessment_resources, no_dubbed_videos):
    """Build a complete KA Lite language pack for *lang* and bundle it.

    Fetches node, subtitle and catalog resources, translates and cleans the
    node data, prunes empty-widget and nonexistent assessment items, drops
    untranslated exercises for non-English packs, and writes the bundle to
    *filename*.
    """
    nodes, subtitle_data, interface_catalog, content_catalog = \
        retrieve_language_resources(
            version, sublangargs, ka_domain, no_subtitles, no_dubbed_videos)
    video_ids = subtitle_data.keys()
    subtitle_paths = subtitle_data.values()

    nodes = list(translate_nodes(nodes, content_catalog))
    nodes, dubbed_video_count = apply_dubbed_video_map(
        nodes, video_ids, sublangargs["video_lang"])

    html_ids, _assessment_ids, nodes = separate_exercise_types(nodes)
    html_exercise_path, translated_html_ids = retrieve_html_exercises(html_ids, lang)

    # now include only the assessment item resources that we need
    all_assessment_data, all_assessment_files = retrieve_all_assessment_item_data(
        no_item_data=no_assessment_items,
        no_item_resources=no_assessment_resources,
        node_data=nodes,
        lang=lang,
        content_catalog=content_catalog,
    )
    all_assessment_data = list(
        remove_assessment_data_with_empty_widgets(all_assessment_data))
    nodes = remove_nonexistent_assessment_items_from_exercises(
        nodes, all_assessment_data)
    nodes = clean_node_data_items(nodes)

    # Shallow copy, matching the original's list(all_assessment_data).
    assessment_data = list(all_assessment_data)
    if lang != "en":
        nodes = remove_untranslated_exercises(
            nodes, translated_html_ids, assessment_data)

    pack_metadata = generate_kalite_language_pack_metadata(
        lang, version, sublangargs, interface_catalog, content_catalog,
        video_ids, dubbed_video_count)
    # NOTE(review): interface_catalog is passed twice here — confirm intended.
    bundle_language_pack(
        str(filename), nodes, interface_catalog, interface_catalog,
        pack_metadata, assessment_data, all_assessment_files,
        subtitle_paths, html_exercise_path)
def test_translates_selected_fields(self):
    """Every translatable field of a translated node must match the catalog
    mapping of the original value (or pass through unchanged)."""
    raw_nodes = dict(generate_node_list())
    catalog = generate_catalog()
    for slug, translated in translate_nodes(raw_nodes.items(), catalog):
        for field in NODE_FIELDS_TO_TRANSLATE:
            original_value = raw_nodes[slug][field]
            expected = catalog.msgid_mapping.get(original_value, original_value)
            assert translated[field] == expected
def test_translates_selected_fields(self):
    """Translated nodes must carry the catalog translation for each
    translatable field, falling back to the untranslated value."""
    raw_nodes = retrieve_kalite_data()
    by_path = {item.get("path"): item for item in raw_nodes}
    catalog = generate_catalog()
    for translated in translate_nodes(raw_nodes, catalog):
        source = by_path[translated.get("path")]
        for field in NODE_FIELDS_TO_TRANSLATE:
            original_value = source.get(field, "")
            expected = catalog.get(original_value, original_value)
            assert translated.get(field, "") == expected
def test_translates_selected_fields(self):
    """Check that translate_nodes maps every translatable field through the
    catalog, leaving unmapped values untouched."""
    source_nodes = retrieve_kalite_data()
    originals = {entry.get("path"): entry for entry in source_nodes}
    catalog = generate_catalog()
    translated_nodes = translate_nodes(source_nodes, catalog)
    for node in translated_nodes:
        before = originals[node.get("path")]
        for field in NODE_FIELDS_TO_TRANSLATE:
            untranslated = before.get(field, "")
            assert node.get(field, "") == catalog.get(untranslated, untranslated)
def test_translate_nodes(self):
    """translate_nodes should route each translatable field through the KA
    catalog, preserving values the catalog does not know."""
    source_nodes = retrieve_kalite_data()
    translated = translate_nodes(source_nodes, self.ka_catalog)
    for after, before in zip(translated, source_nodes):
        for field in NODE_FIELDS_TO_TRANSLATE:
            original_value = before.get(field)
            expected = self.ka_catalog.get(original_value, original_value)
            assert after.get(field) == expected
def test_translate_nodes(self):
    """Pairwise-compare translated and source nodes: every translatable
    field must equal the catalog lookup of the source value."""
    nodes = retrieve_kalite_data()
    pairs = zip(translate_nodes(nodes, self.ka_catalog), nodes)
    for translated_node, source_node in pairs:
        for field in NODE_FIELDS_TO_TRANSLATE:
            source_value = source_node.get(field)
            translated_value = translated_node.get(field)
            assert translated_value == self.ka_catalog.get(source_value, source_value)