Example 1
def _upload_translation(branch, lang_object):
    """
    Upload this language's translation files to the given Crowdin branch.

    Exits the process (status 1) when the branch does not exist on Crowdin.
    Raises requests.HTTPError when any upload request fails.
    """
    if no_crowdin_branch(branch, get_crowdin_details()):
        logging.error("Branch '{}' not found.".format(branch))
        sys.exit(1)

    logging.info(
        "Crowdin: uploading translation files for '{}' to '{}'...".format(
            lang_object[utils.KEY_CROWDIN_CODE], branch))

    url = UPLOAD_TRANSLATION_URL.format(
        branch=branch, language=lang_object[utils.KEY_CROWDIN_CODE])

    # Collect uploadable files from both the main and the Perseus locale dirs.
    file_names = [
        name
        for dir_path in (utils.local_locale_path(lang_object),
                         utils.local_perseus_locale_path(lang_object))
        for name in os.listdir(dir_path)
        if is_string_file(name)
    ]

    for chunk in _chunks(file_names):
        references = [_translation_upload_ref(f, lang_object) for f in chunk]
        try:
            r = requests.post(url, files=references)
            r.raise_for_status()
        finally:
            # Close the open file handles even when the request raises,
            # so a failed chunk does not leak file descriptors.
            for ref in references:
                ref[1].close()

    logging.info("Crowdin: upload succeeded!")
Example 2
def main(title, message, link_text):
    """
    Generate JSON suitable for sending in nutrition facts notifications
    """
    langs = utils.supported_languages(include_in_context=False,
                                      include_english=True)

    output = {}
    for lang_object in langs:
        file_path = os.path.join(utils.local_locale_path(lang_object),
                                 FILE_NAME)
        with open(file_path) as f:
            input_data = json.load(f)

        # Map each requested message ID onto its output key, keeping only
        # those present in this language's translation file.
        wanted = ((I18N_TITLE, title),
                  (I18N_MESSAGE, message),
                  (I18N_LINK_TEXT, link_text))
        i18n = {
            out_key: input_data[msg_id]
            for out_key, msg_id in wanted
            if msg_id in input_data
        }
        output[lang_object[utils.KEY_INTL_CODE]] = i18n

    # output JSON
    print(
        json.dumps(output,
                   sort_keys=True,
                   indent=2,
                   separators=(",", ": "),
                   ensure_ascii=False))
Example 3
def _translation_upload_ref(file_name, lang_object):
    """
    Build a (form-field-name, open-file-handle) pair for a Crowdin upload.

    The caller is responsible for closing the returned file handle.
    """
    # Hack for perseus: its CSV lives in a separate locale directory but
    # is assumed to keep the same file name.
    if file_name == PERSEUS_CSV:
        base_dir = utils.local_perseus_locale_path(lang_object)
    else:
        base_dir = utils.local_locale_path(lang_object)
    handle = open(os.path.join(base_dir, file_name), "rb")
    return ("files[{0}]".format(file_name), handle)
Example 4
def _process_downloaded_files():
    """
    Convert all CSV json files to JSON and ensure consistent diffs with ordered keys.
    Also copy over django.po files
    """
    # Account for csv reading differences in Pythons 2 and 3. These settings
    # are constant for the entire run, so compute them once rather than
    # re-evaluating sys.version_info for every file in the loop below.
    is_py2 = sys.version_info[0] < 3
    newline = None if is_py2 else ""
    mode = "r+b" if is_py2 else "r"
    encoding = None if is_py2 else "utf-8"

    for lang_object in utils.available_languages(include_in_context=True):
        locale_path = utils.local_locale_path(lang_object)
        perseus_path = utils.local_perseus_locale_path(lang_object)

        csv_locale_dir_path = os.path.join(utils.local_locale_csv_path(),
                                           lang_object["crowdin_code"])
        perseus_locale_dir_path = os.path.join(
            utils.local_perseus_locale_csv_path(), lang_object["crowdin_code"])

        # Make sure that the Perseus directory for CSV_FILES/{lang_code} exists
        if not os.path.exists(perseus_locale_dir_path):
            os.makedirs(perseus_locale_dir_path)

        files = os.listdir(csv_locale_dir_path) + os.listdir(
            perseus_locale_dir_path)

        for file_name in files:
            # The Perseus CSV is read from its own directory; everything else
            # comes from the main CSV directory.
            if file_name == PERSEUS_CSV:
                file_path = os.path.join(perseus_locale_dir_path, file_name)
            else:
                file_path = os.path.join(csv_locale_dir_path, file_name)

            # django.po files are moved over verbatim, not converted.
            if file_name == DJANGO_PO:
                shutil.move(file_path, locale_path)
                continue

            if "csv" not in file_name:
                continue

            try:
                csv_file = io.open(file_path,
                                   mode=mode,
                                   encoding=encoding,
                                   newline=newline)
            except EnvironmentError:
                logging.info(
                    "Failed to find CSV file in: {}".format(file_path))
                continue

            with csv_file as f:
                csv_data = list(row for row in csv.DictReader(f))

            data = _locale_data_from_csv(csv_data)

            # Write the converted data next to its source kind, swapping the
            # csv extension for json.
            if file_name == PERSEUS_CSV:
                utils.json_dump_formatted(data, perseus_path,
                                          file_name.replace("csv", "json"))
            else:
                utils.json_dump_formatted(data, locale_path,
                                          file_name.replace("csv", "json"))
Example 5
def command_download(branch):
    """
    Downloads and updates the local translation files from the given branch on Crowdin
    """
    logging.info("Crowdin: downloading '{}'...".format(branch))

    # delete previous files
    for stale_path in (utils.LOCALE_PATH, utils.PERSEUS_LOCALE_PATH):
        _wipe_translations(stale_path)

    for lang_object in utils.supported_languages(include_in_context=True):
        crowdin_code = lang_object[utils.KEY_CROWDIN_CODE]
        response = requests.get(
            DOWNLOAD_URL.format(language=crowdin_code, branch=branch))
        response.raise_for_status()

        archive = zipfile.ZipFile(io.BytesIO(response.content))
        target = utils.local_locale_path(lang_object)
        logging.info("\tExtracting {} to {}".format(crowdin_code, target))
        archive.extractall(target)

        # hack for perseus
        perseus_target = utils.local_perseus_locale_path(lang_object)
        if not os.path.exists(perseus_target):
            os.makedirs(perseus_target)
        shutil.move(
            os.path.join(target, PERSEUS_FILE),
            os.path.join(perseus_target, PERSEUS_FILE),
        )

    _format_json_files()  # clean them up to make git diffs more meaningful
    logging.info("Crowdin: download succeeded!")
Example 6
def command_gen_subset_fonts():
    """
    Creates custom fonts that attempt to contain all the glyphs and other font features
    that are used in user-facing text for the translation in each language.

    We make a separate subset font for common strings, which generally overlaps somewhat
    with the individual language subsets. This slightly increases how much the client
    needs to download on first request, but reduces Kolibri's distribution size by a
    couple megabytes.
    """
    logging.info("generating subset fonts...")

    _clean_up(SCOPE_COMMON)
    _clean_up(SCOPE_SUBSET)

    # Subset for the strings shared by all languages.
    _subset_and_merge_fonts(
        text=" ".join(_get_common_strings()),
        default_font=NOTO_SANS_LATIN,
        subset_reg_path=_woff_font_path(SCOPE_COMMON, is_bold=False),
        subset_bold_path=_woff_font_path(SCOPE_COMMON, is_bold=True),
    )

    # Compute the language list once and reuse it in both passes below
    # (previously it was fetched twice from utils.supported_languages).
    languages = utils.supported_languages(include_in_context=True,
                                          include_english=True)

    for lang_info in languages:
        logging.info("gen subset for {}".format(lang_info[utils.KEY_ENG_NAME]))
        strings = []
        strings.extend(_get_lang_strings(utils.local_locale_path(lang_info)))
        strings.extend(
            _get_lang_strings(utils.local_perseus_locale_path(lang_info)))

        # The scoped name is the same for both weights; compute it once.
        scoped_name = _scoped(SCOPE_SUBSET, lang_info[utils.KEY_INTL_CODE])
        _subset_and_merge_fonts(
            text=" ".join(strings),
            default_font=lang_info[utils.KEY_DEFAULT_FONT],
            subset_reg_path=_woff_font_path(scoped_name, is_bold=False),
            subset_bold_path=_woff_font_path(scoped_name, is_bold=True),
        )

    # generate common subset file
    _generate_inline_font_css(name=SCOPE_COMMON, font_family=SCOPE_COMMON)

    # generate language-specific subset font files
    for lang in languages:
        _generate_inline_font_css(
            name=_scoped(SCOPE_SUBSET, lang[utils.KEY_INTL_CODE]),
            font_family=SCOPE_SUBSET,
        )

    logging.info("subsets created")
Example 7
def _format_json_files():
    """
    re-print all json files to ensure consistent diffs with ordered keys
    """
    # Gather the main and perseus locale directories for every language.
    locale_paths = [
        path_fn(lang_object)
        for lang_object in utils.supported_languages(include_in_context=True)
        for path_fn in (utils.local_locale_path,
                        utils.local_perseus_locale_path)
    ]
    for locale_path in locale_paths:
        json_names = (name for name in os.listdir(locale_path)
                      if name.endswith(".json"))
        for file_name in json_names:
            full_path = os.path.join(locale_path, file_name)
            with io.open(full_path, mode="r", encoding="utf-8") as f:
                contents = json.load(f)
            utils.json_dump_formatted(contents, full_path)
Example 8
def main(title, message, link_text):
    """
    Generate JSON suitable for sending in nutrition facts notifications
    """
    available_languages = utils.available_languages(include_in_context=False,
                                                    include_english=True)

    output = {}
    for lang_object in available_languages:
        file_path = os.path.join(utils.local_locale_path(lang_object),
                                 FILE_NAME)
        i18n = {}

        # If the language code is "en", parse csv file instead of json file.
        # Note that `make i18n-extract-frontend` should have been run to generate the csv file.
        if lang_object[utils.KEY_INTL_CODE] == "en":
            # Swap only the file extension. A blanket replace("json", "csv")
            # would also rewrite "json" appearing anywhere else in the path.
            file_path = os.path.splitext(file_path)[0] + ".csv"
            with open(file_path) as f:
                for row in csv.DictReader(f):
                    identifier = row["Identifier"]
                    if title == identifier:
                        i18n[I18N_TITLE] = row["Source String"]
                    if message == identifier:
                        i18n[I18N_MESSAGE] = row["Source String"]
                    if link_text == identifier:
                        i18n[I18N_LINK_TEXT] = row["Source String"]

        else:
            with open(file_path) as f:
                input_data = json.load(f)
            if title in input_data:
                i18n[I18N_TITLE] = input_data[title]
            if message in input_data:
                i18n[I18N_MESSAGE] = input_data[message]
            if link_text in input_data:
                i18n[I18N_LINK_TEXT] = input_data[link_text]
        output[lang_object[utils.KEY_INTL_CODE]] = i18n

    # output JSON
    print(
        json.dumps(output,
                   sort_keys=True,
                   indent=2,
                   separators=(",", ": "),
                   ensure_ascii=False))
Example 9
def _format_json_files():
    """
    re-print all json files to ensure consistent diffs with ordered keys
    """

    for lang_object in utils.supported_languages(include_in_context=True):
        locale_path = utils.local_locale_path(lang_object)
        perseus_path = utils.local_perseus_locale_path(lang_object)

        csv_locale_dir_path = os.path.join(utils.local_locale_csv_path(),
                                           lang_object["crowdin_code"])
        for file_name in os.listdir(csv_locale_dir_path):
            if file_name.endswith("json"):
                # Then it is a Perseus JSON file - just copy it.
                source = os.path.join(csv_locale_dir_path, file_name)
                target = os.path.join(perseus_path, file_name)
                try:
                    os.makedirs(perseus_path)
                except OSError:
                    # Directory already exists. A bare `except:` here would
                    # also swallow KeyboardInterrupt/SystemExit.
                    pass
                shutil.copyfile(source, target)
                continue
            elif not file_name.endswith("csv"):
                continue

            csv_path = os.path.join(csv_locale_dir_path, file_name)

            # Account for csv reading differences in Pythons 2 and 3
            if sys.version_info[0] < 3:
                csv_file = open(csv_path, "rb")
            else:
                csv_file = open(csv_path, "r", newline="")

            with csv_file as f:
                csv_data = list(row for row in csv.DictReader(f))

            data = _locale_data_from_csv(csv_data)

            utils.json_dump_formatted(data, locale_path,
                                      file_name.replace("csv", "json"))