def generate_new_html_for_words(words):
    """
    Goes through a list of words and generates the HTML + ruby characters.

    *words* is a list of Chinese words.
    """
    words_with_pinyin_html = ""
    for word in words:
        debug("{0} : {1}".format(word, "".join(words)))
        debug('CHARACTER: {0}'.format(word))
        if not is_entirely_chinese(word):
            # Non-Chinese words get one empty pinyin placeholder per character
            pi = [''] * len(word)
        else:
            zh = hanzi.to_zhuyin(word)
            debug('ZHUYIN: {0}'.format(zh))
            pi = trans.zhuyin_to_pinyin(
                zh, accented=not numbered_pinyin).split(' ')
            debug('PINYIN: {0}'.format(pi))
        debug("{0}={1}".format(word, "".join(pi)))
        words_with_pinyin_html += html.to_html(word, top=pi, minified=True)
    return words_with_pinyin_html
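# is_entirely_chinese() is referenced above but defined elsewhere in this
# module. A minimal sketch of what such a helper might look like, built on
# dragonmapper's hanzi.has_chinese() (a hypothetical reconstruction, not the
# original implementation):
def is_entirely_chinese(word):
    """Return True if every character in *word* is a Chinese character."""
    return all(hanzi.has_chinese(char) for char in word)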
text = "" text_type = "unknown" current_side, side = _return_correct_side( x, y, top, left, characters, right, bottom) if side in PUT_TEXT_PLACES: text_type = _identify(current_side[char_num]) if side in STACKED_SIDES: text = _stackify(current_side[char_num]) else: text = current_side[char_num] if side in NEW_CHARCTER_PLACES: char_num += 1 _html_add("<td class=\"{0}\">".format(text_type), 3) _html_add("<span>{0}</span>".format(text), 4) _html_add("</td>", 3) _html_add("</tr>", 2) _html_add("</tbody>", 1) _html_add("</table>") return _line_html if __name__ == '__main__': zi = '你好,我叫顏毅。我是加拿大人!' zh = hanzi.to_zhuyin(zi) pi = trans.zhuyin_to_pinyin(hanzi.to_zhuyin(zi)) print(to_html(zi, bottom=pi, right=zh))
def test_zhuyin_to_pinyin(self):
    self.assertEqual(trans.zhuyin_to_pinyin(self.zhuyin),
                     self.accented_pinyin_spaced.lower())
    self.assertEqual(trans.zhuyin_to_pinyin(self.zhuyin, accented=False),
                     self.numbered_pinyin_spaced.lower())
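# The fixtures above (self.zhuyin, self.accented_pinyin_spaced,
# self.numbered_pinyin_spaced) come from the test class's setUp(); plausible
# values based on dragonmapper's documented example strings (assumed for
# illustration, not copied from the real test file):
def setUp(self):
    self.zhuyin = 'ㄋㄧˇ ㄏㄠˇ'
    self.accented_pinyin_spaced = 'nǐ hǎo'
    self.numbered_pinyin_spaced = 'ni3 hao3'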
def parse_file(filename, words):
    with open(filename) as f:
        data = json.load(f)

    items_parsed = 0

    # Each item in the JSON corresponds to one or more entries in the dictionary.
    # Most items map 1:1 to entries, e.g. "物質" is a single entry.
    # Some items are 多音字, so they map to multiple entries (e.g. 重 -> zhòng and chóng).
    #
    # In the vocabulary of the CSLD, each item may correspond to multiple heteronyms,
    # and each heteronym maps to a single entry.
    for item in data:
        # These do not change no matter the heteronym
        trad = item["title"]
        simp = HanziConv.toSimplified(trad)
        jyut = pinyin_jyutping_sentence.jyutping(trad,
                                                 tone_numbers=True,
                                                 spaces=True)
        freq = zipf_frequency(trad, "zh")

        # Some items have multiple pronunciations (one for Taiwan, one for Mainland China)
        taiwan_pin = mainland_pin = ""

        # Build up a list of definitions for each heteronym
        taiwan_defs = []
        mainland_defs = []

        # Distinguish between heteronyms by their pinyin – if the pinyin of the
        # current heteronym does not match the old pinyin, then a new heteronym
        # must be created
        last_heteronym_pin = ""
        last_taiwan_pin = last_mainland_pin = ""

        # Go through each heteronym, creating Entry objects for each one
        for heteronym in item["heteronyms"]:
            if "pinyin" not in heteronym:
                logging.debug(
                    f'Could not find pinyin for heteronym of word {trad} '
                    f'with definitions {heteronym["definitions"]}')
                continue

            # Filter out known bad pinyin
            if (trad in KNOWN_INVALID_SYLLABLES
                    and heteronym["pinyin"] in KNOWN_INVALID_SYLLABLES[trad]):
                pins = KNOWN_INVALID_SYLLABLES[trad][heteronym["pinyin"]]
            else:
                pins = heteronym["pinyin"].split("<br>陸⃝")

                # Some weird a's cause dragonmapper to break, so replace them with standard a's.
                pins = list(map(lambda x: x.replace("ɑ", "a"), pins))
                # Remove dashes in pinyin
                pins = list(map(lambda x: x.replace("-", " "), pins))
                # Remove commas in pinyin
                pins = list(map(lambda x: x.replace(",", ""), pins))
                # Remove weird characters
                pins = list(map(lambda x: x.replace("陸⃟", ""), pins))
                # Dragonmapper cannot handle some erhua
                pins = list(map(lambda x: x.replace("diǎr", "diǎn er"), pins))
                pins = list(map(lambda x: x.replace("biār", "biān er"), pins))

                try:
                    # Converting from pinyin -> zhuyin inserts spaces between characters;
                    # converting from zhuyin -> pinyin conserves these spaces
                    pins = [
                        transcriptions.zhuyin_to_pinyin(
                            transcriptions.pinyin_to_zhuyin(x), accented=False)
                        for x in pins
                    ]
                    for x in pins:
                        if x.count(" ") >= len(trad):
                            # This means that there was an extra space inserted
                            # somewhere; the pinyin is not valid
                            raise ValueError("Too many spaces in parsed Pinyin!")
                except Exception as e:
                    # Try parsing zhuyin as a backup
                    pins = heteronym["bopomofo"].split("<br>陸⃝")
                    # Remove weird spaces in zhuyin
                    pins = list(map(lambda x: x.replace(" ", " "), pins))
                    try:
                        pins = [
                            transcriptions.zhuyin_to_pinyin(x, accented=False)
                            for x in pins
                        ]
                    except Exception as e:
                        logging.error(
                            f"Unable to split up Pinyin for word {trad}: {e}, "
                            "skipping word...")
                        continue

            if len(pins) > 1:
                taiwan_pin = pins[0]
                mainland_pin = pins[1]
            else:
                taiwan_pin = mainland_pin = pins[0]

            if (last_heteronym_pin != ""
                    and heteronym["pinyin"] != last_heteronym_pin):
                # A new, different pinyin means that we are now processing a new
                # heteronym. We must create an Entry object for the definitions
                # of the old heteronym and add it to the list of entries before
                # processing the new one.
                entry = objects.Entry(trad,
                                      simp,
                                      last_taiwan_pin,
                                      jyut,
                                      freq=freq,
                                      defs=taiwan_defs)
                words.append(entry)

                if last_mainland_pin != last_taiwan_pin:
                    entry = objects.Entry(
                        trad,
                        simp,
                        last_mainland_pin,
                        jyut,
                        freq=freq,
                        defs=mainland_defs,
                    )
                    words.append(entry)

                # Reset the definitions list
                taiwan_defs = []
                mainland_defs = []

            for definition in heteronym["definitions"]:
                taiwan_label = "臺" if taiwan_pin != mainland_pin else ""
                mainland_label = "陸" if mainland_pin != taiwan_pin else ""

                definition_text = definition["def"]

                # Take out parts of definitions that should be in labels
                for pattern in LABEL_REGEX_PATTERNS:
                    if re.match(pattern, definition_text):
                        definition_label, definition_text = re.match(
                            pattern, definition_text).group(1, 2)
                        taiwan_label += ("、" + definition_label
                                         if taiwan_label else definition_label)
                        mainland_label += ("、" + definition_label
                                           if mainland_label else definition_label)

                # Remove 臺⃝ and 陸⃝ from definitions, since Qt cannot display them
                definition_text = definition_text.replace("臺⃝", "臺:")
                definition_text = definition_text.replace("陸⃝", "陸:")

                # Insert zero-width spaces (U+200B) between segmented words so
                # that we can reverse-search the definition
                taiwan_def_tuple = objects.DefinitionTuple(
                    "\u200b".join(jieba.cut(definition_text)), taiwan_label, [])
                mainland_def_tuple = objects.DefinitionTuple(
                    "\u200b".join(jieba.cut(definition_text)), mainland_label, [])

                # Parse and add examples to this definition
                if "example" in definition:
                    for example in definition["example"]:
                        if re.match(EXAMPLE_REGEX_PATTERN, example):
                            # Every example is surrounded by "如:<example>",
                            # so only keep the example
                            example = re.match(EXAMPLE_REGEX_PATTERN,
                                               example).group(1)
                            # Some examples contain multiple examples, so split
                            # them up by enclosing brackets 「」
                            example_texts = re.findall(
                                INDIVIDUAL_EXAMPLE_REGEX_PATTERN, example)
                        else:
                            logging.warning(
                                f"Found example that does not fit the normal "
                                f"example regex pattern: {trad}, {example}")
                            # Fall back to splitting on the Chinese enumeration comma
                            example_texts = example.split("、")

                        for example_text in example_texts:
                            # Strip out weird whitespace
                            example_text = re.sub(WHITESPACE_REGEX_PATTERN, "",
                                                  example_text)

                            # Joining and splitting separates series of full-width
                            # punctuation marks into separate items, which is
                            # necessary so that lazy_pinyin() returns separate items
                            # for each full-width punctuation mark in the list it
                            # returns
                            #
                            # e.g. "《儒林外史.第四六回》:「成老爹道..." turns into
                            # "《 儒 林 外 史 . 第 四 六 回 》 : 「 成 老 爹 道", which turns into
                            # ['《', '儒', '林', '外', '史', '.', '第', '四', '六', '回', '》', ':', '「', '成', '老', '爹', '道']
                            # (Notice how "》:「" is now split up into three different items)
                            example_pinyin = lazy_pinyin(
                                " ".join(example_text).split(),
                                style=Style.TONE3,
                                neutral_tone_with_five=True,
                            )
                            example_pinyin = " ".join(example_pinyin).lower()
                            example_pinyin = example_pinyin.strip().replace("v", "u:")

                            # Since the pinyin returned by lazy_pinyin doesn't
                            # always match the pinyin given in the heteronym,
                            # attempt to replace pinyin corresponding to the
                            # characters in this heteronym with the pinyin
                            # provided by the JSON file.
                            #
                            # e.g. example_text = "重新"; example_pinyin = "zhong4 xin1"
                            #      (returned by lazy_pinyin)
                            # trad = "重", phrase_pinyin = "chong2"
                            # means that we should convert "zhong4 xin1" to "chong2 xin1"

                            # Strip out variant pronunciations for conversion purposes
                            for index, pin in enumerate([taiwan_pin, mainland_pin]):
                                phrase_pinyin = pin
                                phrase_pinyin = re.sub(
                                    VARIANT_PRONUNCIATION_REGEX_PATTERN,
                                    "",
                                    phrase_pinyin,
                                )
                                phrase_pinyin = re.sub(
                                    COLLOQUIAL_PRONUNCIATION_REGEX_PATTERN,
                                    "",
                                    phrase_pinyin,
                                )

                                # Do not try to match entries formatted like "那搭(Namibia)"
                                if not re.match(STRANGE_ENTRY_REGEX_PATTERN, trad):
                                    try:
                                        example_pinyin = (
                                            change_pinyin_to_match_phrase(
                                                example_text,
                                                example_pinyin,
                                                trad,
                                                phrase_pinyin,
                                            ))
                                    except Exception as e:
                                        logging.warning(
                                            f"Couldn't change pinyin in example for word {trad}: "
                                            f"{''.join(example_text)}, {example_pinyin}, {pin}, "
                                            f"{e}")
                                        traceback.print_exc()

                                if index == 0:
                                    taiwan_def_tuple.examples.append(
                                        objects.ExampleTuple(
                                            "zho", example_pinyin, example_text))
                                elif index == 1:
                                    mainland_def_tuple.examples.append(
                                        objects.ExampleTuple(
                                            "zho", example_pinyin, example_text))

                taiwan_defs.append(taiwan_def_tuple)
                mainland_defs.append(mainland_def_tuple)

            last_heteronym_pin = heteronym["pinyin"]
            last_taiwan_pin = taiwan_pin
            last_mainland_pin = mainland_pin

        entry = objects.Entry(trad,
                              simp,
                              taiwan_pin,
                              jyut,
                              freq=freq,
                              defs=taiwan_defs)
        words.append(entry)

        if mainland_pin != taiwan_pin:
            entry = objects.Entry(trad,
                                  simp,
                                  mainland_pin,
                                  jyut,
                                  freq=freq,
                                  defs=mainland_defs)
            words.append(entry)

        items_parsed += 1
        if not items_parsed % 500:
            print(f"Parsed entry #{items_parsed}")
def parse_same_meaning_file(filename, words):
    for line in read_csv(filename):
        if len(line) != 17 or line[0] == "總分類":
            continue

        terms = defaultdict(set)
        for index in (4, 5, 6):
            if line[index]:
                terms["臺"].add(line[index])
        for index in (7, 8, 9):
            if line[index]:
                terms["陸"].add(line[index])
        for index in (10, 11, 12):
            if line[index]:
                terms["香"].add(line[index])
        for index in (13, 14, 15):
            if line[index]:
                terms["澳"].add(line[index])

        explanation = None
        if line[16]:
            # Zero-width spaces (U+200B) between segmented words enable
            # reverse search, as in parse_file() above
            explanation = objects.DefinitionTuple(
                "\u200b".join(jieba.cut(line[16])), "差異說明", [])

        for location in terms:
            for term in terms[location]:
                trad = term
                simp = HanziConv.toSimplified(trad)

                if term == line[4] and line[2]:
                    # Use the provided pinyin, which always corresponds at
                    # least to the first Taiwan term
                    pin = transcriptions.zhuyin_to_pinyin(
                        line[2].replace(" ", " "), accented=False)
                else:
                    pin = lazy_pinyin(
                        trad,
                        style=Style.TONE3,
                        neutral_tone_with_five=True,
                    )
                    pin = " ".join(pin).lower()
                    pin = pin.strip().replace("v", "u:")

                jyut = pinyin_jyutping_sentence.jyutping(trad,
                                                         tone_numbers=True,
                                                         spaces=True)
                freq = zipf_frequency(trad, "zh")

                defs = terms.keys()
                defs = map(
                    lambda x: objects.DefinitionTuple(
                        "、".join(terms[x]), line[1] + ":" + x, []),
                    defs,
                )
                defs = list(defs)
                if explanation:
                    defs.append(explanation)

                entry = objects.Entry(trad, simp, pin, jyut, freq=freq, defs=defs)
                words.add(entry)
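# The objects module used by both parsers is not shown here. Minimal sketches
# of the tuple types it likely provides (assumed field names; objects.Entry is
# a richer class in the real project, with hashing so it can live in a set):
import collections

DefinitionTuple = collections.namedtuple(
    "DefinitionTuple", ["definition", "label", "examples"])
ExampleTuple = collections.namedtuple(
    "ExampleTuple", ["lang", "pron", "content"])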
hangul_romanize_transliter = Transliter(academic)

romanizers: Mapping[Locale, Callable[[str], Markup]] = {
    Locale.parse('ja'): lambda t: Markup(to_roma(t.replace(' ', ''))),
    Locale.parse('ko'): lambda t: Markup(
        hangul_romanize_transliter.translit(t.replace(' ', ''))),
    Locale.parse('zh_CN'): lambda t: Markup(to_pinyin(t).replace(' ', '')),
    Locale.parse('zh_HK'): lambda t: Markup(
        re.sub(
            r'(\d) ?',
            r'<sup>\1</sup>',
            t if re.match(r'^[A-Za-z0-9 ]+$', t)
            else pinyin_jyutping_sentence.jyutping(t, True, True))),
    Locale.parse('zh_TW'): lambda t: Markup(
        zhuyin_to_pinyin(to_zhuyin(t)).replace(' ', '')),
}


def romanize(term: str, locale: Locale) -> Markup:
    try:
        f = romanizers[locale]
    except KeyError:
        return Markup(term.replace(' ', ''))
    return f(term)


class Word(Sequence[Term]):

    def __init__(self, id: str, locale: Locale, terms: Iterable[Term]):
        self.id = id
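# Illustrative behaviour of romanize() for each mapped locale (a sketch; the
# exact strings depend on the installed libraries):
#
#   romanize('你好', Locale.parse('zh_TW'))  # Markup('nǐhǎo')
#   romanize('你好', Locale.parse('zh_HK'))  # Markup('nei<sup>5</sup>hou<sup>2</sup>')
#   romanize('안녕', Locale.parse('ko'))     # Markup('annyeong')
#   romanize('你好', Locale.parse('fr'))     # unmapped locale -> Markup('你好')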
else:
    print(err)
    usage()

htmltext = ""
zh_phons = ""
pi_phons = ""
pi_phons_ac = ""
phonperchar = []
hanzichars = []
phonpairs = []

zh_phons = hanzi.to_zhuyin(hanziin).split(" ")
pi_phons = transcriptions.zhuyin_to_pinyin(hanzi.to_zhuyin(hanziin),
                                           accented=False).split(" ")
pi_phons_ac = transcriptions.zhuyin_to_pinyin(hanzi.to_zhuyin(hanziin),
                                              accented=True).split(" ")

if remove_chars:
    for i in range(0, len(zh_phons)):
        try:
            remove = known_chars[i]
            verbose_print(remove)
            if remove == "2":
                if zhuyin:
                    zh_phons[i] = ""
                if pinyin:
                    pi_phons[i] = ""
            elif remove == "1":
                if zhuyin:
                    zh_phons[i] = phonetics_to_tone_marks(zh_phons[i], accented)
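# For reference, the three conversions above behave roughly like this
# (illustrative values from dragonmapper):
#
#   >>> hanzi.to_zhuyin('你好').split(' ')
#   ['ㄋㄧˇ', 'ㄏㄠˇ']
#   >>> transcriptions.zhuyin_to_pinyin(hanzi.to_zhuyin('你好'), accented=False).split(' ')
#   ['ni3', 'hao3']
#   >>> transcriptions.zhuyin_to_pinyin(hanzi.to_zhuyin('你好'), accented=True).split(' ')
#   ['nǐ', 'hǎo']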