def prepare_to_write(database):
    """Build the in-memory content of every output file for *database*.

    database -- mapping of section name -> {label: files-dict} entries.

    Returns a dict mapping output filename -> content to be written:
    a list of translation entries per file, plus the merged substitutions
    mapping stored under the ``sub_file`` key.
    """
    file_buffer = dict()
    substitutions = dict()
    oldsubs = dict()
    print("Trying to merge with old data...")
    try:
        with open_n_decode(sub_file, "r", 'utf-8') as f:
            oldsubs = load(f)
    # Narrowed from a bare `except:`; absence of old data is the expected,
    # non-fatal case (best-effort merge).
    except Exception:
        print("No old data found, creating new database.")
    for section, thedatabase in database.items():
        with Pool() as p:  # Do it parallel
            # Renamed comprehension variable so it no longer shadows the
            # file handle name `f` used above.
            result = p.imap_unordered(
                process_label,
                [(lbl, d, oldsubs, section) for lbl, d in thedatabase.items()],
                40)
            for fn, js, sb in result:  # Merge results
                for fs, flds in sb.items():
                    # Merge per-asset substitution fields into one mapping.
                    substitutions.setdefault(fs, dict()).update(flds)
                file_buffer.setdefault(fn, list()).append(js)
    file_buffer[sub_file] = substitutions
    return file_buffer
def parseFile(filename):
    """Extract translatable string chunks from one JSON asset file.

    filename -- path of the JSON asset to scan.

    Returns the list of chunk entries produced by ``textHandlers`` for
    every field of interest, or ``[]`` when the file cannot be parsed.
    """
    chunk = list()
    # Read and pre-process the file; close the handle as soon as the raw
    # text has been extracted.
    with open_n_decode(filename, "r", "utf-8") as f:
        string = prepare(f)
    jsondata = dict()
    try:
        jsondata = loads(string)
    except Exception:  # narrowed from a bare except
        print("Cannot parse " + filename)
        return []
    paths = list_field_paths(jsondata)
    # Files living under a ".../dialog" directory match every path.
    dialog = dirname(filename).endswith("dialog")
    for path in paths:
        for k in files_of_interest:
            if filename.endswith(k) or k == "*":
                for roi in files_of_interest[k]:
                    if roi.match(path) or dialog:
                        val = field_by_path(jsondata, path)
                        if not isinstance(val, str):
                            print("File: " + filename)
                            print("Type of " + path + " is not a string!")
                            continue
                        if val == "":
                            continue
                        for handler in textHandlers:
                            res = handler(val, filename, '/' + path)
                            if res:  # first matching handler wins
                                chunk += res
                                break
                        break
    return chunk
def write_file(filename, content):
    """Serialize *content* to *filename* as pretty-printed JSON.

    Everything except substitutions.json is first sorted by its English
    text so that output order stays stable between runs.  Raises when
    *filename* carries no directory component.
    """
    filedir = dirname(filename)
    if not filename.endswith("substitutions.json"):
        content = sorted(content, key=lambda entry: entry["Texts"]["Eng"])
    if filedir:
        makedirs(filedir, exist_ok=True)
    else:
        raise Exception("Filename without dir: " + filename)
    with open_n_decode(filename, "w", 'utf-8') as out:
        dump(content, out, ensure_ascii=False, indent=2, sort_keys=True)
def process_label(combo):
    """Creates the json file structure for a given label, then returns a
    tuple of filename, translation and substitutions.

    combo -- a tuple of 4 items:
      label   -- english text from the database
      files   -- filelist where the english text is used (also from the
                 database)
      oldsubs -- the parsed json content of substitutions.json from the
                 previous database, if it exists
      section -- section name; stored as the "Comment" field when truthy

    Returned tuple:
      filename      -- the name of the file the translation should be
                       added to
      translation   -- a part of the json file content to write into the
                       database
      substitutions -- a part of the newly formed substitutions file
                       content
    """
    label, files, oldsubs, section = combo
    substitutions = dict()
    obj_file = normpath(getSharedPath(files.keys()))
    translation = dict()
    if section:
        translation["Comment"] = section
    translation["Texts"] = dict()
    translation["Texts"]["Eng"] = label
    translation["DeniedAlternatives"] = list()
    filename = ""
    for thefile, fields in files.items():
        for field in fields:
            fieldend = basename(field)
            if fieldend in specialSharedPaths:
                # NOTE(review): obj_file is overwritten here and keeps that
                # value for all subsequent fields too (it is never reset per
                # iteration) — looks intentional, confirm before changing.
                obj_file = normpath(specialSharedPaths[fieldend])
            if obj_file == '.':
                obj_file = "wide_spread_fields"
            # filename keeps the value from the LAST field processed; that
            # is what gets returned below.
            filename = normpath(join(prefix, texts_prefix,
                                     obj_file + ".json"))
            if thefile != obj_file or fieldend in ["glitchEmotedText"]:
                if thefile not in substitutions:
                    substitutions[thefile] = dict()
                substitutions[thefile][field] = normpath(
                    relpath(filename, prefix))
            # Look up where the previous database stored this text so any
            # existing translations can be merged in.
            oldfile = normpath(
                join(prefix, file_by_assets(thefile, field, oldsubs)))
            if exists(oldfile):
                olddata = []
                try:
                    with open_n_decode(oldfile, 'r', 'utf-8') as f:
                        olddata = load(f)
                except:
                    pass  # If can not get old translation for any reason just skip it
                for oldentry in olddata:
                    if oldentry["Texts"]["Eng"] == label:
                        if "DeniedAlternatives" in oldentry:
                            # Keep the alternatives list sorted and free of
                            # duplicates while merging.
                            for a in oldentry["DeniedAlternatives"]:
                                if a not in translation["DeniedAlternatives"]:
                                    insort_left(
                                        translation["DeniedAlternatives"], a)
                        translation["Texts"].update(oldentry["Texts"])
                        break
    translation["Files"] = files
    return (filename,
            translation, substitutions)
def parseFile(filename):
    """Extract translatable string chunks from one JSON asset or .patch file.

    filename -- path of the asset; files listed in ``ignore_filelist`` are
    skipped entirely.

    Returns the list of chunk entries produced by ``textHandlers`` (prefixed
    with the literal marker "patch" for .patch files), or ``[]`` when the
    file cannot be parsed.
    """
    chunk = list()
    if basename(filename) in ignore_filelist:
        return chunk
    print(basename(filename))
    with open_n_decode(filename, "r", "utf_8_sig") as f:
        try:
            if basename(filename).endswith('.patch'):
                chunk.append("patch")
                # Idiom fix: membership test on the dict directly instead of
                # `dict.keys(patch_serialization)`.
                if basename(filename) in patch_serialization:
                    string = trans_patch(
                        f, patch_serialization[basename(filename)])
                else:
                    string = trans_patch(f)
                paths = to_a_list(string, 0)
            else:
                string = prepare(f)
                jsondata = loads(string)
                paths = list_field_paths(jsondata)
        except Exception:  # narrowed from a bare except
            print("Cannot parse " + filename)
            try:
                # `with` guarantees the log handle is closed even on error
                # (the original leaked it if writelines raised).
                with open(error_list_file, 'a') as problem_file:
                    problem_file.write(filename.replace(root_dir, '') + '\n')
            except Exception:
                pass  # best effort: never fail because the log is unwritable
            return []
    filename_base = filename
    is_patch = basename(filename_base).endswith('.patch')
    if is_patch:
        filename = filename.replace('.patch', "")
        # Hoisted out of the loop: the original recomputed
        # to_a_list(string, 1) for every matched path (O(n^2)).
        values = to_a_list(string, 1)
    dialog = dirname(filename).endswith("dialog")
    for i, path in enumerate(paths):
        for k in files_of_interest:
            if filename.endswith(k) or k == "*":
                for roi in files_of_interest[k]:
                    if roi.match(path) or dialog:
                        val = values[i] if is_patch \
                            else field_by_path(jsondata, path)
                        if not isinstance(val, str):
                            print("File: " + filename)
                            print("Type of " + path + " is not a string!")
                            continue
                        if val == "":
                            continue
                        for handler in textHandlers:
                            res = handler(val, filename, '/' + path)
                            if res:  # first matching handler wins
                                chunk += res
                                break
                        break
    return chunk
def process_label(combo):
    """Creates the json file structure for a given label, then returns a
    tuple of filename, translation and substitutions.

    combo -- a tuple of 4 items:
      label   -- english text from the database
      files   -- filelist where the english text is used (also from the
                 database)
      oldsubs -- the parsed json content of substitutions.json from the
                 previous database, if it exists
      section -- section name; stored as the "Comment" field when truthy

    Returned tuple:
      filename      -- the name of the file the translation should be
                       added to
      translation   -- a part of the json file content to write into the
                       database
      substitutions -- a part of the newly formed substitutions file
                       content
    """
    label, files, oldsubs, section = combo
    substitutions = dict()
    obj_file = normpath(getSharedPath(files.keys()))
    translation = dict()
    if section:
        translation["Comment"] = section
    translation["Texts"] = dict()
    translation["Texts"]["Eng"] = label
    translation["DeniedAlternatives"] = list()
    filename = ""
    for thefile, fields in files.items():
        for field in fields:
            fieldend = basename(field)
            if fieldend in specialSharedPaths:
                # NOTE(review): obj_file is overwritten here and keeps that
                # value for all subsequent fields too (it is never reset per
                # iteration) — looks intentional, confirm before changing.
                obj_file = normpath(specialSharedPaths[fieldend])
            if obj_file == '.':
                obj_file = "wide_spread_fields"
            # filename keeps the value from the LAST field processed; that
            # is what gets returned below.
            filename = normpath(join(prefix, texts_prefix,
                                     obj_file + ".json"))
            if thefile != obj_file or fieldend in ["glitchEmotedText"]:
                if thefile not in substitutions:
                    substitutions[thefile] = dict()
                substitutions[thefile][field] = normpath(relpath(filename,
                                                                 prefix))
            # Look up where the previous database stored this text so any
            # existing translations can be merged in.
            oldfile = normpath(join(prefix,
                                    file_by_assets(thefile, field, oldsubs)))
            if exists(oldfile):
                olddata = []
                try:
                    with open_n_decode(oldfile, 'r', 'utf-8') as f:
                        olddata = load(f)
                except:
                    pass  # If can not get old translation for any reason just skip it
                for oldentry in olddata:
                    if oldentry["Texts"]["Eng"] == label:
                        if "DeniedAlternatives" in oldentry:
                            # Keep the alternatives list sorted and free of
                            # duplicates while merging.
                            for a in oldentry["DeniedAlternatives"]:
                                if a not in translation["DeniedAlternatives"]:
                                    insort_left(translation["DeniedAlternatives"],
                                                a)
                        translation["Texts"].update(oldentry["Texts"])
                        break
    translation["Files"] = files
    return (filename,
            translation, substitutions)
def process_label(combo):
    """Build the json entry for one english label.

    combo -- a tuple of 4 items: (label, files, oldsubs, section), where
    label is the english text, files maps asset file -> list of field
    paths using it, oldsubs is the parsed previous substitutions.json
    (or empty), and section (when truthy) becomes the "Comment" field.

    Returns (filename, translation, substitutions): the output file the
    translation belongs to, the translation entry itself, and the new
    substitutions fragment for this label.
    """
    label, files, oldsubs, section = combo
    substitutions = dict()
    obj_file = normpath(getSharedPath(files.keys()))
    translation = dict()
    if section:
        translation["Comment"] = section
    translation["Texts"] = dict()
    translation["Texts"]["Eng"] = label
    translation["DeniedAlternatives"] = list()
    filename = ""
    for thefile, fields in files.items():
        for field in fields:
            fieldend = basename(field)
            if fieldend in specialSharedPaths:
                # NOTE(review): obj_file is overwritten here and keeps that
                # value for subsequent fields too (never reset per
                # iteration) — looks intentional, confirm before changing.
                obj_file = normpath(specialSharedPaths[fieldend])
            if obj_file == '.':
                obj_file = "wide_spread_fields"
            # filename keeps the value from the LAST field processed; that
            # is what gets returned below.
            filename = normpath(join(prefix, texts_prefix,
                                     obj_file + ".json"))
            if thefile != obj_file or fieldend in ["glitchEmotedText"]:
                if thefile not in substitutions:
                    substitutions[thefile] = dict()
                substitutions[thefile][field] = normpath(
                    relpath(filename, prefix))
            # Where the previous database stored this text, so existing
            # translations can be merged in.
            oldfile = normpath(
                join(prefix, file_by_assets(thefile, field, oldsubs)))
            if exists(oldfile):
                olddata = []
                try:
                    with open_n_decode(oldfile, 'r', 'utf-8') as f:
                        olddata = load(f)
                except:
                    # Best effort: if the old translation cannot be read
                    # for any reason, just skip it.
                    pass
                for oldentry in olddata:
                    if oldentry["Texts"]["Eng"] == label:
                        if "DeniedAlternatives" in oldentry:
                            # Keep the alternatives list sorted and free
                            # of duplicates while merging.
                            for a in oldentry["DeniedAlternatives"]:
                                if a not in translation["DeniedAlternatives"]:
                                    insort_left(
                                        translation["DeniedAlternatives"], a)
                        translation["Texts"].update(oldentry["Texts"])
                        break
    translation["Files"] = files
    return (filename, translation, substitutions)
def parseFile(filename):
    """Extract translatable string chunks from one .patch file.

    filename -- path of the asset; only files ending in '.patch' and not
    listed in ``ignore_filelist_patch`` are processed, everything else
    yields an empty list.

    Returns the list of chunk entries produced by ``textHandlers``, or
    ``[]`` when the file cannot be parsed.
    """
    chunk = list()
    name = basename(filename)
    if name in ignore_filelist_patch or not name.endswith('.patch'):
        return chunk
    print(name)
    with open_n_decode(filename, "r", "utf_8_sig") as f:
        try:
            # NOTE(review): all three branches call trans_patch(f)
            # identically; the two special lists look like placeholders
            # for future per-file handling — confirm before collapsing.
            if name in patchfile_spciallist1:
                string = trans_patch(f)
            elif name in patchfile_spciallist2:
                string = trans_patch(f)
            else:
                string = trans_patch(f)
        except Exception:  # narrowed from a bare except
            print("Cannot parse " + filename)
            # `with` guarantees the log handle is closed (the original
            # used open/close by hand).
            with open(pro_list, 'a') as problem_file:
                problem_file.write(filename.replace(root_dir, '') + '\n')
            return []
    paths = to_a_list(string, 0)
    # Hoisted out of the loop: the original recomputed
    # to_a_list(string, 1) for every matched path (O(n^2)).
    values = to_a_list(string, 1)
    # Hoisted: the '.patch'-stripped name was recomputed on every test.
    stripped = filename.replace('.patch', '')
    dialog = dirname(stripped).endswith("dialog")
    for i, path in enumerate(paths):
        for k in files_of_interest:
            if stripped.endswith(k) or k == "*":
                for roi in files_of_interest[k]:
                    if roi.match(path) or dialog:
                        val = values[i]
                        if not isinstance(val, str):
                            print("File: " + filename)
                            print("Type of " + path + " is not a string!")
                            continue
                        if val == "":
                            continue
                        for handler in textHandlers:
                            res = handler(val, stripped, '/' + path)
                            if res:  # first matching handler wins
                                chunk += res
                                break
                        break
    return chunk
if not re.search(rule[0]+'/'+'-', text) == None: wait = text.replace(rule[0]+'/'+'-', rule[0]+'/'+str(o)) path_list_3.append(wait) o = o+1 else: path_list_3.append(text) else: if not re.search(rule[0]+'/'+'-', text) == None: wait = text.replace(rule[0]+'/-', rule[0]+'/'+str(rule[1])) path_list_3.append(wait) else: path_list_3.append(text) return path_list_3 """ if __name__ == "__main__": jsons3 = open_n_decode( 'F:/FrackinUniverse-sChinese-Project/translations/others/dialog/converse.config.patch', "r", "utf_8_sig") list233 = [('generic', 70, 1),('cheerful', 31, 1),('jerk', 31, 1),('flirty', 31, 1),('anxious', 31, 1),('easilyspooked',32,1),('clumsy',31,1),('excited',31,1),('intrusive',31,1),('dumb',32,1),('emo',30,1),('fast',31,1),('nocturnal',32,1),('socialite',31,1),('ambitious',30,1)] test = trans_patch(jsons3) dict_old = dict() for i in range(len(test)): dict_old['/'+test[i][0]] = test[i][1] print(dict_old) """ if __name__ == "__main__": json_file_3 = open_n_decode( 'F:/sunlesssee/Sunless Sea_bak/entities/events.json', "r", "utf_8_sig") print(json.loads(prepare(json_file_3)))
path_2 = list_field_paths(i[2]) if path_2 == []: path_2 = ['*'] else: pass for v in path_2: if path_2 == ['*']: value = i[2] path = path_1.replace('/','',1) value_list_2.append(value) path_list_2.append(path) else: value = field_by_path(i[2],v) path = (path_1+'/'+ v).replace('/','',1) value_list_2.append(value) path_list_2.append(path) else: pass result = tuple(zip(path_list_2,value_list_2)) return result def convert(jsons): raw = jsons.read() newRaw = raw.decode('utf8') print(newRaw) """ if __name__ == "__main__": jsons3 = open_n_decode( 'F:/FrackinUniverse/objects/crafting/upgradeablecraftingobjects/craftingmedical/craftingmedical.object.patch', "r", "utf_8_sig") test = trans_patch(jsons3) print(test)