def make_zipfile_backup(startpath: str, backup_suffix: str):
	"""
	Make a .zip backup of the folder 'startpath' and all its contents.
	Resulting zip will be adjacent to the folder it is backing up with a
	slightly different name.

	:param startpath: absolute path of the folder you want to zip
	:param backup_suffix: segment inserted between the foldername and .zip extension
	:return: absolute path of the backup zip (truthy) if things are good,
	         False if the backup failed AND the user chose to abort.
	         If the backup failed but the user chose to continue, the intended
	         zip path is still returned even though no file exists there;
	         callers should check os.path.exists() before deleting it.
	"""
	# need to add .zip for checking against already-existing files and for printing
	zipname = startpath + "." + backup_suffix + ".zip"
	zipname = core.get_unused_file_name(zipname)
	core.MY_PRINT_FUNC("...making backup archive:")
	core.MY_PRINT_FUNC(zipname)
	try:
		root_dir = os.path.dirname(startpath)
		base_dir = os.path.basename(startpath)
		# need to remove .zip suffix because zipper forcefully adds .zip whether its already on the name or not
		shutil.make_archive(zipname[:-4], 'zip', root_dir, base_dir)
	except Exception as e:
		core.MY_PRINT_FUNC(e.__class__.__name__, e)
		info = ["ERROR3! Unable to create zipfile for backup.",
				"Do you want to continue without a zipfile backup?",
				"1 = Yes, 2 = No (abort)"]
		r = core.MY_SIMPLECHOICE_FUNC((1, 2), info)
		if r == 2:
			return False
	# BUGFIX: previously returned the bare constant True.  Callers (e.g. the
	# recompression script) store this return value as the zip's path so they
	# can delete the backup later if nothing changed; returning the real path
	# is still truthy, so every existing "if not r:" check keeps working.
	return zipname
def main(moreinfo=False):
	"""
	Walk the folder containing a chosen PMX model, attempt to re-save every
	image file as an optimized PNG (or fix mismatched file extensions), then
	update the texture references inside the PMX (and optionally its top-level
	neighbor PMX files) to match the new names.  Overwrites the PMX files in
	place after making a zip backup of the whole folder.

	:param moreinfo: if True, print per-file detail; if False, use a one-line
	                 overwriting progress printout
	:return: None
	"""
	# step zero: verify that Pillow exists
	if Image is None:
		core.MY_PRINT_FUNC("ERROR: Python library 'Pillow' not found. This script requires this library to run!")
		core.MY_PRINT_FUNC("This script cannot be ran from the EXE version, the Pillow library is too large to package into the executable.")
		core.MY_PRINT_FUNC("To install Pillow, please use the command 'pip install Pillow' in the Windows command prompt and then run the Python scripts directly.")
		return None
	# print pillow version just cuz
	core.MY_PRINT_FUNC("Using Pillow version '%s'" % Image.__version__)
	core.MY_PRINT_FUNC("Please enter name of PMX model file:")
	input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
	# absolute path to directory holding the pmx
	input_filename_pmx_abs = os.path.normpath(os.path.abspath(input_filename_pmx))
	startpath, input_filename_pmx_rel = os.path.split(input_filename_pmx_abs)
	# =========================================================================================================
	# first, build the list of ALL files that actually exist, then filter it down to neighbor PMXs and relevant files
	relative_all_exist_files = file_sort_textures.walk_filetree_from_root(startpath)
	core.MY_PRINT_FUNC("ALL EXISTING FILES:", len(relative_all_exist_files))
	# now fill "neighbor_pmx" by finding files without path separator that end in PMX
	# these are relative paths tho
	neighbor_pmx = [f for f in relative_all_exist_files if
					(f.lower().endswith(".pmx")) and
					(os.path.sep not in f) and
					f != input_filename_pmx_rel]
	core.MY_PRINT_FUNC("NEIGHBOR PMX FILES:", len(neighbor_pmx))
	# filter down to just image files
	relevant_exist_files = [f for f in relative_all_exist_files if f.lower().endswith(IMG_EXT)]
	core.MY_PRINT_FUNC("RELEVANT EXISTING FILES:", len(relevant_exist_files))
	# =========================================================================================================
	# now ask if I care about the neighbors and read the PMXes into memory
	pmx_filenames = [input_filename_pmx_rel]
	if neighbor_pmx:
		core.MY_PRINT_FUNC("")
		info = [
			"Detected %d top-level neighboring PMX files, these probably share the same filebase as the target." % len(neighbor_pmx),
			"If files are moved/renamed but the neighbors are not processed, the neighbor texture references will probably break.",
			"Do you want to process all neighbors in addition to the target? (highly recommended)",
			"1 = Yes, 2 = No"]
		r = core.MY_SIMPLECHOICE_FUNC((1, 2), info)
		if r == 1:
			core.MY_PRINT_FUNC("Processing target + all neighbor files")
			# append neighbor PMX files onto the list of files to be processed
			pmx_filenames += neighbor_pmx
		else:
			core.MY_PRINT_FUNC("WARNING: Processing only target, ignoring %d neighbor PMX files" % len(neighbor_pmx))
	# now read all the PMX objects & store in dict alongside the relative name
	# dictionary where keys are filename and values are resulting pmx objects
	all_pmx_obj = {}
	for this_pmx_name in pmx_filenames:
		this_pmx_obj = pmxlib.read_pmx(os.path.join(startpath, this_pmx_name), moreinfo=moreinfo)
		all_pmx_obj[this_pmx_name] = this_pmx_obj
	# =========================================================================================================
	# for each pmx, for each file on disk, match against files used in textures (case-insensitive) and replace with canonical name-on-disk
	# also fill out how much and how each file is used, and unify dupes between files, all that good stuff
	filerecord_list = file_sort_textures.categorize_files(all_pmx_obj, relevant_exist_files, moreinfo)
	# =========================================================================================================
	# DETERMINE NEW NAMES FOR FILES
	# first, create a backup of the folder
	# save the name, so that i can delete it if i didn't make any changes
	zipfile_name = ""
	if MAKE_BACKUP_ZIPFILE:
		r = file_sort_textures.make_zipfile_backup(startpath, BACKUP_SUFFIX)
		if not r:
			# this happens if the backup failed somehow AND the user decided to quit
			core.MY_PRINT_FUNC("Aborting: no files were changed")
			return None
		# NOTE(review): this stores the helper's return value as the zip's path so it
		# can be deleted later if nothing changed — verify make_zipfile_backup actually
		# returns the path and not a bare True, otherwise the delete-backup branch
		# below operates on a bool instead of a filename.
		zipfile_name = r
	# name used for temporary location
	tempfilename = os.path.join(startpath,"temp_image_file_just_delete_me.png")
	pil_cannot_inspect = 0        # count of files PIL could not open at all
	pil_cannot_inspect_list = []  # their names, for the final report
	pil_imgext_mismatch = 0       # count of files renamed due to wrong extension
	num_recompressed = 0          # count of files successfully re-saved as PNG
	# list of memory saved by recompressing each file. same order/length as "image_filerecords"
	mem_saved = []
	# make image persistient, so I know it always exists and I can always call "close" before open
	im = None
	# only iterate over images that exist, obviously
	image_filerecords = [f for f in filerecord_list if f.exists]
	# iterate over the images
	for i, p in enumerate(image_filerecords):
		abspath = os.path.join(startpath, p.name)
		orig_size = os.path.getsize(abspath)
		# if not moreinfo, then each line overwrites the previous like a progress printout does
		# if moreinfo, then each line is printed permanently
		core.MY_PRINT_FUNC("...analyzing {:>3}/{:>3}, file='{}', size={} ".format(
			i+1, len(image_filerecords), p.name, core.prettyprint_file_size(orig_size)), is_progress=(not moreinfo))
		mem_saved.append(0)
		# before opening, try to close it just in case
		if im is not None:
			im.close()
		# open the image & catch all possible errors
		try:
			im = Image.open(abspath)
		except FileNotFoundError as eeee:
			core.MY_PRINT_FUNC("FILESYSTEM MALFUNCTION!!", eeee.__class__.__name__, eeee)
			core.MY_PRINT_FUNC("os.walk created a list of all filenames on disk, but then this filename doesn't exist when i try to open it?")
			im = None
		except OSError as eeee:
			# this has 2 causes, "Unsupported BMP bitfields layout" or "cannot identify image file"
			if DEBUG:
				print("CANNOT INSPECT!1", eeee.__class__.__name__, eeee, p.name)
			im = None
		except NotImplementedError as eeee:
			# this is because there's some DDS format it can't make sense of
			if DEBUG:
				print("CANNOT INSPECT!2", eeee.__class__.__name__, eeee, p.name)
			im = None
		if im is None:
			# could not open: count it, remember the name, and move on to the next file
			pil_cannot_inspect += 1
			pil_cannot_inspect_list.append(p.name)
			continue
		if im.format not in IMG_TYPE_TO_EXT:
			core.MY_PRINT_FUNC("WARNING: file '%s' has unusual image format '%s', attempting to continue" % (p.name, im.format))
		# now the image is successfully opened!
		newname = p.name
		base, currext = os.path.splitext(newname)
		# 1, depending on image format, attempt to re-save as PNG
		if im.format not in IM_FORMAT_ALWAYS_SKIP:
			# delete temp file if it still exists
			if os.path.exists(tempfilename):
				try:
					os.remove(tempfilename)
				except OSError as e:
					core.MY_PRINT_FUNC(e.__class__.__name__, e)
					core.MY_PRINT_FUNC("ERROR1: failed to delete temp image file '%s' during processing" % tempfilename)
					break
			# save to tempfilename with png format, use optimize=true
			try:
				im.save(tempfilename, format="PNG", optimize=True)
			except OSError as e:
				core.MY_PRINT_FUNC(e.__class__.__name__, e)
				core.MY_PRINT_FUNC("ERROR2: failed to re-compress image '%s', original not modified" % p.name)
				continue
			# measure & compare file size
			new_size = os.path.getsize(tempfilename)
			diff = orig_size - new_size
			# if using a 16-bit BMP format, re-save back to bmp with same name
			is_bad_bmp = False
			if im.format == "BMP":
				try:
					# this might fail, images are weird, sometimes they don't have the attributes i expect
					if im.tile[0][3][0] in KNOWN_BAD_FORMATS:
						is_bad_bmp = True
				except Exception as e:
					if DEBUG:
						print(e.__class__.__name__, e, "BMP THING", p.name, im.tile)
			if diff > (REQUIRED_COMPRESSION_AMOUNT_KB * 1024) \
					or is_bad_bmp \
					or im.format in IM_FORMAT_ALWAYS_CONVERT:
				# if it frees up at least XXX kb, i will keep it!
				# also keep it if it is a bmp encoded with 15-bit or 16-bit colors
				# set p.newname = png, and delete original and move tempname to base.png
				try:
					# delete original
					os.remove(os.path.join(startpath, p.name))
				except OSError as e:
					core.MY_PRINT_FUNC(e.__class__.__name__, e)
					core.MY_PRINT_FUNC("ERROR3: failed to delete old image '%s' after recompressing" % p.name)
					continue
				newname = base + ".png"
				# resolve potential collisions by adding numbers suffix to file names
				# first need to make path absolute so get_unused_file_name can check the disk.
				newname = os.path.join(startpath, newname)
				# then check uniqueness against files on disk
				newname = core.get_unused_file_name(newname)
				# now dest path is guaranteed unique against other existing files
				# make the path no longer absolute: undo adding "startpath" above
				newname = os.path.relpath(newname, startpath)
				try:
					# move new into place
					os.rename(tempfilename, os.path.join(startpath, newname))
				except OSError as e:
					core.MY_PRINT_FUNC(e.__class__.__name__, e)
					core.MY_PRINT_FUNC("ERROR4: after deleting original '%s', failed to move recompressed version into place!" % p.name)
					continue
				num_recompressed += 1
				p.newname = newname
				mem_saved[-1] = diff
				continue  # if succesfully re-saved, do not do the extension-checking below
			# if this is not sufficiently compressed, do not use "continue", DO hit the extension-checking below
		# 2, if the file extension doesn't match with the image type, then make it match
		# this only happens if the image was not re-saved above
		if im.format in IMG_TYPE_TO_EXT and currext not in IMG_TYPE_TO_EXT[im.format]:
			newname = base + IMG_TYPE_TO_EXT[im.format][0]
			# resolve potential collisions by adding numbers suffix to file names
			# first need to make path absolute so get_unused_file_name can check the disk.
			newname = os.path.join(startpath, newname)
			# then check uniqueness against files on disk
			newname = core.get_unused_file_name(newname)
			# now dest path is guaranteed unique against other existing files
			# make the path no longer absolute: undo adding "startpath" above
			newname = os.path.relpath(newname, startpath)
			# do the actual rename here & now
			try:
				# os.renames creates all necessary intermediate folders needed for the destination
				# it also deletes the source folders if they become empty after the rename operation
				os.renames(os.path.join(startpath, p.name), os.path.join(startpath, newname))
			except OSError as e:
				core.MY_PRINT_FUNC(e.__class__.__name__, e)
				core.MY_PRINT_FUNC("ERROR5: unable to rename file '%s' --> '%s', attempting to continue with other file rename operations" % (p.name, newname))
				continue
			pil_imgext_mismatch += 1
			p.newname = newname
			continue
	# these must be the same length after iterating
	assert len(mem_saved) == len(image_filerecords)
	# if the image is still open, close it
	if im is not None:
		im.close()
	# delete temp file if it still exists
	if os.path.exists(tempfilename):
		try:
			os.remove(tempfilename)
		except OSError as e:
			core.MY_PRINT_FUNC(e.__class__.__name__, e)
			core.MY_PRINT_FUNC("WARNING: failed to delete temp image file '%s' after processing" % tempfilename)
	# =========================================================================================================
	# are there any with proposed renaming?
	if not any(u.newname is not None for u in image_filerecords):
		core.MY_PRINT_FUNC("No proposed file changes")
		# if nothing was changed, delete the backup zip!
		core.MY_PRINT_FUNC("Deleting backup archive")
		if os.path.exists(zipfile_name):
			try:
				os.remove(zipfile_name)
			except OSError as e:
				core.MY_PRINT_FUNC(e.__class__.__name__, e)
				core.MY_PRINT_FUNC("WARNING: failed to delete pointless zip file '%s'" % zipfile_name)
		core.MY_PRINT_FUNC("Aborting: no files were changed")
		return None
	# =========================================================================================================
	# finally, do the actual renaming:
	# do all renaming in PMXes
	file_sort_textures.apply_file_renaming(all_pmx_obj, image_filerecords, startpath, skipdiskrename=True)
	# write out
	for this_pmx_name, this_pmx_obj in all_pmx_obj.items():
		# NOTE: this is OVERWRITING THE PREVIOUS PMX FILE, NOT CREATING A NEW ONE
		# because I make a zipfile backup I don't need to feel worried about preserving the old version
		output_filename_pmx = os.path.join(startpath, this_pmx_name)
		# output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
		pmxlib.write_pmx(output_filename_pmx, this_pmx_obj, moreinfo=moreinfo)
	# =========================================================================================================
	# NOW PRINT MY RENAMINGS and other findings
	filerecord_with_savings = zip(image_filerecords, mem_saved)
	changed_files = [u for u in filerecord_with_savings if u[0].newname is not None]
	core.MY_PRINT_FUNC("="*60)
	if pil_cannot_inspect:
		core.MY_PRINT_FUNC("WARNING: failed to inspect %d image files, these must be handled manually" % pil_cannot_inspect)
		core.MY_PRINT_FUNC(pil_cannot_inspect_list)
	if num_recompressed:
		core.MY_PRINT_FUNC("Recompressed %d images! %s of disk space has been freed" % (num_recompressed, core.prettyprint_file_size(sum(mem_saved))))
	if pil_imgext_mismatch:
		core.MY_PRINT_FUNC("Renamed %d images that had incorrect extensions (included below)" % pil_imgext_mismatch)
	oldname_list = [p[0].name for p in changed_files]
	oldname_list_j = core.MY_JUSTIFY_STRINGLIST(oldname_list)
	newname_list = [p[0].newname for p in changed_files]
	newname_list_j = core.MY_JUSTIFY_STRINGLIST(newname_list)
	savings_list = [("" if p[1]==0 else "saved " + core.prettyprint_file_size(p[1])) for p in changed_files]
	zipped = list(zip(oldname_list_j, newname_list_j, savings_list))
	zipped_and_sorted = sorted(zipped, key=lambda y: file_sort_textures.sortbydirdepth(y[0]))
	for o,n,s in zipped_and_sorted:
		# print 'from' with the case/separator it uses in the PMX
		core.MY_PRINT_FUNC("  {:s} --> {:s} | {:s}".format(o, n, s))
	core.MY_PRINT_FUNC("Done!")
	return None
def main(moreinfo=True):
	"""
	Prompt for a PMX file and a target morph, scale that morph's offsets by a
	user-chosen factor, and write the model out to a new, unused filename.

	:param moreinfo: if True, print extra detail while reading/writing the PMX
	:return: None
	"""
	# prompt PMX name
	core.MY_PRINT_FUNC("Please enter name of PMX input file:")
	input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
	pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
	core.MY_PRINT_FUNC("")
	# the validator lambda only yields true/false, so the accepted string must
	# be re-resolved to an actual morph index afterward
	morph_prompt = [
		"Please specify the target morph: morph #, JP name, or EN name (names are not case sensitive).",
		"Empty input will quit the script."]
	chosen_str = core.MY_GENERAL_INPUT_FUNC(
		lambda x: get_idx_in_pmxsublist(x, pmx.morphs) is not None, morph_prompt)
	target_index = get_idx_in_pmxsublist(chosen_str, pmx.morphs)
	# empty text means the user wants out
	if target_index is None or target_index == -1:
		core.MY_PRINT_FUNC("quitting")
		return None
	# announce which morph was matched
	target_morph = pmx.morphs[target_index]
	morphtype = target_morph.morphtype
	core.MY_PRINT_FUNC("Found {} morph #{}: '{}' / '{}'".format(
		morphtype, target_index, target_morph.name_jp, target_morph.name_en))
	# bone morphs get an extra question: scale translation, rotation, or both?
	bone_mode = 0
	if morphtype == pmxstruct.MorphType.BONE:
		bone_mode = core.MY_SIMPLECHOICE_FUNC((1, 2, 3), [
			"Bone morph detected: do you want to scale the motion(translation), rotation, or both?",
			"1 = motion(translation), 2 = rotation, 3 = both"])
	# keep looping the factor prompt until the input parses as a float
	def _accepts_float(x):
		try:
			float(x)
			return True
		except ValueError:
			core.MY_PRINT_FUNC("Please enter a decimal number")
			return False
	factor_str = core.MY_GENERAL_INPUT_FUNC(
		_accepts_float, "Enter the factor that you want to scale this morph by:")
	if factor_str == "":
		core.MY_PRINT_FUNC("quitting")
		return None
	factor = float(factor_str)
	# key values at this point: target_index, factor, morphtype, bone_mode
	# either scale the existing morph in place or work on a deep copy, per the module switch
	newmorph = pmx.morphs[target_index] if SCALE_MORPH_IN_PLACE else copy.deepcopy(pmx.morphs[target_index])
	# tag the morph's names with (up to 6 chars of) the factor
	name_suffix = "*" + str(factor)[0:6]
	newmorph.name_jp += name_suffix
	newmorph.name_en += name_suffix
	# scale the actual offsets; bail out if the scaler reports failure
	if not morph_scale(newmorph, factor, bone_mode):
		core.MY_PRINT_FUNC("quitting")
		return None
	pmx.morphs.append(newmorph)
	# write out to a fresh filename derived from the input name
	output_filename_pmx = input_filename_pmx[0:-4] + ("_%dscal.pmx" % target_index)
	output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
	pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=moreinfo)
	core.MY_PRINT_FUNC("Done!")
	return None
def main(moreinfo=False):
	"""
	Translate the JP filenames of all files under a chosen PMX model's folder
	to English via Google Translate, then rename the files on disk and update
	every reference inside the target PMX (and optionally its top-level
	neighbor PMXes) to match.  Prints the proposed renames, asks for
	confirmation, and makes a zip backup before touching anything.

	:param moreinfo: if True, print extra detail while reading/writing PMX files
	:return: None
	"""
	core.MY_PRINT_FUNC("Please enter name of PMX model file:")
	input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
	# step zero: set up the translator thingy
	translate_to_english.init_googletrans()
	# texture sorting plan:
	# 1. get startpath = basepath of input PMX
	# 2. get lists of relevant files
	#    2a. extract top-level 'neighbor' pmx files from all-set
	# 3. ask about modifying neighbor PMX
	# 4. read PMX: either target or target+all neighbor
	# 5. "categorize files & normalize usages within PMX", NEW FUNC!!!
	# 6. translate all names via Google Trans, don't even bother with local dict
	# 7. mask out invalid windows filepath chars just to be safe
	# 8. print proposed names & other findings
	#    for unused files under a folder, combine & replace with ***
	# 9. ask for confirmation
	# 10. zip backup (NEW FUNC!)
	# 11. apply renaming, NEW FUNC! rename all including old PMXes on disk
	# 12. get new names for PMXes, write PMX from mem to disk if any of its contents changed
	#     i.e. of all FileRecord with a new name, create a set of all the PMX that use them
	# absolute path to directory holding the pmx
	input_filename_pmx_abs = os.path.normpath(os.path.abspath(input_filename_pmx))
	startpath, input_filename_pmx_rel = os.path.split(input_filename_pmx_abs)
	# =========================================================================================================
	# first, build the list of ALL files that actually exist, then filter it down to neighbor PMXs and relevant files
	relative_all_exist_files = file_sort_textures.walk_filetree_from_root(startpath)
	core.MY_PRINT_FUNC("ALL EXISTING FILES:", len(relative_all_exist_files))
	# now fill "neighbor_pmx" by finding files without path separator that end in PMX
	# these are relative paths tho
	neighbor_pmx = [
		f for f in relative_all_exist_files if (f.lower().endswith(".pmx")) and (
			os.path.sep not in f) and f != input_filename_pmx_rel
	]
	# no filtering, all files are relevant
	relevant_exist_files = relative_all_exist_files
	core.MY_PRINT_FUNC("NEIGHBOR PMX FILES:", len(neighbor_pmx))
	# =========================================================================================================
	# now ask if I care about the neighbors and read the PMXes into memory
	pmx_filenames = [input_filename_pmx_rel]
	if neighbor_pmx:
		core.MY_PRINT_FUNC("")
		info = [
			"Detected %d top-level neighboring PMX files, these probably share the same filebase as the target." % len(neighbor_pmx),
			"If files are moved/renamed but the neighbors are not processed, the neighbor texture references will probably break.",
			"Do you want to process all neighbors in addition to the target? (highly recommended)",
			"1 = Yes, 2 = No"
		]
		r = core.MY_SIMPLECHOICE_FUNC((1, 2), info)
		if r == 1:
			core.MY_PRINT_FUNC("Processing target + all neighbor files")
			# append neighbor PMX files onto the list of files to be processed
			pmx_filenames += neighbor_pmx
		else:
			core.MY_PRINT_FUNC(
				"WARNING: Processing only target, ignoring %d neighbor PMX files" % len(neighbor_pmx))
	# now read all the PMX objects & store in dict alongside the relative name
	# dictionary where keys are filename and values are resulting pmx objects
	all_pmx_obj = {}
	for this_pmx_name in pmx_filenames:
		this_pmx_obj = pmxlib.read_pmx(os.path.join(startpath, this_pmx_name), moreinfo=moreinfo)
		all_pmx_obj[this_pmx_name] = this_pmx_obj
	# =========================================================================================================
	# for each pmx, for each file on disk, match against files used in textures (case-insensitive) and replace with canonical name-on-disk
	# also fill out how much and how each file is used, and unify dupes between files, all that good stuff
	filerecord_list = file_sort_textures.categorize_files(all_pmx_obj, relevant_exist_files, moreinfo)
	# =========================================================================================================
	# DETERMINE NEW NAMES FOR FILES
	# how to remap: build a list of all destinations (lowercase) to see if any proposed change would lead to collision
	all_new_names = set()
	# get new names via google
	# force it to use chunk-wise translate
	newname_list = translate_to_english.google_translate(
		[p.name for p in filerecord_list], strategy=1)
	# now repair any windows-forbidden symbols that might have shown up after translation
	newname_list = [
		n.translate(invalid_windows_chars_ord) for n in newname_list
	]
	# iterate over the results in parallel with the FileRecord items
	for p, newname in zip(filerecord_list, newname_list):
		if newname != p.name:
			# resolve potential collisions by adding numbers suffix to file names
			# first need to make path absolute so get_unused_file_name can check the disk.
			# then check uniqueness against files on disk and files in namelist (files that WILL be on disk)
			newname = core.get_unused_file_name(os.path.join(
				startpath, newname), namelist=all_new_names)
			# now dest path is guaranteed unique against other existing files & other proposed name changes
			all_new_names.add(newname.lower())
			# make the path no longer absolute: undo adding "startpath" above
			newname = os.path.relpath(newname, startpath)
			p.newname = newname
	# =========================================================================================================
	# NOW PRINT MY PROPOSED RENAMINGS and other findings
	# isolate the ones with proposed renaming
	translated_file = [u for u in filerecord_list if u.newname is not None]
	if translated_file:
		core.MY_PRINT_FUNC("=" * 60)
		core.MY_PRINT_FUNC("Found %d JP filenames to be translated:" % len(translated_file))
		oldname_list = core.MY_JUSTIFY_STRINGLIST([p.name for p in translated_file])
		newname_list = [p.newname for p in translated_file]
		zipped = list(zip(oldname_list, newname_list))
		zipped_and_sorted = sorted(
			zipped, key=lambda y: file_sort_textures.sortbydirdepth(y[0]))
		for o, n in zipped_and_sorted:
			# print 'from' with the case/separator it uses in the PMX
			core.MY_PRINT_FUNC("  {:s} --> {:s}".format(o, n))
		core.MY_PRINT_FUNC("=" * 60)
	else:
		core.MY_PRINT_FUNC("No proposed file changes")
		core.MY_PRINT_FUNC("Aborting: no files were changed")
		return None
	info = [
		"Do you accept these new names/locations?",
		"1 = Yes, 2 = No (abort)"
	]
	r = core.MY_SIMPLECHOICE_FUNC((1, 2), info)
	if r == 2:
		core.MY_PRINT_FUNC("Aborting: no files were changed")
		return None
	# =========================================================================================================
	# finally, do the actual renaming:
	# first, create a backup of the folder
	if MAKE_BACKUP_BEFORE_RENAMES:
		r = file_sort_textures.make_zipfile_backup(startpath, BACKUP_SUFFIX)
		if not r:
			# this happens if the backup failed somehow AND the user decided to quit
			core.MY_PRINT_FUNC("Aborting: no files were changed")
			return None
	# do all renaming on disk and in PMXes, and also handle the print statements
	file_sort_textures.apply_file_renaming(all_pmx_obj, filerecord_list, startpath)
	# write out
	for this_pmx_name, this_pmx_obj in all_pmx_obj.items():
		# what name do i write this pmx to? it may be different now! find it in the FileRecord!
		# this script does not filter filerecord_list so it is guaranteed to hae a record
		rec = None
		for r in filerecord_list:
			if r.name == this_pmx_name:
				rec = r
				break
		if rec.newname is None:
			# if there is no new name, write back to the name it had previously
			new_pmx_name = rec.name
		else:
			# if there is a new name, write to the new name
			new_pmx_name = rec.newname
		# make the name absolute
		output_filename_pmx = os.path.join(startpath, new_pmx_name)
		# write it, overwriting the existing file at that name
		pmxlib.write_pmx(output_filename_pmx, this_pmx_obj, moreinfo=moreinfo)
	core.MY_PRINT_FUNC("Done!")
	return None
def main(moreinfo=True): # prompt PMX name core.MY_PRINT_FUNC("Please enter name of PMX input file:") input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx") # input_filename_pmx = "../../python_scripts/grasstest_better.pmx" pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo) ################################## # user flow: # first ask whether they want to add armtwist, yes/no # second ask whether they want to add legtwist, yes/no # then do it # then write out to file ################################## working_queue = [] s = core.MY_SIMPLECHOICE_FUNC((1, 2), [ "Do you wish to add magic twistbones to the ARMS?", "1 = Yes, 2 = No" ]) if s == 1: # add upperarm set and lowerarm set to the queue working_queue.append(armset) working_queue.append(wristset) pass s = core.MY_SIMPLECHOICE_FUNC((1, 2), [ "Do you wish to add magic twistbones to the LEGS?", "1 = Yes, 2 = No" ]) if s == 1: # TODO detect whether d-bones exist or not # add legs or d-legs set to the queue pass if not working_queue: core.MY_PRINT_FUNC("Nothing was changed") core.MY_PRINT_FUNC("Done") return None # for each set in the queue, for boneset in working_queue: # boneset = (start, end, preferred, oldrigs, bezier) for side in [jp_l, jp_r]: # print(side) # print(boneset) # 1. first, validate that start/end exist, these are required # NOTE: remember to prepend 'side' before all jp names! start_jp = side + boneset[0] start_idx = core.my_list_search(pmx.bones, lambda x: x.name_jp == start_jp) if start_idx is None: core.MY_PRINT_FUNC( "ERROR: standard bone '%s' not found in model, this is required!" % start_jp) continue end_jp = side + boneset[1] end_idx = core.my_list_search(pmx.bones, lambda x: x.name_jp == end_jp) if end_idx is None: core.MY_PRINT_FUNC( "ERROR: standard bone '%s' not found in model, this is required!" % end_jp) continue # 2. 
determine whether the 'preferredparent' exists and therefore what to acutally use as the parent parent_jp = side + boneset[2] parent_idx = core.my_list_search(pmx.bones, lambda x: x.name_jp == parent_jp) if parent_idx is None: parent_idx = start_idx # 3. attempt to collapse known armtwist rig names onto 'parent' so that the base case is further automated # for each bonename in boneset[3], if it exists, collapse onto boneidx parent_idx for bname in boneset[3]: rig_idx = core.my_list_search( pmx.bones, lambda x: x.name_jp == side + bname) if rig_idx is None: continue # if not found, try the next # when it is found, what 'factor' do i use? # print(side+bname) if pmx.bones[rig_idx].inherit_rot and pmx.bones[ rig_idx].inherit_parent_idx == parent_idx and pmx.bones[ rig_idx].inherit_ratio != 0: # if using partial rot inherit AND inheriting from parent_idx AND ratio != 0, use that # think this is good, if twistbones exist they should be children of preferred f = pmx.bones[rig_idx].inherit_ratio elif pmx.bones[rig_idx].parent_idx == parent_idx: # this should be just in case? f = 1 elif pmx.bones[rig_idx].parent_idx == start_idx: # this should catch magic armtwist bones i previously created f = 1 else: core.MY_PRINT_FUNC( "Warning, found unusual relationship when collapsing old armtwist rig, assuming ratio=1" ) f = 1 transfer_bone_weights(pmx, parent_idx, rig_idx, f) pass # also collapse 'start' onto 'preferredparent' if it exists... want to transfer weight from 'arm' to 'armtwist' # if start == preferredparent this does nothing, no harm done transfer_bone_weights(pmx, parent_idx, start_idx, scalefactor=1) # 4. run the weight-cleanup function normalize_weights(pmx) # 5. 
append 3 new bones to end of bonelist # armYZ gets pos = start pos & parent = start parent basename_jp = pmx.bones[start_idx].name_jp armYZ_new_idx = len(pmx.bones) # armYZ = [basename_jp + yz_suffix, local_translate(basename_jp + yz_suffix)] # name_jp,en # armYZ += pmx[5][start_idx][2:] # copy the whole rest of the bone # armYZ[10:12] = [False, False] # visible=false, enabled=false # armYZ[12:14] = [True, [armYZ_new_idx + 1]] # tail type = tail, tail pointat = armYZend # armYZ[14:19] = [False, False, [], False, []] # disable partial inherit + fixed axis # # local axis is copy # armYZ[21:25] = [False, [], False, []] # disable ext parent + ik armYZ = pmxstruct.PmxBone( name_jp=basename_jp + yz_suffix, name_en=local_translate(basename_jp + yz_suffix), pos=pmx.bones[start_idx].pos, parent_idx=pmx.bones[start_idx].parent_idx, deform_layer=pmx.bones[start_idx].deform_layer, deform_after_phys=pmx.bones[start_idx].deform_after_phys, has_localaxis=True, localaxis_x=pmx.bones[start_idx].localaxis_x, localaxis_z=pmx.bones[start_idx].localaxis_z, tail_type=True, tail=armYZ_new_idx + 1, has_rotate=True, has_translate=True, has_visible=False, has_enabled=True, has_ik=False, inherit_rot=False, inherit_trans=False, has_fixedaxis=False, has_externalparent=False, ) # armYZend gets pos = end pos & parent = armYZ # armYZend = [basename_jp + yz_suffix + "先", local_translate(basename_jp + yz_suffix + "先")] # name_jp,en # armYZend += pmx[5][end_idx][2:] # copy the whole rest of the bone # armYZend[5] = armYZ_new_idx # parent = armYZ # armYZend[10:12] = [False, False] # visible=false, enabled=false # armYZend[12:14] = [True, [-1]] # tail type = tail, tail pointat = none # armYZend[14:19] = [False, False, [], False, []] # disable partial inherit + fixed axis # # local axis is copy # armYZend[21:25] = [False, [], False, []] # disable ext parent + ik armYZend = pmxstruct.PmxBone( name_jp=basename_jp + yz_suffix + "先", name_en=local_translate(basename_jp + yz_suffix + "先"), 
pos=pmx.bones[end_idx].pos, parent_idx=armYZ_new_idx, deform_layer=pmx.bones[end_idx].deform_layer, deform_after_phys=pmx.bones[end_idx].deform_after_phys, has_localaxis=True, localaxis_x=pmx.bones[end_idx].localaxis_x, localaxis_z=pmx.bones[end_idx].localaxis_z, tail_type=True, tail=-1, has_rotate=True, has_translate=True, has_visible=False, has_enabled=True, has_ik=False, inherit_rot=False, inherit_trans=False, has_fixedaxis=False, has_externalparent=False, ) # # elbowIK gets pos = end pos & parent = end parent # armYZIK = [basename_jp + yz_suffix + "IK", local_translate(basename_jp + yz_suffix + "IK")] # name_jp,en # armYZIK += pmx[5][end_idx][2:] # copy the whole rest of the bone # armYZIK[10:12] = [False, False] # visible=false, enabled=false # armYZIK[12:14] = [True, [-1]] # tail type = tail, tail pointat = none # armYZIK[14:19] = [False, False, [], False, []] # disable partial inherit + fixed axis # # local axis is copy # armYZIK[21:23] = [False, []] # disable ext parent # armYZIK[23] = True # ik=true # # add the ik info: [target, loops, anglelimit, [[link_idx, []], [link_idx, []]] ] # armYZIK[24] = [armYZ_new_idx+1, newik_loops, newik_angle, [[armYZ_new_idx, []]]] armYZIK = pmxstruct.PmxBone( name_jp=basename_jp + yz_suffix + "IK", name_en=local_translate(basename_jp + yz_suffix + "IK"), pos=pmx.bones[end_idx].pos, parent_idx=pmx.bones[end_idx].parent_idx, deform_layer=pmx.bones[end_idx].deform_layer, deform_after_phys=pmx.bones[end_idx].deform_after_phys, has_localaxis=True, localaxis_x=pmx.bones[end_idx].localaxis_x, localaxis_z=pmx.bones[end_idx].localaxis_z, tail_type=True, tail=-1, has_rotate=True, has_translate=True, has_visible=False, has_externalparent=False, has_enabled=True, inherit_rot=False, inherit_trans=False, has_fixedaxis=False, has_ik=True, ik_target_idx=armYZ_new_idx + 1, ik_numloops=newik_loops, ik_angle=newik_angle, ik_links=[pmxstruct.PmxBoneIkLink(idx=armYZ_new_idx)]) # now append them to the bonelist pmx.bones.append(armYZ) 
pmx.bones.append(armYZend) pmx.bones.append(armYZIK) # 6. build the bezier curve bezier_curve = core.MyBezier(boneset[4][0], boneset[4][1], resolution=50) # 7. find relevant verts & determine unbounded percentile for each (verts, percentiles, centers) = calculate_percentiles(pmx, start_idx, end_idx, parent_idx) if moreinfo: core.MY_PRINT_FUNC( "Blending between bones '{}'/'{}'=ZEROtwist and '{}'/'{}'=FULLtwist" .format(armYZ.name_jp, armYZ.name_en, pmx.bones[parent_idx].name_jp, pmx.bones[parent_idx].name_en)) core.MY_PRINT_FUNC( " Found %d potentially relevant vertices" % len(verts)) # 8. use X or Y to choose border points, print for debugging, also scale the percentiles # first sort ascending by percentile value vert_zip = list(zip(verts, percentiles, centers)) vert_zip.sort(key=lambda x: x[1]) verts, percentiles, centers = zip(*vert_zip) # unzip # X. highest point mode # "liberal" endpoints: extend as far as i can, include all good stuff even if i include some bad stuff with it # start at each end and work inward until i find a vert controlled by only parent_idx i_min_liberal = 0 i_max_liberal = len(verts) - 1 i_min_conserv = -1 i_max_conserv = len(verts) for i_min_liberal in range( 0, len(verts)): # start at head and work down, if pmx.verts[verts[ i_min_liberal]].weighttype == 0: # if the vertex is BDEF1 type, break # then stop looking, p_min_liberal = percentiles[ i_min_liberal] # and save the percentile it found. for i_max_liberal in reversed(range( 0, len(verts))): # start at tail and work up, if pmx.verts[verts[ i_max_liberal]].weighttype == 0: # if the vertex is BDEF1 type, break # then stop looking, p_max_liberal = percentiles[ i_max_liberal] # and save the percentile it found. # Y. lowest highest point mode # "conservative" endpoints: define ends such that no bad stuff exists within bounds, even if i miss some good stuff # start in the middle and work outward until i find a vert NOT controlled by only parent_idx, then back off 1 # where is the middle? 
use "bisect_left" middle = core.bisect_left(percentiles, 0.5) for i_min_conserv in reversed( range(middle - 1)): # start in middle, work toward head, if pmx.verts[verts[ i_min_conserv]].weighttype != 0: # if the vertex is NOT BDEF1 type, break # then stop looking, i_min_conserv += 1 # and step back 1 to find the last vert that was good BDEF1, p_min_conserv = percentiles[ i_min_conserv] # and save the percentile it found. for i_max_conserv in range( middle + 1, len(verts)): # start in middle, work toward tail, if pmx.verts[verts[ i_max_conserv]].weighttype != 0: # if the vertex is NOT BDEF1 type, break # then stop looking, i_max_conserv -= 1 # and step back 1 to find the last vert that was good BDEF1, p_max_conserv = percentiles[ i_max_conserv] # and save the percentile it found. foobar = False if not (i_min_liberal <= i_min_conserv <= i_max_conserv <= i_max_liberal): core.MY_PRINT_FUNC( "ERROR: bounding indexes do not follow the expected relationship, results may be bad!" ) foobar = True if foobar or moreinfo: core.MY_PRINT_FUNC( " Max liberal bounds: idx = %d to %d, %% = %f to %f" % (i_min_liberal, i_max_liberal, p_min_liberal, p_max_liberal)) core.MY_PRINT_FUNC( " Max conservative bounds: idx = %d to %d, %% = %f to %f" % (i_min_conserv, i_max_conserv, p_min_conserv, p_max_conserv)) # IDEA: WEIGHTED BLEND! sliding scale! 
avg_factor = core.clamp(ENDPOINT_AVERAGE_FACTOR, 0.0, 1.0) if p_min_liberal != p_min_conserv: p_min = (p_min_liberal * avg_factor) + (p_min_conserv * (1 - avg_factor)) else: p_min = p_min_liberal if p_max_liberal != p_max_conserv: p_max = (p_max_liberal * avg_factor) + (p_max_conserv * (1 - avg_factor)) else: p_max = p_max_liberal # clamp just in case p_min = core.clamp(p_min, 0.0, 1.0) p_max = core.clamp(p_max, 0.0, 1.0) if moreinfo: i_min = core.bisect_left(percentiles, p_min) i_max = core.bisect_left(percentiles, p_max) core.MY_PRINT_FUNC( " Compromise bounds: idx = %d to %d, %% = %f to %f" % (i_min, i_max, p_min, p_max)) # now normalize the percentiles to these endpoints p_len = p_max - p_min percentiles = [(p - p_min) / p_len for p in percentiles] # 9. divide weight between preferredparent (or parent) and armYZ vert_zip = list(zip(verts, percentiles, centers)) num_modified, num_bleeders = divvy_weights( pmx=pmx, vert_zip=vert_zip, axis_limits=(pmx.bones[start_idx].pos, pmx.bones[end_idx].pos), bone_hasweight=parent_idx, bone_getsweight=armYZ_new_idx, bezier=bezier_curve) if moreinfo: core.MY_PRINT_FUNC( " Modified %d verts to use blending, %d are questionable 'bleeding' points" % (num_modified, num_bleeders)) pass pass # 10. run final weight-cleanup normalize_weights(pmx) # 11. write out output_filename_pmx = input_filename_pmx[0:-4] + "_magictwist.pmx" output_filename_pmx = core.get_unused_file_name(output_filename_pmx) pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=moreinfo) core.MY_PRINT_FUNC("Done!") return None
def main(moreinfo=False):
    """
    Interactively sort/rename the texture files referenced by a PMX model.

    Prompts the user for a PMX file, walks the folder tree around it, matches
    on-disk files against the texture references inside the PMX (and optionally
    its top-level "neighbor" PMX files), proposes new names/locations for the
    files, and — after user confirmation and an optional zip backup — renames
    the files on disk and overwrites the PMX(es) in place.

    :param moreinfo: if True, print extra diagnostic detail along the way
    :return: None
    """
    core.MY_PRINT_FUNC("Please enter name of PMX model file:")
    input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")

    # texture sorting plan:
    # 1. get startpath = basepath of input PMX
    # 2. get lists of relevant files
    #    2a. get list of ALL files within the tree, relative to startpath
    #    2b. extract top-level 'neighbor' pmx files from all-set
    #    2c. remove files i intend to ignore (filter by file ext or containing folder)
    # 3. ask about modifying neighbor PMX
    # 4. read PMX: either target or target+all neighbor
    # 5. categorize files & normalize usages within PMX:
    #    for each pmx, for each file on disk, match against files used in textures
    #    (case-insensitive) and replace with canonical name-on-disk;
    #    now have all files, know their states!
    # 6. ask for "aggression level" to control how files will be moved
    # 7. determine new names for files
    #    (this is the big one, slightly different logic for different categories)
    # 8. print proposed names & other findings
    #    (for unused files under a folder, combine & replace with ***)
    # 9. ask for confirmation
    # 10. zip backup
    # 11. apply renaming:
    #     first try to rename all files; could plausibly fail, if so set to-name
    #     to None/blank; then, in the PMXs, rename all files that didn't fail

    # absolute path to directory holding the pmx
    input_filename_pmx_abs = os.path.normpath(
        os.path.abspath(input_filename_pmx))
    startpath, input_filename_pmx_rel = os.path.split(input_filename_pmx_abs)

    # =========================================================================
    # first, build the list of ALL files that actually exist, then filter it
    # down to neighbor PMXs and relevant files
    relative_all_exist_files = walk_filetree_from_root(startpath)
    core.MY_PRINT_FUNC("ALL EXISTING FILES:", len(relative_all_exist_files))
    # now fill "neighbor_pmx" by finding files without path separator that end
    # in PMX — these are relative paths tho
    neighbor_pmx = [
        f for f in relative_all_exist_files if (f.lower().endswith(".pmx")) and (
            os.path.sep not in f) and f != input_filename_pmx_rel
    ]

    relevant_exist_files = []
    for f in relative_all_exist_files:
        # ignore all files I expect to find alongside a PMX and don't want to touch or move
        if f.lower().endswith(IGNORE_FILETYPES): continue
        # ignore any files living below/inside 'special' folders like "fx/"
        if match_folder_anylevel(f, IGNORE_FOLDERS, toponly=False): continue
        # create the list of files we know exist and we know we care about
        relevant_exist_files.append(f)

    core.MY_PRINT_FUNC("RELEVANT EXISTING FILES:", len(relevant_exist_files))
    core.MY_PRINT_FUNC("NEIGHBOR PMX FILES:", len(neighbor_pmx))

    # =========================================================================
    # now ask if I care about the neighbors and read the PMXes into memory
    pmx_filenames = [input_filename_pmx_rel]

    if neighbor_pmx:
        core.MY_PRINT_FUNC("")
        info = [
            "Detected %d top-level neighboring PMX files, these probably share the same filebase as the target."
            % len(neighbor_pmx),
            "If files are moved/renamed but the neighbors are not processed, the neighbor texture references will probably break.",
            "Do you want to process all neighbors in addition to the target? (highly recommended)",
            "1 = Yes, 2 = No"
        ]
        r = core.MY_SIMPLECHOICE_FUNC((1, 2), info)
        if r == 1:
            core.MY_PRINT_FUNC("Processing target + all neighbor files")
            # append neighbor PMX files onto the list of files to be processed
            pmx_filenames += neighbor_pmx
        else:
            core.MY_PRINT_FUNC(
                "WARNING: Processing only target, ignoring %d neighbor PMX files"
                % len(neighbor_pmx))

    # now read all the PMX objects & store in dict alongside the relative name
    # dictionary where keys are filename and values are resulting pmx objects
    all_pmx_obj = {}
    for this_pmx_name in pmx_filenames:
        this_pmx_obj = pmxlib.read_pmx(os.path.join(startpath, this_pmx_name),
                                       moreinfo=moreinfo)
        all_pmx_obj[this_pmx_name] = this_pmx_obj

    # =========================================================================
    # for each pmx, for each file on disk, match against files used in textures
    # (case-insensitive) and replace with canonical name-on-disk.
    # also fill out how much and how each file is used, and unify dupes between
    # files, all that good stuff
    filerecord_list = categorize_files(all_pmx_obj, relevant_exist_files,
                                       moreinfo)

    # =========================================================================
    # now check which files are used/unused/dont exist
    # break this into used/notused/notexist lists for simplicity sake
    #   all     -> used + notused
    #   used    -> used_exist + used_notexist
    #   notused -> notused_img + notused_notimg
    used, notused = core.my_list_partition(filerecord_list,
                                           lambda q: q.numused != 0)
    used_exist, used_notexist = core.my_list_partition(used, lambda q: q.exists)
    notused_img, notused_notimg = core.my_list_partition(
        notused, lambda q: q.name.lower().endswith(IMG_EXT))
    core.MY_PRINT_FUNC("PMX TEXTURE SOURCES:", len(used))
    if moreinfo:
        for x in used:
            core.MY_PRINT_FUNC(" " + str(x))

    # at this point:
    #   all duplicates have been resolved within PMX, including modifying the PMX
    #   all duplicates have been resolved across PMXes
    #   all file exist/notexist status is known
    #   all file used/notused status is known (via numused), or used_pmx
    #   all ways a file is used is known

    move_toplevel_unused_img = True
    move_all_unused_img = False
    # only ask what files to move if there are files that could potentially be moved
    if notused_img:
        # count the number of toplevel vs not-toplevel in "notused_img"
        num_toplevel = len(
            [p for p in notused_img if (os.path.sep not in p.name)])
        num_nontoplevel = len(notused_img) - num_toplevel
        # ask the user what "aggression" level they want
        showinfo = [
            "Detected %d unused top-level files and %d unused files in directories."
            % (num_toplevel, num_nontoplevel),
            "Which files do you want to move to 'unused' folder?",
            "1 = Do not move any, 2 = Move only top-level unused, 3 = Move all unused"
        ]
        c = core.MY_SIMPLECHOICE_FUNC((1, 2, 3), showinfo)
        if c == 2:
            move_toplevel_unused_img = True
            move_all_unused_img = False
        elif c == 3:
            move_toplevel_unused_img = True
            move_all_unused_img = True
        else:  # c == 1:
            move_toplevel_unused_img = False
            move_all_unused_img = False

    # =========================================================================
    # DETERMINE NEW NAMES FOR FILES
    # how to remap: build a list of all destinations (lowercase) to see if any
    # proposed change would lead to collision
    all_new_names = set()

    # don't touch the unused_notimg files at all, unless some flag is set

    # not-used top-level image files get moved to 'unused' folder
    # also all spa/sph get renamed to .bmp (but remember these are all unused
    # so i don't need to update them in the pmx)
    for p in notused_img:
        newname = remove_pattern(p.name)
        if ((os.path.sep not in p.name)
                and move_toplevel_unused_img) or move_all_unused_img:
            # this deserves to be moved to 'unused' folder!
            newname = os.path.join(FOLDER_UNUSED, os.path.basename(newname))

        # ensure the extension is lowercase, for cleanliness
        dot = newname.rfind(".")
        newname = newname[:dot] + newname[dot:].lower()
        if CONVERT_SPA_SPH_TO_BMP and newname.endswith((".spa", ".sph")):
            newname = newname[:-4] + ".bmp"
        # if the name I build is not the name it already has, queue it for actual rename
        if newname != p.name:
            # resolve potential collisions by adding numbers suffix to file names.
            # first need to make path absolute so get_unused_file_name can check
            # the disk, then check uniqueness against files on disk and files in
            # namelist (files that WILL be on disk)
            newname = core.get_unused_file_name(os.path.join(
                startpath, newname), namelist=all_new_names)
            # now dest path is guaranteed unique against other existing files &
            # other proposed name changes
            all_new_names.add(newname.lower())
            # make the path no longer absolute: undo adding "startpath" above
            newname = os.path.relpath(newname, startpath)
            p.newname = newname

    # used files get sorted into tex/toon/sph/multi (unless tex and already in
    # a folder that says clothes, etc).
    # all SPH/SPA get renamed to BMP, used or unused
    for p in used_exist:
        newname = remove_pattern(p.name)
        usage_list = list(p.usage)
        if len(p.usage) != 1:
            # this is a rare multiple-use file
            newname = os.path.join(FOLDER_MULTI, os.path.basename(newname))
        elif usage_list[0] == FOLDER_SPH:
            # this is an sph, duh
            if not match_folder_anylevel(
                    p.name, KEEP_FOLDERS_SPH, toponly=True):
                # if its name isn't already good, then move it to my new location
                newname = os.path.join(FOLDER_SPH, os.path.basename(newname))
        elif usage_list[0] == FOLDER_TOON:
            # this is a toon, duh
            if not match_folder_anylevel(
                    p.name, KEEP_FOLDERS_TOON, toponly=True):
                # if its name isn't already good, then move it to my new location
                newname = os.path.join(FOLDER_TOON, os.path.basename(newname))
        elif usage_list[0] == FOLDER_TEX:
            # if a tex AND already in a folder like body, clothes, wear, tex,
            # etc then keep that folder
            if not match_folder_anylevel(
                    p.name, KEEP_FOLDERS_TEX, toponly=True):
                # if its name isn't already good, then move it to my new location
                newname = os.path.join(FOLDER_TEX, os.path.basename(newname))

        # ensure the extension is lowercase, for cleanliness
        dot = newname.rfind(".")
        newname = newname[:dot] + newname[dot:].lower()
        if CONVERT_SPA_SPH_TO_BMP and newname.lower().endswith(
                (".spa", ".sph")):
            newname = newname[:-4] + ".bmp"
        # if the name I build is not the name it already has, queue it for actual rename
        if newname != p.name:
            # resolve potential collisions by adding numbers suffix to file names.
            # first need to make path absolute so get_unused_file_name can check
            # the disk, then check uniqueness against files on disk and files in
            # namelist (files that WILL be on disk)
            newname = core.get_unused_file_name(os.path.join(
                startpath, newname), namelist=all_new_names)
            # now dest path is guaranteed unique against other existing files &
            # other proposed name changes
            all_new_names.add(newname.lower())
            # make the path no longer absolute: undo adding "startpath" above
            newname = os.path.relpath(newname, startpath)
            p.newname = newname

    # =========================================================================
    # NOW PRINT MY PROPOSED RENAMINGS and other findings
    # isolate the ones with proposed renaming
    used_rename = [u for u in used_exist if u.newname is not None]
    notused_img_rename = [u for u in notused_img if u.newname is not None]
    notused_img_norename = [u for u in notused_img if u.newname is None]

    # bonus goal: if ALL files under a folder are unused, replace its name with a star
    # first build dict of each dirs to each file any depth below that dir
    all_dirnames = {}
    for f in relative_all_exist_files:
        d = os.path.dirname(f)
        while d != "":
            try:
                all_dirnames[d].append(f)
            except KeyError:
                all_dirnames[d] = [f]
            d = os.path.dirname(d)
    unused_dirnames = []
    all_notused_searchable = [x.name for x in notused_img_norename
                              ] + [x.name for x in notused_notimg]
    for d, files_under_d in all_dirnames.items():
        # if all files beginning with d are notused (either type), this dir can
        # be replaced with *
        # note: min crashes if input list is empty, but this is guaranteed to not be empty
        dir_notused = min([(f in all_notused_searchable)
                           for f in files_under_d])
        if dir_notused:
            unused_dirnames.append(d)
    # print("allundir", unused_dirnames)
    # now, remove all dirnames that are encompassed by another dirname
    j = 0
    while j < len(unused_dirnames):
        dj = unused_dirnames[j]
        k = 0
        while k < len(unused_dirnames):
            dk = unused_dirnames[k]
            if dj != dk and dk.startswith(dj):
                unused_dirnames.pop(k)
            else:
                k += 1
        j += 1
    # make sure unused_dirnames has the deepest directories first
    unused_dirnames = sorted(unused_dirnames,
                             key=lambda y: y.count(os.path.sep),
                             reverse=True)
    # print("unqundir", unused_dirnames)
    # then as I go to print notused_img_norename or notused_notimg, collapse them?

    # for each section, if it exists, print its names sorted first by directory
    # depth then alphabetically (case insensitive)
    if used_notexist:
        core.MY_PRINT_FUNC("=" * 60)
        core.MY_PRINT_FUNC(
            "Found %d references to images that don't exist (no proposed changes)"
            % len(used_notexist))
        for p in sorted(used_notexist, key=lambda y: sortbydirdepth(y.name)):
            # print orig name, usage modes, # used, and # files that use it
            core.MY_PRINT_FUNC(" " + str(p))
    if notused_img_norename:
        core.MY_PRINT_FUNC("=" * 60)
        core.MY_PRINT_FUNC(
            "Found %d not-used images in the file tree (no proposed changes)"
            % len(notused_img_norename))
        printme = set()
        for p in notused_img_norename:
            # is this notused-file anywhere below any unused dir?
            t = False
            for d in unused_dirnames:
                if p.name.startswith(d):
                    # add this dir, not this file, to the print set
                    printme.add(os.path.join(d, "***"))
                    t = True
            if not t:
                # if not encompassed by an unused dir, add the filename
                printme.add(p.name)
        # convert set back to sorted list
        printme = sorted(list(printme), key=sortbydirdepth)
        for s in printme:
            core.MY_PRINT_FUNC(" " + s)
    if notused_notimg:
        core.MY_PRINT_FUNC("=" * 60)
        core.MY_PRINT_FUNC(
            "Found %d not-used not-images in the file tree (no proposed changes)"
            % len(notused_notimg))
        printme = set()
        for p in notused_notimg:
            # is this notused-file anywhere below any unused dir?
            t = False
            for d in unused_dirnames:
                if p.name.startswith(d):
                    # add this dir, not this file, to the print set
                    printme.add(os.path.join(d, "***"))
                    t = True
            if not t:
                # if not encompassed by an unused dir, add the filename
                printme.add(p.name)
        # convert set back to sorted list
        printme = sorted(list(printme), key=sortbydirdepth)
        for s in printme:
            core.MY_PRINT_FUNC(" " + s)

    # print with all "from" file names left-justified so all the arrows are
    # nicely lined up (unless they use jp characters)
    # NOTE(review): longest_name_len is computed here but never read again —
    # the actual justification below is done by core.MY_JUSTIFY_STRINGLIST.
    # Looks like dead code left over from an earlier approach; confirm before removing.
    longest_name_len = 0
    for p in used_rename:
        longest_name_len = max(longest_name_len, len(p.name))
    for p in notused_img_rename:
        longest_name_len = max(longest_name_len, len(p.name))

    if used_rename:
        core.MY_PRINT_FUNC("=" * 60)
        core.MY_PRINT_FUNC("Found %d used files to be moved/renamed:" %
                           len(used_rename))
        oldname_list = core.MY_JUSTIFY_STRINGLIST(
            [p.name for p in used_rename])
        newname_list = [p.newname for p in used_rename]
        zipped = list(zip(oldname_list, newname_list))
        zipped_and_sorted = sorted(zipped,
                                   key=lambda y: sortbydirdepth(y[0]))
        for o, n in zipped_and_sorted:
            # print 'from' with the case/separator it uses in the PMX
            core.MY_PRINT_FUNC(" {:s} --> {:s}".format(o, n))
    if notused_img_rename:
        core.MY_PRINT_FUNC("=" * 60)
        core.MY_PRINT_FUNC("Found %d not-used images to be moved/renamed:" %
                           len(notused_img_rename))
        oldname_list = core.MY_JUSTIFY_STRINGLIST(
            [p.name for p in notused_img_rename])
        newname_list = [p.newname for p in notused_img_rename]
        zipped = list(zip(oldname_list, newname_list))
        zipped_and_sorted = sorted(zipped,
                                   key=lambda y: sortbydirdepth(y[0]))
        for o, n in zipped_and_sorted:
            # print 'from' with the case/separator it uses in the PMX
            core.MY_PRINT_FUNC(" {:s} --> {:s}".format(o, n))
    core.MY_PRINT_FUNC("=" * 60)

    # nothing to do at all? bail out before asking for confirmation
    if not (used_rename or notused_img_rename):
        core.MY_PRINT_FUNC("No proposed file changes")
        core.MY_PRINT_FUNC("Aborting: no files were changed")
        return None

    info = [
        "Do you accept these new names/locations?",
        "1 = Yes, 2 = No (abort)"
    ]
    r = core.MY_SIMPLECHOICE_FUNC((1, 2), info)
    if r == 2:
        core.MY_PRINT_FUNC("Aborting: no files were changed")
        return None

    # =========================================================================
    # finally, do the actual renaming:

    # first, create a backup of the folder
    if MAKE_BACKUP_BEFORE_RENAMES:
        r = make_zipfile_backup(startpath, BACKUP_SUFFIX)
        if not r:
            # this happens if the backup failed somehow AND the user decided to quit
            core.MY_PRINT_FUNC("Aborting: no files were changed")
            return None

    # do all renaming on disk and in PMXes, and also handle the print statements
    apply_file_renaming(all_pmx_obj, filerecord_list, startpath)

    # write out
    for this_pmx_name, this_pmx_obj in all_pmx_obj.items():
        # NOTE: this is OVERWRITING THE PREVIOUS PMX FILE, NOT CREATING A NEW ONE
        # because I make a zipfile backup I don't need to feel worried about
        # preserving the old version
        output_filename_pmx = os.path.join(startpath, this_pmx_name)
        # output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
        pmxlib.write_pmx(output_filename_pmx, this_pmx_obj, moreinfo=moreinfo)

    core.MY_PRINT_FUNC("Done!")
    return None