Example #1
def write_vpd(vpd_filepath: str, vmd: vmdstruct.Vmd, moreinfo=False):
    """
	Grab all bone/morph frames at time=0 in a VMD object and write them to a properly-formatted VPD text file.
	
	:param vpd_filepath: destination filepath/name, relative from CWD or absolute
	:param vmd: input VMD object
	:param moreinfo: if true, get extra printouts with more info about stuff
	"""
    cleanname = core.get_clean_basename(vpd_filepath) + ".vpd"
    core.MY_PRINT_FUNC("Begin writing VPD file '%s'" % cleanname)

    # first, let's partition boneframes & morphframes into those at/not-at time=0
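    # (judging by how it's used here and below, my_list_partition(seq, pred) returns
    # two lists: items where pred is true, then all the rest)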
    pose_bones, otherbones = core.my_list_partition(vmd.boneframes,
                                                    lambda b: b.f == 0)
    pose_morphs, othermorphs = core.my_list_partition(vmd.morphframes,
                                                      lambda b: b.f == 0)

    # if there are frames not on time=0, raise a warning but continue
    if otherbones or othermorphs:
        core.MY_PRINT_FUNC(
            "Warning: input VMD contains %d frames not at time=0, these will not be captured in the resulting pose!"
            % (len(otherbones) + len(othermorphs)))

    if moreinfo:
        core.MY_PRINT_FUNC("...model name   = JP:'%s'" % vmd.header.modelname)
    # init printlist with magic header, title, and numbones
    printlist = [
        "Vocaloid Pose Data file",
        "",
        "{:s}.osm;".format(vmd.header.modelname),
        "{:d};".format(len(pose_bones)),
        "",
    ]

    # now iterate over all bones
    # bone-floats always print with exactly 6 decimal places
    if moreinfo:
        core.MY_PRINT_FUNC("...# of boneframes          = %d" %
                           len(pose_bones))
    for d, pb in enumerate(pose_bones):
        quat = list(core.euler_to_quaternion(pb.rot))  # returns quat WXYZ
        quat.append(quat.pop(0))  # WXYZ -> XYZW, AKA move head (w) to tail
        newitem = [
            "Bone{:d}{{{:s}".format(d, pb.name),
            "  {:.6f},{:.6f},{:.6f};".format(*pb.pos),
            "  {:.6f},{:.6f},{:.6f},{:.6f};".format(*quat),
            "}",
            "",
        ]
        printlist.extend(newitem)

    # now iterate over all morphs
    # morph-floats are flexible, need to TEST how long they can be!
    # let's say max precision is 3, but strip any trailing zeros and reduce "1." to "1"
    if moreinfo:
        core.MY_PRINT_FUNC("...# of morphframes         = %d" %
                           len(pose_morphs))
    for d, pm in enumerate(pose_morphs):
        newitem = [
            "Morph{:d}{{{:s}".format(d, pm.name),
            "  {:.3f}".format(pm.val).rstrip("0").rstrip(".") + ";",
            "}",
            "",
        ]
        printlist.extend(newitem)

    # ok, now i'm done building the printlist! now actually write it!
    core.write_list_to_txtfile(vpd_filepath, printlist, use_jis_encoding=True)
    core.MY_PRINT_FUNC("Done writing VPD file '%s'" % cleanname)

    return None
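# For reference, a tiny standalone sketch (plain Python, made-up illustrative values) of
# the two formatting tricks used above: the WXYZ -> XYZW quaternion reorder, and the
# flexible morph-float formatting that strips trailing zeros and reduces "1.000" to "1".
quat = [0.924, 0.383, 0.0, 0.0]   # WXYZ
quat.append(quat.pop(0))          # now [0.383, 0.0, 0.0, 0.924], i.e. XYZW
for v in (1.0, 0.25, 0.123456):
    print("{:.3f}".format(v).rstrip("0").rstrip("."))  # prints 1, then 0.25, then 0.123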
Example #2
def main(moreinfo=True):
	# the goal: extract rotation around the "arm" bone local X? axis and transfer it to rotation around the "armtwist" bone local axis
	
	# prompt PMX name
	core.MY_PRINT_FUNC("Please enter name of PMX input file:")
	input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
	pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
	core.MY_PRINT_FUNC("")
	# get bones
	realbones = pmx.bones
	
	twistbone_axes = []
	# then, grab the "twist" bones & save their fixed-rotate axes, if they have them
	# fallback plan: find the arm-to-elbow and elbow-to-wrist unit vectors and use those
	for i in range(len(jp_twistbones)):
		r = core.my_list_search(realbones, lambda x: x.name_jp == jp_twistbones[i], getitem=True)
		if r is None:
			core.MY_PRINT_FUNC("ERROR1: twist bone '{}'({}) cannot be found model, unable to continue. Ensure they use the correct semistandard names, or edit the script to change the JP names it is looking for.".format(jp_twistbones[i], eng_twistbones[i]))
			raise RuntimeError()
		if r.has_fixedaxis:
			# this bone DOES have fixed-axis enabled! use the unit vector in r.fixedaxis
			twistbone_axes.append(r.fixedaxis)
		else:
			# i can infer local axis by angle from arm-to-elbow or elbow-to-wrist
			start = core.my_list_search(realbones, lambda x: x.name_jp == jp_sourcebones[i], getitem=True)
			if start is None:
				core.MY_PRINT_FUNC("ERROR2: semistandard bone '%s' is missing from the model, unable to infer axis of rotation" % jp_sourcebones[i])
				raise RuntimeError()
			end = core.my_list_search(realbones, lambda x: x.name_jp == jp_pointat_bones[i], getitem=True)
			if end is None:
				core.MY_PRINT_FUNC("ERROR3: semistandard bone '%s' is missing from the model, unable to infer axis of rotation" % jp_pointat_bones[i])
				raise RuntimeError()
			start_pos = start.pos
			end_pos = end.pos
			# now have both startpoint and endpoint! find the delta!
			delta = [b - a for a,b in zip(start_pos, end_pos)]
			# normalize to length of 1
			length = core.my_euclidian_distance(delta)
			unit = [t / length for t in delta]
			twistbone_axes.append(unit)
	
	# done extracting axis limits from the PMX, stored in list "twistbone_axes"
	core.MY_PRINT_FUNC("...done extracting axis limits from PMX...")
	
	
	###################################################################################
	# prompt VMD file name
	core.MY_PRINT_FUNC("Please enter name of VMD dance input file:")
	input_filename_vmd = core.MY_FILEPROMPT_FUNC(".vmd")
	
	# next, read/use/prune the dance vmd
	nicelist_in = vmdlib.read_vmd(input_filename_vmd, moreinfo=moreinfo)
	
	# sort boneframes into individual lists: one for each [Larm + Lelbow + Rarm + Relbow] and remove them from the master boneframelist
	# frames for all other bones stay in the master boneframelist
	all_sourcebone_frames = []
	for sourcebone in jp_sourcebones:
		# partition & writeback
		temp, nicelist_in.boneframes = core.my_list_partition(nicelist_in.boneframes, lambda x: x.name == sourcebone)
		# all frames for "sourcebone" get their own sublist here
		all_sourcebone_frames.append(temp)
	
	# verify that there are actually arm/elbow frames to process
	sourcenumframes = sum([len(x) for x in all_sourcebone_frames])
	if sourcenumframes == 0:
		core.MY_PRINT_FUNC("No arm/elbow bone frames are found in the VMD, nothing for me to do!")
		core.MY_PRINT_FUNC("Aborting: no files were changed")
		return None
	else:
		core.MY_PRINT_FUNC("...source contains " + str(sourcenumframes) + " arm/elbow bone frames to decompose...")
	
	if USE_OVERKEY_BANDAID:
		# to fix the path that the arms take during interpolation we need to overkey the frames
		# i.e. create intermediate frames that they should have been passing through already, to FORCE it to take the right path
		# i'm replacing the interpolation curves with actual frames
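		# worked example: with OVERKEY_FRAME_SPACING = 3, keys at f=10 and f=20 gain new
		# in-between keys at f=13, f=16, f=19, each sampled from the f=20 frame's Bezier
		# curve, and then the f=20 frame's custom curve is replaced with linear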
		for sublist in all_sourcebone_frames:
			newframelist = []
			sublist.sort(key=lambda x: x.f) # ensure they are sorted by frame number
			# for each frame
			for i in range(1, len(sublist)):
				this = sublist[i]
				prev = sublist[i-1]
				# use interpolation curve i to interpolate from i-1 to i
				# first: do i need to do anything or are they already close on the timeline?
				thisframenum = this.f
				prevframenum = prev.f
				if (thisframenum - prevframenum) <= OVERKEY_FRAME_SPACING:
					continue
				# if they are far enough apart that i need to do something,
				thisframequat = core.euler_to_quaternion(this.rot)
				prevframequat = core.euler_to_quaternion(prev.rot)
				# 3, 7, 11, 15 = r_ax, r_ay, r_bx, r_by
				bez = core.MyBezier((this.interp[3], this.interp[7]), (this.interp[11], this.interp[15]), resolution=50)
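				# (MyBezier presumably normalizes these control points and builds the cubic
				# Bezier from (0,0) to (1,1); bez.approximate(x) then maps time-fraction x
				# to progress-fraction y, sampled at the given resolution)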
				# create new frames at these frame numbers, spacing is OVERKEY_FRAME_SPACING
				for interp_framenum in range(prevframenum + OVERKEY_FRAME_SPACING, thisframenum, OVERKEY_FRAME_SPACING):
					# calculate the x time percentage from prev frame to this frame
					x = (interp_framenum - prevframenum) / (thisframenum - prevframenum)
					# apply the interpolation curve to translate X to Y
					y = bez.approximate(x)
					# interpolate from prev to this by amount Y
					interp_quat = core.my_slerp(prevframequat, thisframequat, y)
					# begin building the new frame
					newframe = vmdstruct.VmdBoneFrame(
						name=this.name,  # same name
						f=interp_framenum,  # overwrite frame num
						pos=list(this.pos),  # same pos (but make a copy)
						rot=list(core.quaternion_to_euler(interp_quat)),  # overwrite euler angles
						phys_off=this.phys_off,  # same phys_off
						interp=list(core.bone_interpolation_default_linear)  # overwrite interpolation
					)
					newframelist.append(newframe)
				# overwrite thisframe interp curve with default too
				this.interp = list(core.bone_interpolation_default_linear) # overwrite custom interpolation
			# concat the new frames onto the existing frames for this sublist
			sublist += newframelist
			
	# re-count the number of frames for printing purposes
	totalnumframes = sum([len(x) for x in all_sourcebone_frames])
	overkeyframes = totalnumframes - sourcenumframes
	if overkeyframes != 0:
		core.MY_PRINT_FUNC("...overkeying added " + str(overkeyframes) + " arm/elbow bone frames...")
	core.MY_PRINT_FUNC("...beginning decomposition of " + str(totalnumframes) + " arm/elbow bone frames...")
	
	# now i am completely done reading the VMD file and parsing its data! everything has been distilled down to:
	# all_sourcebone_frames = [Larm, Lelbow, Rarm, Relbow] plus everything left in nicelist_in.boneframes
	
	###################################################################################
	# begin the actual calculations
	
	# output array
	new_twistbone_frames = []
	# progress tracker
	curr_progress = 0
	
	# for each sourcebone & corresponding twistbone,
	for (twistbone, axis_orig, sourcebone_frames) in zip(jp_twistbones, twistbone_axes, all_sourcebone_frames):
		# for each frame of the sourcebone,
		for frame in sourcebone_frames:
			# frame.rot is a list of XYZ euler angles
			quat_in = core.euler_to_quaternion(frame.rot)
			axis = list(axis_orig)	# make a copy to be safe
			
			# "swing twist decomposition"
			# swing = "local" x rotation and nothing else
			# swing = sourcebone, twist = twistbone
			(swing, twist) = swing_twist_decompose(quat_in, axis)
			
			# modify "frame" in-place
			# only modify the XYZrot to use new values
			new_sourcebone_euler = core.quaternion_to_euler(swing)
			frame.rot = list(new_sourcebone_euler)
			
			# create & store new twistbone frame
			# name=twistbone, framenum=copy, XYZpos=copy, XYZrot=new, phys=copy, interp16=copy
			new_twistbone_euler = core.quaternion_to_euler(twist)
			newframe = vmdstruct.VmdBoneFrame(
				name=twistbone,
				f=frame.f,
				pos=list(frame.pos),
				rot=list(new_twistbone_euler),
				phys_off=frame.phys_off,
				interp=list(frame.interp)
			)
			new_twistbone_frames.append(newframe)
			# print progress updates
			curr_progress += 1
			core.print_progress_oneline(curr_progress / totalnumframes)
	
	
	######################################################################
	# done with calculations!
	core.MY_PRINT_FUNC("...done with decomposition, now reassembling output...")
	# attach the list of newly created boneframes, modify the original input
	for sublist in all_sourcebone_frames:
		nicelist_in.boneframes += sublist
	nicelist_in.boneframes += new_twistbone_frames
	
	core.MY_PRINT_FUNC("")
	# write out the VMD
	output_filename_vmd = "%s_twistbones_for_%s.vmd" % \
						   (input_filename_vmd[0:-4], core.get_clean_basename(input_filename_pmx))
	output_filename_vmd = core.get_unused_file_name(output_filename_vmd)
	vmdlib.write_vmd(output_filename_vmd, nicelist_in, moreinfo=moreinfo)
	
	core.MY_PRINT_FUNC("Done!")
	return None
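# swing_twist_decompose() is defined elsewhere in this script. For reference, a minimal
# standalone sketch of the swing-twist idea (hypothetical helper name; assumes WXYZ
# quaternions and a unit-length axis): project the quaternion's vector part onto the
# twist axis and renormalize to get the twist, then recover the swing as
# q * conjugate(twist), so that swing * twist reassembles the original rotation.
import math

def swing_twist_decompose_sketch(q, axis):
	w, x, y, z = q
	d = x * axis[0] + y * axis[1] + z * axis[2]  # projection of vector part onto axis
	tw, tx, ty, tz = w, d * axis[0], d * axis[1], d * axis[2]
	n = math.sqrt(tw * tw + tx * tx + ty * ty + tz * tz)
	if n < 1e-9:
		# degenerate case: 180-degree rotation exactly perpendicular to the axis
		return q, (1.0, 0.0, 0.0, 0.0)
	twist = (tw / n, tx / n, ty / n, tz / n)
	cw, cx, cy, cz = twist[0], -twist[1], -twist[2], -twist[3]  # conjugate(twist)
	# quaternion product: swing = q * conjugate(twist)
	swing = (w * cw - x * cx - y * cy - z * cz,
			 w * cx + x * cw + y * cz - z * cy,
			 w * cy - x * cz + y * cw + z * cx,
			 w * cz + x * cy - y * cx + z * cw)
	return swing, twist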
Example #3
def main(moreinfo=False):
    core.MY_PRINT_FUNC("Please enter name of PMX model file:")
    input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")

    # texture sorting plan:
    # 1. get startpath = basepath of input PMX
    # 2. get lists of relevant files
    # 	2a. get list of ALL files within the tree, relative to startpath
    # 	2b. extract top-level 'neighbor' pmx files from all-set
    # 	2c. remove files i intend to ignore (filter by file ext or containing folder)
    # 3. ask about modifying neighbor PMX
    # 4. read PMX: either target or target+all neighbor
    # 5. "categorize files & normalize usages within PMX", NEW FUNC!!!
    # 	inputs: list of PMX obj, list of relevant files
    # 	outputs: list of structs that bundle all relevant info about the file (replace 2 structs currently used)
    # 	for each pmx, for each file on disk, match against files used in textures (case-insensitive) and replace with canonical name-on-disk
    # now have all files, know their states!
    # 6. ask for "aggression level" to control how files will be moved
    # 7. determine new names for files
    # 	this is the big one, slightly different logic for different categories
    # 8. print proposed names & other findings
    # 	for unused files under a folder, combine & replace with ***
    # 9. ask for confirmation
    # 10. zip backup (NEW FUNC!)
    # 11. apply renaming, NEW FUNC!
    # 	first try to rename all files
    # 		could plausibly fail, if so, set to-name to None/blank
    # 	then, in the PMXs, rename all files that didn't fail

    # absolute path to directory holding the pmx
    input_filename_pmx_abs = os.path.normpath(
        os.path.abspath(input_filename_pmx))
    startpath, input_filename_pmx_rel = os.path.split(input_filename_pmx_abs)
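    # e.g. an input of "models/miku/miku.pmx" (illustrative, not a real file) yields
    # startpath = ".../models/miku" and input_filename_pmx_rel = "miku.pmx"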

    # =========================================================================================================
    # =========================================================================================================
    # =========================================================================================================
    # first, build the list of ALL files that actually exist, then filter it down to neighbor PMXs and relevant files
    relative_all_exist_files = walk_filetree_from_root(startpath)
    core.MY_PRINT_FUNC("ALL EXISTING FILES:", len(relative_all_exist_files))
    # now fill "neighbor_pmx" by finding files without path separator that end in PMX
    # these are relative paths tho
    neighbor_pmx = [f for f in relative_all_exist_files
                    if f.lower().endswith(".pmx")
                    and (os.path.sep not in f)
                    and f != input_filename_pmx_rel]

    relevant_exist_files = []
    for f in relative_all_exist_files:
        # ignore all files I expect to find alongside a PMX and don't want to touch or move
        if f.lower().endswith(IGNORE_FILETYPES): continue
        # ignore any files living below/inside 'special' folders like "fx/"
        if match_folder_anylevel(f, IGNORE_FOLDERS, toponly=False): continue
        # create the list of files we know exist and we know we care about
        relevant_exist_files.append(f)

    core.MY_PRINT_FUNC("RELEVANT EXISTING FILES:", len(relevant_exist_files))

    core.MY_PRINT_FUNC("NEIGHBOR PMX FILES:", len(neighbor_pmx))

    # =========================================================================================================
    # =========================================================================================================
    # =========================================================================================================
    # now ask if I care about the neighbors and read the PMXes into memory

    pmx_filenames = [input_filename_pmx_rel]

    if neighbor_pmx:
        core.MY_PRINT_FUNC("")
        info = [
            "Detected %d top-level neighboring PMX files, these probably share the same filebase as the target."
            % len(neighbor_pmx),
            "If files are moved/renamed but the neighbors are not processed, the neighbor texture references will probably break.",
            "Do you want to process all neighbors in addition to the target? (highly recommended)",
            "1 = Yes, 2 = No"
        ]
        r = core.MY_SIMPLECHOICE_FUNC((1, 2), info)
        if r == 1:
            core.MY_PRINT_FUNC("Processing target + all neighbor files")
            # append neighbor PMX files onto the list of files to be processed
            pmx_filenames += neighbor_pmx
        else:
            core.MY_PRINT_FUNC(
                "WARNING: Processing only target, ignoring %d neighbor PMX files"
                % len(neighbor_pmx))
    # now read all the PMX objects & store in dict alongside the relative name
    # dictionary where keys are filename and values are resulting pmx objects
    all_pmx_obj = {}
    for this_pmx_name in pmx_filenames:
        this_pmx_obj = pmxlib.read_pmx(os.path.join(startpath, this_pmx_name),
                                       moreinfo=moreinfo)
        all_pmx_obj[this_pmx_name] = this_pmx_obj

    # =========================================================================================================
    # =========================================================================================================
    # =========================================================================================================
    # 	for each pmx, for each file on disk, match against files used in textures (case-insensitive) and replace with canonical name-on-disk
    #	also fill out how much and how each file is used, and unify dupes between files, all that good stuff

    filerecord_list = categorize_files(all_pmx_obj, relevant_exist_files,
                                       moreinfo)

    # =========================================================================================================
    # =========================================================================================================
    # =========================================================================================================
    # now check which files are used/unused/dont exist

    # break this into used/notused/notexist lists for simplicity's sake
    # all -> used + notused
    # used -> used_exist + used_notexist
    # notused -> notused_img + notused_notimg
    used, notused = core.my_list_partition(filerecord_list,
                                           lambda q: q.numused != 0)
    used_exist, used_notexist = core.my_list_partition(used,
                                                       lambda q: q.exists)
    notused_img, notused_notimg = core.my_list_partition(
        notused, lambda q: q.name.lower().endswith(IMG_EXT))

    core.MY_PRINT_FUNC("PMX TEXTURE SOURCES:", len(used))
    if moreinfo:
        for x in used:
            core.MY_PRINT_FUNC("  " + str(x))

    # now:
    # all duplicates have been resolved within PMX, including modifying the PMX
    # all duplicates have been resolved across PMXes
    # all file exist/notexist status is known
    # all file used/notused status is known (via numused, or via used_pmx)
    # all ways a file is used is known

    move_toplevel_unused_img = True
    move_all_unused_img = False
    # only ask what files to move if there are files that could potentially be moved
    if notused_img:
        # count the number of toplevel vs not-toplevel in "notused_img"
        num_toplevel = len(
            [p for p in notused_img if (os.path.sep not in p.name)])
        num_nontoplevel = len(notused_img) - num_toplevel
        # ask the user what "aggression" level they want
        showinfo = [
            "Detected %d unused top-level files and %d unused files in directories."
            % (num_toplevel, num_nontoplevel),
            "Which files do you want to move to 'unused' folder?",
            "1 = Do not move any, 2 = Move only top-level unused, 3 = Move all unused"
        ]
        c = core.MY_SIMPLECHOICE_FUNC((1, 2, 3), showinfo)
        if c == 2:
            move_toplevel_unused_img = True
            move_all_unused_img = False
        elif c == 3:
            move_toplevel_unused_img = True
            move_all_unused_img = True
        else:  # c == 1:
            move_toplevel_unused_img = False
            move_all_unused_img = False

    # =========================================================================================================
    # =========================================================================================================
    # =========================================================================================================
    # DETERMINE NEW NAMES FOR FILES

    # how to remap: build a list of all destinations (lowercase) to see if any proposed change would lead to collision
    all_new_names = set()

    # don't touch the unused_notimg files at all, unless some flag is set

    # not-used top-level image files get moved to 'unused' folder
    # also all spa/sph get renamed to .bmp (but remember these are all unused so i don't need to update them in the pmx)
    for p in notused_img:
        newname = remove_pattern(p.name)
        if ((os.path.sep not in p.name)
                and move_toplevel_unused_img) or move_all_unused_img:
            # this deserves to be moved to 'unused' folder!
            newname = os.path.join(FOLDER_UNUSED, os.path.basename(newname))

        # ensure the extension is lowercase, for cleanliness
        # (os.path.splitext handles names with no extension safely, unlike rfind("."))
        base, ext = os.path.splitext(newname)
        newname = base + ext.lower()
        if CONVERT_SPA_SPH_TO_BMP and newname.endswith((".spa", ".sph")):
            newname = newname[:-4] + ".bmp"
        # if the name I build is not the name it already has, queue it for actual rename
        if newname != p.name:
            # resolve potential collisions by adding numbers suffix to file names
            # first need to make path absolute so get_unused_file_name can check the disk.
            # then check uniqueness against files on disk and files in namelist (files that WILL be on disk)
            newname = core.get_unused_file_name(os.path.join(startpath, newname),
                                                namelist=all_new_names)
            # now dest path is guaranteed unique against other existing files & other proposed name changes
            all_new_names.add(newname.lower())
            # make the path no longer absolute: undo adding "startpath" above
            newname = os.path.relpath(newname, startpath)
            p.newname = newname

    # used files get sorted into tex/toon/sph/multi (unless tex and already in a folder that says clothes, etc)
    # all SPH/SPA get renamed to BMP, used or unused
    for p in used_exist:
        newname = remove_pattern(p.name)
        usage_list = list(p.usage)
        if len(p.usage) != 1:
            # this is a rare multiple-use file
            newname = os.path.join(FOLDER_MULTI, os.path.basename(newname))
        elif usage_list[0] == FOLDER_SPH:
            # this is an sph, duh
            if not match_folder_anylevel(
                    p.name, KEEP_FOLDERS_SPH, toponly=True):
                # if its name isn't already good, then move it to my new location
                newname = os.path.join(FOLDER_SPH, os.path.basename(newname))
        elif usage_list[0] == FOLDER_TOON:
            # this is a toon, duh
            if not match_folder_anylevel(
                    p.name, KEEP_FOLDERS_TOON, toponly=True):
                # if its name isn't already good, then move it to my new location
                newname = os.path.join(FOLDER_TOON, os.path.basename(newname))
        elif usage_list[0] == FOLDER_TEX:
            # if a tex AND already in a folder like body, clothes, wear, tex, etc then keep that folder
            if not match_folder_anylevel(
                    p.name, KEEP_FOLDERS_TEX, toponly=True):
                # if its name isn't already good, then move it to my new location
                newname = os.path.join(FOLDER_TEX, os.path.basename(newname))

        # ensure the extension is lowercase, for cleanliness
        # (os.path.splitext handles names with no extension safely, unlike rfind("."))
        base, ext = os.path.splitext(newname)
        newname = base + ext.lower()
        if CONVERT_SPA_SPH_TO_BMP and newname.endswith((".spa", ".sph")):
            newname = newname[:-4] + ".bmp"
        # if the name I build is not the name it already has, queue it for actual rename
        if newname != p.name:
            # resolve potential collisions by adding numbers suffix to file names
            # first need to make path absolute so get_unused_file_name can check the disk.
            # then check uniqueness against files on disk and files in namelist (files that WILL be on disk)
            newname = core.get_unused_file_name(os.path.join(startpath, newname),
                                                namelist=all_new_names)
            # now dest path is guaranteed unique against other existing files & other proposed name changes
            all_new_names.add(newname.lower())
            # make the path no longer absolute: undo adding "startpath" above
            newname = os.path.relpath(newname, startpath)
            p.newname = newname

    # =========================================================================================================
    # =========================================================================================================
    # =========================================================================================================
    # NOW PRINT MY PROPOSED RENAMINGS and other findings

    # isolate the ones with proposed renaming
    used_rename = [u for u in used_exist if u.newname is not None]
    notused_img_rename = [u for u in notused_img if u.newname is not None]
    notused_img_norename = [u for u in notused_img if u.newname is None]

    # bonus goal: if ALL files under a folder are unused, replace its name with a star
    # first build a dict mapping each dir to every file at any depth below it
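    # e.g. a file "a/b/c.png" gets appended under both key "a/b" and key "a",
    # since os.path.dirname is applied repeatedly until it returns ""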
    all_dirnames = {}
    for f in relative_all_exist_files:
        d = os.path.dirname(f)
        while d != "":
            try:
                all_dirnames[d].append(f)
            except KeyError:
                all_dirnames[d] = [f]
            d = os.path.dirname(d)
    unused_dirnames = []
    all_notused_searchable = ([x.name for x in notused_img_norename]
                              + [x.name for x in notused_notimg])
    for d, files_under_d in all_dirnames.items():
        # if all files under d are notused (either type), this dir can be replaced with *
        # (files_under_d is guaranteed non-empty by construction)
        dir_notused = all(f in all_notused_searchable for f in files_under_d)
        if dir_notused:
            unused_dirnames.append(d)
    # print("allundir", unused_dirnames)
    # now, remove all dirnames that are encompassed by another dirname
    j = 0
    while j < len(unused_dirnames):
        dj = unused_dirnames[j]
        k = 0
        while k < len(unused_dirnames):
            dk = unused_dirnames[k]
            if dj != dk and dk.startswith(dj):
                unused_dirnames.pop(k)
            else:
                k += 1
        j += 1
    # make sure unused_dirnames has the deepest directories first
    unused_dirnames = sorted(unused_dirnames,
                             key=lambda y: y.count(os.path.sep),
                             reverse=True)
    # print("unqundir", unused_dirnames)
    # then as I go to print notused_img_norename or notused_notimg, collapse them?

    # for each section, if it exists, print its names sorted first by directory depth then alphabetically (case insensitive)

    if used_notexist:
        core.MY_PRINT_FUNC("=" * 60)
        core.MY_PRINT_FUNC(
            "Found %d references to images that don't exist (no proposed changes)"
            % len(used_notexist))
        for p in sorted(used_notexist, key=lambda y: sortbydirdepth(y.name)):
            # print orig name, usage modes, # used, and # files that use it
            core.MY_PRINT_FUNC("   " + str(p))
    if notused_img_norename:
        core.MY_PRINT_FUNC("=" * 60)
        core.MY_PRINT_FUNC(
            "Found %d not-used images in the file tree (no proposed changes)" %
            len(notused_img_norename))
        printme = set()
        for p in notused_img_norename:
            # is this notused-file anywhere below any unused dir?
            t = False
            for d in unused_dirnames:
                if p.name.startswith(d):
                    # add this dir, not this file, to the print set
                    printme.add(os.path.join(d, "***"))
                    t = True
            if not t:
                # if not encompassed by an unused dir, add the filename
                printme.add(p.name)
        # convert set back to sorted list
        printme = sorted(list(printme), key=sortbydirdepth)
        for s in printme:
            core.MY_PRINT_FUNC("   " + s)
    if notused_notimg:
        core.MY_PRINT_FUNC("=" * 60)
        core.MY_PRINT_FUNC(
            "Found %d not-used not-images in the file tree (no proposed changes)"
            % len(notused_notimg))
        printme = set()
        for p in notused_notimg:
            # is this notused-file anywhere below any unused dir?
            t = False
            for d in unused_dirnames:
                if p.name.startswith(d):
                    # add this dir, not this file, to the print set
                    printme.add(os.path.join(d, "***"))
                    t = True
            if not t:
                # if not encompassed by an unused dir, add the filename
                printme.add(p.name)
        # convert set back to sorted list
        printme = sorted(list(printme), key=sortbydirdepth)
        for s in printme:
            core.MY_PRINT_FUNC("   " + s)
    # print with all "from" file names left-justified so all the arrows are nicely lined up (unless they use jp characters)
    longest_name_len = 0
    for p in used_rename:
        longest_name_len = max(longest_name_len, len(p.name))
    for p in notused_img_rename:
        longest_name_len = max(longest_name_len, len(p.name))
    if used_rename:
        core.MY_PRINT_FUNC("=" * 60)
        core.MY_PRINT_FUNC("Found %d used files to be moved/renamed:" %
                           len(used_rename))
        oldname_list = core.MY_JUSTIFY_STRINGLIST(
            [p.name for p in used_rename])
        newname_list = [p.newname for p in used_rename]
        zipped = list(zip(oldname_list, newname_list))
        zipped_and_sorted = sorted(zipped, key=lambda y: sortbydirdepth(y[0]))
        for o, n in zipped_and_sorted:
            # print 'from' with the case/separator it uses in the PMX
            core.MY_PRINT_FUNC("   {:s} --> {:s}".format(o, n))
    if notused_img_rename:
        core.MY_PRINT_FUNC("=" * 60)
        core.MY_PRINT_FUNC("Found %d not-used images to be moved/renamed:" %
                           len(notused_img_rename))
        oldname_list = core.MY_JUSTIFY_STRINGLIST(
            [p.name for p in notused_img_rename])
        newname_list = [p.newname for p in notused_img_rename]
        zipped = list(zip(oldname_list, newname_list))
        zipped_and_sorted = sorted(zipped, key=lambda y: sortbydirdepth(y[0]))
        for o, n in zipped_and_sorted:
            # print 'from' with the case/separator it uses in the PMX
            core.MY_PRINT_FUNC("   {:s} --> {:s}".format(o, n))
    core.MY_PRINT_FUNC("=" * 60)

    if not (used_rename or notused_img_rename):
        core.MY_PRINT_FUNC("No proposed file changes")
        core.MY_PRINT_FUNC("Aborting: no files were changed")
        return None

    info = [
        "Do you accept these new names/locations?", "1 = Yes, 2 = No (abort)"
    ]
    r = core.MY_SIMPLECHOICE_FUNC((1, 2), info)
    if r == 2:
        core.MY_PRINT_FUNC("Aborting: no files were changed")
        return None

    # =========================================================================================================
    # =========================================================================================================
    # =========================================================================================================
    # finally, do the actual renaming:

    # first, create a backup of the folder
    if MAKE_BACKUP_BEFORE_RENAMES:
        r = make_zipfile_backup(startpath, BACKUP_SUFFIX)
        if not r:
            # this happens if the backup failed somehow AND the user decided to quit
            core.MY_PRINT_FUNC("Aborting: no files were changed")
            return None

    # do all renaming on disk and in PMXes, and also handle the print statements
    apply_file_renaming(all_pmx_obj, filerecord_list, startpath)

    # write out
    for this_pmx_name, this_pmx_obj in all_pmx_obj.items():
        # NOTE: this is OVERWRITING THE PREVIOUS PMX FILE, NOT CREATING A NEW ONE
        # because I make a zipfile backup I don't need to feel worried about preserving the old version
        output_filename_pmx = os.path.join(startpath, this_pmx_name)
        # output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
        pmxlib.write_pmx(output_filename_pmx, this_pmx_obj, moreinfo=moreinfo)

    core.MY_PRINT_FUNC("Done!")
    return None
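# walk_filetree_from_root, match_folder_anylevel, categorize_files, remove_pattern,
# make_zipfile_backup, apply_file_renaming, and sortbydirdepth are all defined elsewhere
# in this script. As one illustration, a minimal sketch of a sort key that orders paths
# "first by directory depth then alphabetically (case insensitive)" could look like:
import os

def sortbydirdepth_sketch(name: str):
    # hypothetical stand-in for the real sortbydirdepth helper
    return (name.count(os.path.sep), name.lower())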
Example #4
def translate_to_english(pmx: pmxstruct.Pmx, moreinfo=False):
    # for each category,
    # 	for each name,
    # 		check for type 0/1/2 (already good, copy JP, exact match in special dict)
    # 		create translate_entry regardless what happens
    # do same thing for model name
    # then for all that didn't get successfully translated,
    # do bulk local piecewise translate: list(str) -> list(str)
    # then for all that didn't get successfully translated,
    # do bulk google piecewise translate: list(str) -> list(str)
    # then sort the results
    # then format & print the results
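    # trans_type legend used throughout: -1 = fail/pending, 0 = already good (nochange),
    # 1 = copied JP, 2 = exact match in special dict, 3 = local piecewise, 4 = Google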

    # step zero: set up the translator thingy
    init_googletrans()

    # if JP model name is empty, give it something. same for comment
    if pmx.header.name_jp == "":
        pmx.header.name_jp = "model"
    if pmx.header.comment_jp == "":
        pmx.header.comment_jp = "comment"

    translate_maps = []

    # repeat the following for each category of visible names:
    # materials=4, bones=5, morphs=6, dispframe=7
    cat_id_list = list(range(4, 8))
    category_list = [pmx.materials, pmx.bones, pmx.morphs, pmx.frames]
    for cat_id, category in zip(cat_id_list, category_list):
        # for each entry:
        for d, item in enumerate(category):
            # skip "special" display frames
            if isinstance(item, pmxstruct.PmxFrame) and item.is_special:
                continue
            # strip away newline and return just in case, i saw a few examples where they showed up
            item.name_jp = item.name_jp.replace('\r', '').replace('\n', '')
            item.name_en = item.name_en.replace('\r', '').replace('\n', '')
            # try to apply "easy" translate methods
            newname, source = easy_translate(item.name_jp, item.name_en,
                                             specificdict_dict[cat_id])
            # build the "trans_entry" item from this result, regardless of pass/fail
            newentry = translate_entry(item.name_jp, item.name_en, cat_id, d,
                                       newname, source)
            # store it
            translate_maps.append(newentry)

    # model name is special cuz there's only one & its indexing is different
    # but i'm doing the same stuff
    pmx.header.name_jp = pmx.header.name_jp.replace('\r', '').replace('\n', '')
    pmx.header.name_en = pmx.header.name_en.replace('\r', '').replace('\n', '')
    # try to apply "easy" translate methods
    newname, source = easy_translate(pmx.header.name_jp, pmx.header.name_en,
                                     None)
    # build the "trans_entry" item from this result, regardless of pass/fail
    newentry = translate_entry(pmx.header.name_jp, pmx.header.name_en, 0, 2,
                               newname, source)
    # store it
    translate_maps.append(newentry)

    if TRANSLATE_MODEL_COMMENT:
        # here, attempt to match model comment with type0 (already good) or type1 (copy JP)
        newcomment, newcommentsource = easy_translate(pmx.header.comment_jp,
                                                      pmx.header.comment_en,
                                                      None)
    else:
        newcomment = pmx.header.comment_en
        newcommentsource = 0  # 0 means kept good aka nochange

    # now I have all the translatable items (except for model comment) collected in one list
    # partition the list into done and notdone
    translate_maps, translate_notdone = core.my_list_partition(
        translate_maps, lambda x: x.trans_type != -1)
    ########
    # actually do local translate
    local_results = translation_tools.local_translate(
        [item.jp_old for item in translate_notdone])
    # determine if each item passed or not, update the en_new and trans_type fields
    for item, result in zip(translate_notdone, local_results):
        if not translation_tools.needs_translate(result):
            item.en_new = result
            item.trans_type = 3
    # grab the newly-done items and move them to the done list
    translate_done2, translate_notdone = core.my_list_partition(
        translate_notdone, lambda x: x.trans_type != -1)
    translate_maps.extend(translate_done2)
    ########
    if not PREFER_EXISTING_ENGLISH_NAME:
        # if i chose to anti-prefer the existing EN name, then it is still preferred over google and should be checked here
        for item in translate_notdone:
            # first, if en name is already good (not blank and not JP), just keep it
            if item.en_old and not item.en_old.isspace() and item.en_old.lower() not in FORBIDDEN_ENGLISH_NAMES \
              and not translation_tools.needs_translate(item.en_old):
                item.en_new = item.en_old
                item.trans_type = 0
        # transfer the newly-done things over to the translate_maps list
        translate_done2, translate_notdone = core.my_list_partition(
            translate_notdone, lambda x: x.trans_type != -1)
        translate_maps.extend(translate_done2)

    ########
    # actually do google translate
    num_items = len(translate_notdone) + (newcommentsource == -1)
    if num_items:
        core.MY_PRINT_FUNC(
            "... identified %d items that need Internet translation..." %
            num_items)
        try:
            google_results = google_translate(
                [item.jp_old for item in translate_notdone])
            # determine if each item passed or not, update the en_new and trans_type fields
            for item, result in zip(translate_notdone, google_results):
                # always accept the google result, pass or fail it's the best i've got
                item.en_new = result
                # determine whether it passed or failed for display purposes
                # failure probably due to unusual geometric symbols, not due to japanese text
                if translation_tools.needs_translate(result):
                    item.trans_type = -1
                else:
                    item.trans_type = 4
            # grab the newly-done items and move them to the done list
            translate_maps.extend(translate_notdone)
            # comment!
            if TRANSLATE_MODEL_COMMENT and newcommentsource == -1:  # -1 = pending, 0 = did nothing, 4 = did something
                # if i am going to translate the comment, but was unable to do it earlier, then do it now
                core.MY_PRINT_FUNC("Now translating model comment")
                # delete the \r chars (google doesn't want them) and trim leading/trailing whitespace
                comment_clean = pmx.header.comment_jp.replace("\r", "").strip()
                ########
                # actually do google translate
                if check_translate_budget(1):
                    newcomment = _single_google_translate(comment_clean)
                    newcomment = newcomment.replace('\n', '\r\n')  # put back the \r\n, MMD needs them
                    newcommentsource = 4
                else:
                    # no budget for just one more? oh well, no change
                    newcomment = pmx.header.comment_en
                    newcommentsource = 0
        except Exception as e:
            core.MY_PRINT_FUNC(e.__class__.__name__, e)
            core.MY_PRINT_FUNC(
                "Internet translate unexpectedly failed, attempting to recover..."
            )
            # for each in translate-notdone, set status to fail, set newname to oldname (so it won't change)
            for item in translate_notdone:
                item.trans_type = -1
                item.en_new = item.en_old
            # append to translate_maps
            translate_maps.extend(translate_notdone)

    ###########################################
    # done translating!!!!!
    ###########################################

    # sanity check: if old result matches new result, then force type to be nochange
    # only relevant if PREFER_EXISTING_ENGLISH_NAME = False
    for m in translate_maps:
        if m.en_old == m.en_new and m.trans_type not in (-1, 0):
            m.trans_type = 0
    # now, determine if i actually changed anything at all before bothering to try applying stuff
    type_fail, temp = core.my_list_partition(translate_maps,
                                             lambda x: x.trans_type == -1)
    type_good, temp = core.my_list_partition(temp, lambda x: x.trans_type == 0)
    type_copy, temp = core.my_list_partition(temp, lambda x: x.trans_type == 1)
    type_exact, temp = core.my_list_partition(temp,
                                              lambda x: x.trans_type == 2)
    type_local, temp = core.my_list_partition(temp,
                                              lambda x: x.trans_type == 3)
    type_google = temp
    # number of things I could have translated
    total_fields = len(translate_maps) + int(TRANSLATE_MODEL_COMMENT)
    # number of things that weren't already good (includes changed and fail)
    # (the comment only counts as "already good" when it was in scope and unchanged)
    total_changed = total_fields - len(type_good) - int(TRANSLATE_MODEL_COMMENT and newcommentsource == 0)
    if type_fail:
        # warn about any strings that failed translation
        core.MY_PRINT_FUNC(
            "WARNING: %d items were unable to be translated, try running the script again or doing translation manually."
            % len(type_fail))
    if total_changed == 0:
        core.MY_PRINT_FUNC("No changes are required")
        return pmx, False

    ###########################################
    # next, apply!
    # comment
    if TRANSLATE_MODEL_COMMENT and newcommentsource != 0:
        pmx.header.comment_en = newcomment
    # everything else: iterate over all entries, write when anything has type != 0
    for item in translate_maps:
        # writeback any source except "nochange"
        # even writeback fail type, because fail will be my best-effort translation
        # if its being translated thats cuz old_en is bad, so im not making it any worse
        # failure probably due to unusual geometric symbols, not due to japanese text
        if item.trans_type != 0:
            if item.cat_id == 0:  # this is header-type, meaning this is model name
                pmx.header.name_en = item.en_new
            elif item.cat_id == 4:
                pmx.materials[item.idx].name_en = item.en_new
            elif item.cat_id == 5:
                pmx.bones[item.idx].name_en = item.en_new
            elif item.cat_id == 6:
                pmx.morphs[item.idx].name_en = item.en_new
            elif item.cat_id == 7:
                pmx.frames[item.idx].name_en = item.en_new
            else:
                core.MY_PRINT_FUNC(
                    "ERROR: translate_map has invalid cat_id=%s, how the hell did that happen?"
                    % str(item.cat_id))

    ###########################################
    # next, print info!
    core.MY_PRINT_FUNC(
        "Translated {} / {} = {:.1%} english fields in the model".format(
            total_changed, total_fields, total_changed / total_fields))
    if moreinfo or type_fail:
        # give full breakdown of each source if requested OR if any fail
        core.MY_PRINT_FUNC(
            "Total fields={}, nochange={}, copy={}, exactmatch={}, piecewise={}, Google={}, fail={}"
            .format(total_fields, len(type_good), len(type_copy),
                    len(type_exact), len(type_local), len(type_google),
                    len(type_fail)))
        #########
        # now print the table of before/after/etc
        if moreinfo:
            if SHOW_ALL_CHANGED_FIELDS:
                # show everything that isn't nochange
                maps_printme = [
                    item for item in translate_maps if item.trans_type != 0
                ]
            else:
                # hide good/copyJP/exactmatch cuz those are uninteresting and guaranteed to be safe
                # only show piecewise and google translations and fails
                maps_printme = [
                    item for item in translate_maps
                    if item.trans_type > 2 or item.trans_type == -1
                ]
        else:
            # if moreinfo not enabled, only show fails
            maps_printme = type_fail
        if maps_printme:
            # first, SORT THE LIST! print items in PMXE order
            maps_printme.sort(key=lambda x: x.idx)
            maps_printme.sort(key=lambda x: x.cat_id)
            # then, justify each column
            # columns: category, idx, trans_type, en_old, en_new, jp_old = 6 types
            # bone  15  google || EN: 'asdf' --> 'foobar' || JP: 'fffFFFff'
            just_cat = core.MY_JUSTIFY_STRINGLIST(
                [category_dict[vv.cat_id] for vv in maps_printme])
            just_idx = core.MY_JUSTIFY_STRINGLIST(
                [str(vv.idx) for vv in maps_printme],
                right=True)  # this is right-justify, all others are left
            just_source = core.MY_JUSTIFY_STRINGLIST(
                [type_dict[vv.trans_type] for vv in maps_printme])
            just_enold = core.MY_JUSTIFY_STRINGLIST(
                ["'%s'" % vv.en_old for vv in maps_printme])
            just_ennew = core.MY_JUSTIFY_STRINGLIST(
                ["'%s'" % vv.en_new for vv in maps_printme])
            just_jpold = ["'%s'" % vv.jp_old for vv in maps_printme]  # no justify needed for final item

            # now pretty-print the list of translations:
            for args in zip(just_cat, just_idx, just_source, just_enold,
                            just_ennew, just_jpold):
                core.MY_PRINT_FUNC(
                    "{} {} {} || EN: {} --> {} || JP: {}".format(*args))

    ###########################################
    # next, return!
    return pmx, True
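# A hypothetical usage sketch, reusing the pmxlib read/write calls shown in the earlier
# examples (filenames are illustrative): translate a model in place, then write it back
# out only if anything actually changed.
pmx = pmxlib.read_pmx("model.pmx", moreinfo=True)
pmx, any_changes = translate_to_english(pmx, moreinfo=True)
if any_changes:
    pmxlib.write_pmx("model_translate.pmx", pmx, moreinfo=True)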