def alphamorph_correct(pmx: pmxstruct.Pmx, moreinfo=False):
    num_fixed = 0
    total_morphs_affected = 0

    # for each morph:
    for d, morph in enumerate(pmx.morphs):
        # if not a material morph, skip it
        if morph.morphtype != pmxstruct.MorphType.MATERIAL: continue
        this_num_fixed = 0
        # for each material in this material morph:
        for dd, matitem in enumerate(morph.items):
            matitem: pmxstruct.PmxMorphItemMaterial  # type annotation for pycharm
            # if (mult opacity by 0) OR (add -1 to opacity), then this item is (trying to) hide the target material
            if (not matitem.is_add and matitem.alpha == 0) or (matitem.is_add and matitem.alpha == -1):
                if not (-1 <= matitem.mat_idx < len(pmx.materials)):
                    core.MY_PRINT_FUNC(
                        "Warning: material morph %d item %d uses invalid material index %d, skipping"
                        % (d, dd, matitem.mat_idx))
                    continue
                # then replace the entire set of material-morph parameters
                # opacity, edge size, edge alpha, tex, toon, sph
                if matitem.mat_idx != -1 and pmx.materials[matitem.mat_idx].alpha == 0:
                    # if the target material is initially transparent, replace with add-negative-1
                    t = template_minusone
                else:
                    # if the target material is initially opaque, or targeting the whole model, replace with mult-by-0
                    t = template
                if matitem.list()[1:] != t.list()[1:]:  # if it is not already good,
                    newitem = copy.deepcopy(t)
                    newitem.mat_idx = matitem.mat_idx
                    morph.items[dd] = newitem  # replace the morph item with the template
                    this_num_fixed += 1

        if this_num_fixed != 0:
            total_morphs_affected += 1
            num_fixed += this_num_fixed
            if moreinfo:
                core.MY_PRINT_FUNC(
                    "morph #{:<3} JP='{}' / EN='{}', fixed {} items".format(
                        d, morph.name_jp, morph.name_en, this_num_fixed))

    if num_fixed:
        core.MY_PRINT_FUNC("Fixed %d 'hide' morphs" % total_morphs_affected)

    # identify materials that start transparent but still have edging
    mats_fixed = 0
    for d, mat in enumerate(pmx.materials):
        # if opacity is zero AND edge is enabled AND edge has nonzero opacity AND edge has nonzero size
        if mat.alpha == 0 \
          and pmxstruct.MaterialFlags.USE_EDGING in mat.matflags \
          and mat.edgealpha != 0 \
          and mat.edgesize != 0:
            this_num_edgefixed = 0
            # THEN check for any material morphs that add opacity to this material
            for d2, morph in enumerate(pmx.morphs):
                # if not a material morph, skip it
                if morph.morphtype != pmxstruct.MorphType.MATERIAL: continue
                # for each material in this material morph:
                for matitem in morph.items:
                    # if not operating on the right material, skip it
                    if matitem.mat_idx != d: continue
                    # if adding and opacity > 0:
                    if matitem.is_add == 1 and matitem.alpha > 0:
                        # set it to add the edge amounts from the material
                        matitem.edgealpha = mat.edgealpha
                        matitem.edgesize = mat.edgesize
                        this_num_edgefixed += 1
            # done looping over morphs
            # if it modified any locations, zero out the edge params in the material
            if this_num_edgefixed != 0:
                mat.edgealpha = 0
                mat.edgesize = 0
                num_fixed += this_num_edgefixed
                mats_fixed += 1
                if moreinfo:
                    core.MY_PRINT_FUNC(
                        "mat #{:<3} JP='{}' / EN='{}', fixed {} appear morphs".
                        format(d, mat.name_jp, mat.name_en,
                               this_num_edgefixed))

    if mats_fixed:
        core.MY_PRINT_FUNC(
            "Removed edging from %d initially hidden materials" % mats_fixed)

    if num_fixed == 0 and mats_fixed == 0:
        core.MY_PRINT_FUNC("No changes are required")
        return pmx, False

    return pmx, True
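
# A minimal usage sketch for alphamorph_correct, reusing the prompt/read/write pattern from the
# other examples in this collection. The "_alphamorph.pmx" output suffix and the function name
# run_alphamorph_fix are illustrative assumptions, not part of the original script.
def run_alphamorph_fix():
    core.MY_PRINT_FUNC("Please enter name of PMX input file:")
    input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
    pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=True)
    pmx, is_changed = alphamorph_correct(pmx, moreinfo=True)
    if is_changed:
        output_filename_pmx = core.get_unused_file_name(input_filename_pmx[0:-4] + "_alphamorph.pmx")
        pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=True)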
Example #2
    output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
    pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=True)
    return None


def main():
    showhelp()
    pmx, name = showprompt()
    pmx, is_changed = uniquify_names(pmx)
    if is_changed:
        end(pmx, name)
    core.pause_and_quit("Done with everything! Goodbye!")


if __name__ == '__main__':
    core.MY_PRINT_FUNC("Nuthouse01 - 08/24/2020 - v5.00")
    if DEBUG:
        main()
    else:
        try:
            main()
        except (KeyboardInterrupt, SystemExit):
            # this is normal and expected, do nothing and die normally
            pass
        except Exception as ee:
            # if an unexpected error occurs, catch it and print it and call pause_and_quit so the window stays open for a bit
            core.MY_PRINT_FUNC(ee)
            core.pause_and_quit(
                "ERROR: something truly strange and unexpected has occurred, sorry, good luck figuring out what tho"
            )
def main(moreinfo=True):
	# the goal: extract rotation around the "arm" bone local X? axis and transfer it to rotation around the "armtwist" bone local axis
	
	# prompt PMX name
	core.MY_PRINT_FUNC("Please enter name of PMX input file:")
	input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
	pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
	core.MY_PRINT_FUNC("")
	# get bones
	realbones = pmx.bones
	
	twistbone_axes = []
	# then, grab the "twist" bones & save their fixed-rotate axes, if they have them
	# fallback plan: find the arm-to-elbow and elbow-to-wrist unit vectors and use those
	for i in range(len(jp_twistbones)):
		r = core.my_list_search(realbones, lambda x: x.name_jp == jp_twistbones[i], getitem=True)
		if r is None:
			core.MY_PRINT_FUNC("ERROR1: twist bone '{}'({}) cannot be found model, unable to continue. Ensure they use the correct semistandard names, or edit the script to change the JP names it is looking for.".format(jp_twistbones[i], eng_twistbones[i]))
			raise RuntimeError()
		if r.has_fixedaxis:
			# this bone DOES have fixed-axis enabled! use the unit vector in r[18]
			twistbone_axes.append(r.fixedaxis)
		else:
			# i can infer local axis by angle from arm-to-elbow or elbow-to-wrist
			start = core.my_list_search(realbones, lambda x: x.name_jp == jp_sourcebones[i], getitem=True)
			if start is None:
				core.MY_PRINT_FUNC("ERROR2: semistandard bone '%s' is missing from the model, unable to infer axis of rotation" % jp_sourcebones[i])
				raise RuntimeError()
			end = core.my_list_search(realbones, lambda x: x.name_jp == jp_pointat_bones[i], getitem=True)
			if end is None:
				core.MY_PRINT_FUNC("ERROR3: semistandard bone '%s' is missing from the model, unable to infer axis of rotation" % jp_pointat_bones[i])
				raise RuntimeError()
			start_pos = start.pos
			end_pos = end.pos
			# now have both startpoint and endpoint! find the delta!
			delta = [b - a for a,b in zip(start_pos, end_pos)]
			# normalize to length of 1
			length = core.my_euclidian_distance(delta)
			unit = [t / length for t in delta]
			twistbone_axes.append(unit)
	
	# done extracting axis limits from the PMX, stored in list "twistbone_axes"
	core.MY_PRINT_FUNC("...done extracting axis limits from PMX...")
	
	
	###################################################################################
	# prompt VMD file name
	core.MY_PRINT_FUNC("Please enter name of VMD dance input file:")
	input_filename_vmd = core.MY_FILEPROMPT_FUNC(".vmd")
	
	# next, read/use/prune the dance vmd
	nicelist_in = vmdlib.read_vmd(input_filename_vmd, moreinfo=moreinfo)
	
	# sort boneframes into individual lists: one for each [Larm + Lelbow + Rarm + Relbow] and remove them from the master boneframelist
	# frames for all other bones stay in the master boneframelist
	all_sourcebone_frames = []
	for sourcebone in jp_sourcebones:
		# partition & writeback
		temp, nicelist_in.boneframes = core.my_list_partition(nicelist_in.boneframes, lambda x: x.name == sourcebone)
		# all frames for "sourcebone" get their own sublist here
		all_sourcebone_frames.append(temp)
	
	# verify that there are actually arm/elbow frames to process
	sourcenumframes = sum([len(x) for x in all_sourcebone_frames])
	if sourcenumframes == 0:
		core.MY_PRINT_FUNC("No arm/elbow bone frames are found in the VMD, nothing for me to do!")
		core.MY_PRINT_FUNC("Aborting: no files were changed")
		return None
	else:
		core.MY_PRINT_FUNC("...source contains " + str(sourcenumframes) + " arm/elbow bone frames to decompose...")
	
	if USE_OVERKEY_BANDAID:
		# to fix the path that the arms take during interpolation we need to overkey the frames
		# i.e. create intermediate frames that they should have been passing through already, to FORCE it to take the right path
		# i'm replacing the interpolation curves with actual frames
		for sublist in all_sourcebone_frames:
			newframelist = []
			sublist.sort(key=lambda x: x.f) # ensure they are sorted by frame number
			# for each frame
			for i in range(1, len(sublist)):
				this = sublist[i]
				prev = sublist[i-1]
				# use interpolation curve i to interpolate from i-1 to i
				# first: do i need to do anything or are they already close on the timeline?
				thisframenum = this.f
				prevframenum = prev.f
				if (thisframenum - prevframenum) <= OVERKEY_FRAME_SPACING:
					continue
				# if they are far enough apart that i need to do something,
				thisframequat = core.euler_to_quaternion(this.rot)
				prevframequat = core.euler_to_quaternion(prev.rot)
				# 3, 7, 11, 15 = r_ax, r_ay, r_bx, r_by
				bez = core.MyBezier((this.interp[3], this.interp[7]), (this.interp[11], this.interp[15]), resolution=50)
				# create new frames at these frame numbers, spacing is OVERKEY_FRAME_SPACING
				for interp_framenum in range(prevframenum + OVERKEY_FRAME_SPACING, thisframenum, OVERKEY_FRAME_SPACING):
					# calculate the x time percentage from prev frame to this frame
					x = (interp_framenum - prevframenum) / (thisframenum - prevframenum)
					# apply the interpolation curve to translate X to Y
					y = bez.approximate(x)
					# interpolate from prev to this by amount Y
					interp_quat = core.my_slerp(prevframequat, thisframequat, y)
					# begin building the new frame
					newframe = vmdstruct.VmdBoneFrame(
						name=this.name,  # same name
						f=interp_framenum,  # overwrite frame num
						pos=list(this.pos),  # same pos (but make a copy)
						rot=list(core.quaternion_to_euler(interp_quat)),  # overwrite euler angles
						phys_off=this.phys_off,  # same phys_off
						interp=list(core.bone_interpolation_default_linear)  # overwrite interpolation
					)
					newframelist.append(newframe)
				# overwrite thisframe interp curve with default too
				this.interp = list(core.bone_interpolation_default_linear) # overwrite custom interpolation
			# concat the new frames onto the existing frames for this sublist
			sublist += newframelist
			
	# re-count the number of frames for printing purposes
	totalnumframes = sum([len(x) for x in all_sourcebone_frames])
	overkeyframes = totalnumframes - sourcenumframes
	if overkeyframes != 0:
		core.MY_PRINT_FUNC("...overkeying added " + str(overkeyframes) + " arm/elbow bone frames...")
	core.MY_PRINT_FUNC("...beginning decomposition of " + str(totalnumframes) + " arm/elbow bone frames...")
	
	# now i am completely done reading the VMD file and parsing its data! everything has been distilled down to:
	# all_sourcebone_frames = [Larm, Lelbow, Rarm, Relbow] plus nicelist_in[1]
	
	###################################################################################
	# begin the actual calculations
	
	# output array
	new_twistbone_frames = []
	# progress tracker
	curr_progress = 0
	
	# for each sourcebone & corresponding twistbone,
	for (twistbone, axis_orig, sourcebone_frames) in zip(jp_twistbones, twistbone_axes, all_sourcebone_frames):
		# for each frame of the sourcebone,
		for frame in sourcebone_frames:
			# XYZrot = 567 euler
			quat_in = core.euler_to_quaternion(frame.rot)
			axis = list(axis_orig)	# make a copy to be safe
			
			# "swing twist decomposition"
			# swing = "local" x rotation and nothing else
			# swing = sourcebone, twist = twistbone
			(swing, twist) = swing_twist_decompose(quat_in, axis)
			
			# modify "frame" in-place
			# only modify the XYZrot to use new values
			new_sourcebone_euler = core.quaternion_to_euler(swing)
			frame.rot = list(new_sourcebone_euler)
			
			# create & store new twistbone frame
			# name=twistbone, framenum=copy, XYZpos=copy, XYZrot=new, phys=copy, interp16=copy
			new_twistbone_euler = core.quaternion_to_euler(twist)
			newframe = vmdstruct.VmdBoneFrame(
				name=twistbone,
				f=frame.f,
				pos=list(frame.pos),
				rot=list(new_twistbone_euler),
				phys_off=frame.phys_off,
				interp=list(frame.interp)
			)
			new_twistbone_frames.append(newframe)
			# print progress updates
			curr_progress += 1
			core.print_progress_oneline(curr_progress / totalnumframes)
	
	
	######################################################################
	# done with calculations!
	core.MY_PRINT_FUNC("...done with decomposition, now reassembling output...")
	# attach the list of newly created boneframes, modify the original input
	for sublist in all_sourcebone_frames:
		nicelist_in.boneframes += sublist
	nicelist_in.boneframes += new_twistbone_frames
	
	core.MY_PRINT_FUNC("")
	# write out the VMD
	output_filename_vmd = "%s_twistbones_for_%s.vmd" % \
						   (input_filename_vmd[0:-4], core.get_clean_basename(input_filename_pmx))
	output_filename_vmd = core.get_unused_file_name(output_filename_vmd)
	vmdlib.write_vmd(output_filename_vmd, nicelist_in, moreinfo=moreinfo)
	
	core.MY_PRINT_FUNC("Done!")
	return None
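
# The swing_twist_decompose() call above is not defined in this snippet. Below is a minimal,
# self-contained sketch of the standard swing-twist decomposition it appears to perform,
# assuming quaternions are (w, x, y, z) lists and 'axis' is a unit 3-vector
# (the actual helper in the original script may differ in convention).
import math

def swing_twist_decompose_sketch(q, axis):
	# project the quaternion's vector part onto the twist axis
	w, x, y, z = q
	dot = x * axis[0] + y * axis[1] + z * axis[2]
	twist = [w, dot * axis[0], dot * axis[1], dot * axis[2]]
	mag = math.sqrt(sum(c * c for c in twist))
	if mag < 1e-12:
		# degenerate case (pure 180-degree swing): no twist component
		twist = [1.0, 0.0, 0.0, 0.0]
	else:
		twist = [c / mag for c in twist]
	# swing = q * conjugate(twist), so that q == swing * twist
	tw, tx, ty, tz = twist[0], -twist[1], -twist[2], -twist[3]
	swing = [
		w * tw - x * tx - y * ty - z * tz,
		w * tx + x * tw + y * tz - z * ty,
		w * ty - x * tz + y * tw + z * tx,
		w * tz + x * ty - y * tx + z * tw,
	]
	return swing, twist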
Example #4
    vmdlib.write_vmd(output_filename_vmd, vmd, moreinfo=moreinfo)

    # H = plt.hist([j for j in ANGLE_SHARPNESS_FACTORS if j!=0 and j!=1], bins=40, density=True)
    print("factors=", len(ANGLE_SHARPNESS_FACTORS))
    H = plt.hist(ANGLE_SHARPNESS_FACTORS, bins=16, density=True)
    plt.show()

    core.MY_PRINT_FUNC("Done!")
    return None


if __name__ == '__main__':
    print(_SCRIPT_VERSION)
    if DEBUG:
        # print info to explain the purpose of this file
        core.MY_PRINT_FUNC(helptext)
        core.MY_PRINT_FUNC("")

        main()
        core.pause_and_quit("Done with everything! Goodbye!")
    else:
        try:
            # print info to explain the purpose of this file
            core.MY_PRINT_FUNC(helptext)
            core.MY_PRINT_FUNC("")

            main()
            core.pause_and_quit("Done with everything! Goodbye!")
        except (KeyboardInterrupt, SystemExit):
            # this is normal and expected, do nothing and die normally
            pass
Example #5
        f = core.my_list_search(pmx.frames, lambda x: x.name_jp == jp_newik)
        if f is not None:
            # frame already exists, delete it
            pmx.frames.pop(f)

        pass

    # write out
    output_filename = core.get_unused_file_name(output_filename)
    pmxlib.write_pmx(output_filename, pmx, moreinfo=moreinfo)
    core.MY_PRINT_FUNC("Done!")
    return None


if __name__ == '__main__':
    core.MY_PRINT_FUNC("Nuthouse01 - 08/24/2020 - v5.00")
    if DEBUG:
        # print info to explain the purpose of this file
        core.MY_PRINT_FUNC(helptext)
        core.MY_PRINT_FUNC("")

        main()
        core.pause_and_quit("Done with everything! Goodbye!")
    else:
        try:
            # print info to explain the purpose of this file
            core.MY_PRINT_FUNC(helptext)
            core.MY_PRINT_FUNC("")

            main()
            core.pause_and_quit("Done with everything! Goodbye!")
Example #6
def main(moreinfo=True):
    # prompt PMX name
    core.MY_PRINT_FUNC("Please enter name of PMX input file:")
    input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
    pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)

    # to shift the model by a set amount:
    # first, ask user for X Y Z

    # create the prompt popup
    scale_str = core.MY_GENERAL_INPUT_FUNC(
        lambda x: (model_shift.is_3float(x) is not None), [
            "Enter the X,Y,Z amount to scale this model by:",
            "Three decimal values separated by commas.",
            "Empty input will quit the script."
        ])

    # if empty, quit
    if scale_str == "":
        core.MY_PRINT_FUNC("quitting")
        return None
    # use the same func to convert the input string
    scale = model_shift.is_3float(scale_str)

    uniform_scale = (scale[0] == scale[1] == scale[2])
    if not uniform_scale:
        core.MY_PRINT_FUNC(
            "Warning: when scaling by non-uniform amounts, rigidbody sizes will not be modified"
        )

    ####################
    # what does it mean to scale the entire model?
    # scale vertex position, sdef params
    # ? scale vertex normal vectors, then normalize? need to convince myself of this interaction
    # scale bone position, tail offset
    # scale fixedaxis and localaxis vectors, then normalize
    # scale vert morph, bone morph
    # scale rigid pos, size
    # scale joint pos, movelimits

    for v in pmx.verts:
        # vertex position
        for i in range(3):
            v.pos[i] *= scale[i]
        # vertex normal
        for i in range(3):
            if scale[i] != 0:
                v.norm[i] /= scale[i]
            else:
                v.norm[i] = 100000
        # then re-normalize the normal vector
        L = core.my_euclidian_distance(v.norm)
        if L != 0:
            v.norm = [n / L for n in v.norm]
        # c, r0, r1 params of every SDEF vertex
        if v.weighttype == 3:
            for param in v.weight_sdef:
                for i in range(3):
                    param[i] *= scale[i]

    for b in pmx.bones:
        # bone position
        for i in range(3):
            b.pos[i] *= scale[i]
        # bone tail if using offset mode
        if not b.tail_usebonelink:
            for i in range(3):
                b.tail[i] *= scale[i]
        # scale fixedaxis and localaxis vectors, then normalize
        if b.has_fixedaxis:
            for i in range(3):
                b.fixedaxis[i] *= scale[i]
            # then re-normalize
            L = core.my_euclidian_distance(b.fixedaxis)
            if L != 0:
                b.fixedaxis = [n / L for n in b.fixedaxis]
        # scale fixedaxis and localaxis vectors, then normalize
        if b.has_localaxis:
            for i in range(3):
                b.localaxis_x[i] *= scale[i]
            for i in range(3):
                b.localaxis_z[i] *= scale[i]
            # then re-normalize
            L = core.my_euclidian_distance(b.localaxis_x)
            if L != 0:
                b.localaxis_x = [n / L for n in b.localaxis_x]
            L = core.my_euclidian_distance(b.localaxis_z)
            if L != 0:
                b.localaxis_z = [n / L for n in b.localaxis_z]

    for m in pmx.morphs:
        # vertex morph and bone morph (only translate, not rotate)
        if m.morphtype in (1, 2):
            morph_scale.morph_scale(m, scale, bone_mode=1)

    for rb in pmx.rigidbodies:
        # rigid body position
        for i in range(3):
            rb.pos[i] *= scale[i]
        # rigid body size
        # NOTE: rigid body size is a special conundrum
        # spheres have only one dimension, capsules have two, and only boxes have 3
        # what's the "right" way to scale a sphere by 1,5,1? there isn't a right way!
        # boxes and capsules can be rotated and stuff so their axes dont line up with world axes, too
        # is it at least possible to rotate bodies so they are still aligned with their bones?
        # eh, why even bother with any of that. 95% of the time full-model scale will be uniform scaling.
        # only scale the rigidbody size if doing uniform scaling: that is guaranteed to be safe!
        if uniform_scale:
            for i in range(3):
                rb.size[i] *= scale[i]

    for j in pmx.joints:
        # joint position
        for i in range(3):
            j.pos[i] *= scale[i]
        # joint movement lower limits
        for i in range(3):
            j.movemin[i] *= scale[i]
        # joint movement upper limits
        for i in range(3):
            j.movemax[i] *= scale[i]

    # that's it? that's it!

    # write out
    output_filename_pmx = input_filename_pmx[0:-4] + "_scale.pmx"
    output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
    pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=moreinfo)
    core.MY_PRINT_FUNC("Done!")
    return None
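
# The "? scale vertex normal vectors, then normalize?" question above: under a non-uniform scale,
# normals transform by the inverse of the scale (then renormalize), which is what the per-component
# division above does. A small self-contained numeric check (illustrative only):
import math

def _check_normal_scaling(scale=(1.0, 5.0, 2.0)):
    # a triangle lying in a plane with a known normal
    a, b, c = (0, 0, 0), (1, 0, 0.5), (0, 1, 0.25)
    def cross(u, v):
        return (u[1]*v[2] - u[2]*v[1], u[2]*v[0] - u[0]*v[2], u[0]*v[1] - u[1]*v[0])
    def sub(u, v):
        return tuple(p - q for p, q in zip(u, v))
    def unit(v):
        mag = math.sqrt(sum(t * t for t in v))
        return tuple(t / mag for t in v)
    # true normal after scaling the vertices and recomputing from geometry
    sa, sb, sc = (tuple(p[i] * scale[i] for i in range(3)) for p in (a, b, c))
    true_after = unit(cross(sub(sb, sa), sub(sc, sa)))
    # shortcut used above: divide the original normal by the scale, then renormalize
    shortcut = unit(tuple(unit(cross(sub(b, a), sub(c, a)))[i] / scale[i] for i in range(3)))
    print(true_after, shortcut)  # the two should agree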
Example #7
def translate_to_english(pmx: pmxstruct.Pmx, moreinfo=False):
    # for each category,
    # 	for each name,
    # 		check for type 0/1/2 (already good, copy JP, exact match in special dict)
    # 		create translate_entry regardless what happens
    # do same thing for model name
    # then for all that didn't get successfully translated,
    # do bulk local piecewise translate: list(str) -> list(str)
    # then for all that didn't get successfully translated,
    # do bulk google piecewise translate: list(str) -> list(str)
    # then sort the results
    # then format & print the results

    # step zero: set up the translator thingy
    init_googletrans()

    # if JP model name is empty, give it something. same for comment
    if pmx.header.name_jp == "":
        pmx.header.name_jp = "model"
    if pmx.header.comment_jp == "":
        pmx.header.comment_jp = "comment"

    translate_maps = []

    # repeat the following for each category of visible names:
    # materials=4, bones=5, morphs=6, dispframe=7
    cat_id_list = list(range(4, 8))
    category_list = [pmx.materials, pmx.bones, pmx.morphs, pmx.frames]
    for cat_id, category in zip(cat_id_list, category_list):
        # for each entry:
        for d, item in enumerate(category):
            # skip "special" display frames
            if isinstance(item, pmxstruct.PmxFrame) and item.is_special:
                continue
            # strip away newline and return just in case, i saw a few examples where they showed up
            item.name_jp = item.name_jp.replace('\r', '').replace('\n', '')
            item.name_en = item.name_en.replace('\r', '').replace('\n', '')
            # try to apply "easy" translate methods
            newname, source = easy_translate(item.name_jp, item.name_en,
                                             specificdict_dict[cat_id])
            # build the "trans_entry" item from this result, regardless of pass/fail
            newentry = translate_entry(item.name_jp, item.name_en, cat_id, d,
                                       newname, source)
            # store it
            translate_maps.append(newentry)

    # model name is special cuz there's only one & its indexing is different
    # but i'm doing the same stuff
    pmx.header.name_jp = pmx.header.name_jp.replace('\r', '').replace('\n', '')
    pmx.header.name_en = pmx.header.name_en.replace('\r', '').replace('\n', '')
    # try to apply "easy" translate methods
    newname, source = easy_translate(pmx.header.name_jp, pmx.header.name_en,
                                     None)
    # build the "trans_entry" item from this result, regardless of pass/fail
    newentry = translate_entry(pmx.header.name_jp, pmx.header.name_en, 0, 2,
                               newname, source)
    # store it
    translate_maps.append(newentry)

    if TRANSLATE_MODEL_COMMENT:
        # here, attempt to match model comment with type0 (already good) or type1 (copy JP)
        newcomment, newcommentsource = easy_translate(pmx.header.comment_jp,
                                                      pmx.header.comment_en,
                                                      None)
    else:
        newcomment = pmx.header.comment_en
        newcommentsource = 0  # 0 means kept good aka nochange

    # now I have all the translateable items (except for model comment) collected in one list
    # partition the list into done and notdone
    translate_maps, translate_notdone = core.my_list_partition(
        translate_maps, lambda x: x.trans_type != -1)
    ########
    # actually do local translate
    local_results = translation_tools.local_translate(
        [item.jp_old for item in translate_notdone])
    # determine if each item passed or not, update the en_new and trans_type fields
    for item, result in zip(translate_notdone, local_results):
        if not translation_tools.needs_translate(result):
            item.en_new = result
            item.trans_type = 3
    # grab the newly-done items and move them to the done list
    translate_done2, translate_notdone = core.my_list_partition(
        translate_notdone, lambda x: x.trans_type != -1)
    translate_maps.extend(translate_done2)
    ########
    if not PREFER_EXISTING_ENGLISH_NAME:
        # if i chose to anti-prefer the existing EN name, then it is still preferred over google and should be checked here
        for item in translate_notdone:
            # first, if en name is already good (not blank and not JP), just keep it
            if item.en_old and not item.en_old.isspace() and item.en_old.lower() not in FORBIDDEN_ENGLISH_NAMES \
              and not translation_tools.needs_translate(item.en_old):
                item.en_new = item.en_old
                item.trans_type = 0
        # transfer the newly-done things over to the translate_maps list
        translate_done2, translate_notdone = core.my_list_partition(
            translate_notdone, lambda x: x.trans_type != -1)
        translate_maps.extend(translate_done2)

    ########
    # actually do google translate
    num_items = len(translate_notdone) + (newcommentsource != 0)
    if num_items:
        core.MY_PRINT_FUNC(
            "... identified %d items that need Internet translation..." %
            num_items)
        try:
            google_results = google_translate(
                [item.jp_old for item in translate_notdone])
            # determine if each item passed or not, update the en_new and trans_type fields
            for item, result in zip(translate_notdone, google_results):
                # always accept the google result, pass or fail it's the best i've got
                item.en_new = result
                # determine whether it passed or failed for display purposes
                # failure probably due to unusual geometric symbols, not due to japanese text
                if translation_tools.needs_translate(result):
                    item.trans_type = -1
                else:
                    item.trans_type = 4
            # grab the newly-done items and move them to the done list
            translate_maps.extend(translate_notdone)
            # comment!
            if TRANSLATE_MODEL_COMMENT and newcommentsource == -1:  # -1 = pending, 0 = did nothing, 4 = did something
                # if i am going to translate the comment, but was unable to do it earlier, then do it now
                core.MY_PRINT_FUNC("Now translating model comment")
                # delete the \r chars (google doesn't want them) and trim leading/trailing whitespace
                comment_clean = pmx.header.comment_jp.replace("\r", "").strip()
                ########
                # actually do google translate
                if check_translate_budget(1):
                    newcomment = _single_google_translate(comment_clean)
                    newcomment = newcomment.replace('\n', '\r\n')  # put back the \r\n, MMD needs them
                    newcommentsource = 4
                else:
                    # no budget for just one more? oh well, no change
                    newcomment = pmx.header.comment_en
                    newcommentsource = 0
        except Exception as e:
            core.MY_PRINT_FUNC(e.__class__.__name__, e)
            core.MY_PRINT_FUNC(
                "Internet translate unexpectedly failed, attempting to recover..."
            )
            # for each in translate-notdone, set status to fail, set newname to oldname (so it won't change)
            for item in translate_notdone:
                item.trans_type = -1
                item.en_new = item.en_old
            # append to translate_maps
            translate_maps.extend(translate_notdone)

    ###########################################
    # done translating!!!!!
    ###########################################

    # sanity check: if old result matches new result, then force type to be nochange
    # only relevant if PREFER_EXISTING_ENGLISH_NAME = False
    for m in translate_maps:
        if m.en_old == m.en_new and m.trans_type not in (-1, 0):
            m.trans_type = 0
    # now, determine if i actually changed anything at all before bothering to try applying stuff
    type_fail, temp = core.my_list_partition(translate_maps,
                                             lambda x: x.trans_type == -1)
    type_good, temp = core.my_list_partition(temp, lambda x: x.trans_type == 0)
    type_copy, temp = core.my_list_partition(temp, lambda x: x.trans_type == 1)
    type_exact, temp = core.my_list_partition(temp,
                                              lambda x: x.trans_type == 2)
    type_local, temp = core.my_list_partition(temp,
                                              lambda x: x.trans_type == 3)
    type_google = temp
    # number of things I could have translated
    total_fields = len(translate_maps) + int(TRANSLATE_MODEL_COMMENT)
    # number of things that weren't already good (includes changed and fail)
    total_changed = total_fields - len(type_good) - int(newcommentsource != 0)
    if type_fail:
        # warn about any strings that failed translation
        core.MY_PRINT_FUNC(
            "WARNING: %d items were unable to be translated, try running the script again or doing translation manually."
            % len(type_fail))
    if total_changed == 0:
        core.MY_PRINT_FUNC("No changes are required")
        return pmx, False

    ###########################################
    # next, apply!
    # comment
    if TRANSLATE_MODEL_COMMENT and newcommentsource != 0:
        pmx.header.comment_en = newcomment
    # everything else: iterate over all entries, write when anything has type != 0
    for item in translate_maps:
        # writeback any source except "nochange"
        # even writeback fail type, because fail will be my best-effort translation
        # if its being translated thats cuz old_en is bad, so im not making it any worse
        # failure probably due to unusual geometric symbols, not due to japanese text
        if item.trans_type != 0:
            if item.cat_id == 0:  # this is header-type, meaning this is model name
                pmx.header.name_en = item.en_new
            elif item.cat_id == 4:
                pmx.materials[item.idx].name_en = item.en_new
            elif item.cat_id == 5:
                pmx.bones[item.idx].name_en = item.en_new
            elif item.cat_id == 6:
                pmx.morphs[item.idx].name_en = item.en_new
            elif item.cat_id == 7:
                pmx.frames[item.idx].name_en = item.en_new
            else:
                core.MY_PRINT_FUNC(
                    "ERROR: translate_map has invalid cat_id=%s, how the hell did that happen?"
                    % str(item.cat_id))

    ###########################################
    # next, print info!
    core.MY_PRINT_FUNC(
        "Translated {} / {} = {:.1%} english fields in the model".format(
            total_changed, total_fields, total_changed / total_fields))
    if moreinfo or type_fail:
        # give full breakdown of each source if requested OR if any fail
        core.MY_PRINT_FUNC(
            "Total fields={}, nochange={}, copy={}, exactmatch={}, piecewise={}, Google={}, fail={}"
            .format(total_fields, len(type_good), len(type_copy),
                    len(type_exact), len(type_local), len(type_google),
                    len(type_fail)))
        #########
        # now print the table of before/after/etc
        if moreinfo:
            if SHOW_ALL_CHANGED_FIELDS:
                # show everything that isn't nochange
                maps_printme = [
                    item for item in translate_maps if item.trans_type != 0
                ]
            else:
                # hide good/copyJP/exactmatch cuz those are uninteresting and guaranteed to be safe
                # only show piecewise and google translations and fails
                maps_printme = [
                    item for item in translate_maps
                    if item.trans_type > 2 or item.trans_type == -1
                ]
        else:
            # if moreinfo not enabled, only show fails
            maps_printme = type_fail
        if maps_printme:
            # first, SORT THE LIST! print items in PMXE order
            maps_printme.sort(key=lambda x: x.idx)
            maps_printme.sort(key=lambda x: x.cat_id)
            # then, justify each column
            # columns: category, idx, trans_type, en_old, en_new, jp_old = 6 types
            # bone  15  google || EN: 'asdf' --> 'foobar' || JP: 'fffFFFff'
            just_cat = core.MY_JUSTIFY_STRINGLIST(
                [category_dict[vv.cat_id] for vv in maps_printme])
            just_idx = core.MY_JUSTIFY_STRINGLIST(
                [str(vv.idx) for vv in maps_printme],
                right=True)  # this is right-justify, all others are left
            just_source = core.MY_JUSTIFY_STRINGLIST(
                [type_dict[vv.trans_type] for vv in maps_printme])
            just_enold = core.MY_JUSTIFY_STRINGLIST(
                ["'%s'" % vv.en_old for vv in maps_printme])
            just_ennew = core.MY_JUSTIFY_STRINGLIST(
                ["'%s'" % vv.en_new for vv in maps_printme])
            just_jpold = ["'%s'" % vv.jp_old for vv in maps_printme
                          ]  # no justify needed for final item

            # now pretty-print the list of translations:
            for args in zip(just_cat, just_idx, just_source, just_enold,
                            just_ennew, just_jpold):
                core.MY_PRINT_FUNC(
                    "{} {} {} || EN: {} --> {} || JP: {}".format(*args))

    ###########################################
    # next, return!
    return pmx, True
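
# core.my_list_partition() is used throughout these examples. From its usage it appears to split a
# list into (items matching the predicate, items not matching) while preserving order; a minimal
# stand-in sketch under that assumption (the real helper lives in the nuthouse01 core module):
def my_list_partition_sketch(items, predicate):
    matches, rest = [], []
    for item in items:
        (matches if predicate(item) else rest).append(item)
    return matches, rest

# e.g. done, notdone = my_list_partition_sketch(translate_maps, lambda x: x.trans_type != -1)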
Example #8
def main(moreinfo=True):
    # copied codes
    core.MY_PRINT_FUNC("Please enter name of PMX model file:")
    input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")

    # object
    pmx_file_obj: pmxstruct.Pmx = pmxlib.read_pmx(input_filename_pmx,
                                                  moreinfo=moreinfo)

    # since there was an update to the Valve Bip tools (I guess?), there are two different bone naming schemes: old and new
    # only the prefixes and ordering changed, so a little bit of scripting is needed here to find the last leg bone
    big_dict: dict = {**body_dict, **leg_dict, **arm_dict, **finger_dict}

    #########################################################################
    # stage 1: create & insert core/base bones (grooves, mother,...)
    #########################################################################

    # base bone section
    # base order: 上半身, 下半身, 腰 (b_1), グルーブ, センター, 全ての親
    base_bone_4_name = "全ての親"  # motherbone
    base_bone_3_name = "センター"  # center
    base_bone_2_name = "グルーブ"  # groove
    base_bone_1_name = "腰"  # waist

    # note: Source models apparently have a much larger scale than MMD models
    base_bone_4_pos = [0, 0, 0]
    base_bone_3_pos = [0, 21, -0.533614993095398]
    base_bone_2_pos = base_bone_3_pos
    base_bone_1_pos = [0, 32, -0.533614993095398]

    # pelvis_pos = [-4.999999873689376e-06, 38.566917419433594, -0.533614993095398]

    # 全ての親, name_en, [0.0, 0.0, -0.4735046625137329], -1, 0, False,
    # True, True, True, True,
    # False, [0.0, 0.0, 0.0], False, False, None,
    # None, False, None, False, None, None, False, None, False,
    # None, None, None, None

    # base order: 上半身, 下半身, 腰 (b_1), グルーブ, センター, 全ての親
    base_bone_4_obj = pmxstruct.PmxBone(
        name_jp=base_bone_4_name,
        name_en="",
        pos=base_bone_4_pos,
        parent_idx=-1,
        deform_layer=0,
        deform_after_phys=False,
        has_rotate=True,
        has_translate=True,
        has_visible=True,
        has_enabled=True,
        has_ik=False,
        tail_usebonelink=False,
        tail=[0, 3, 0],
        inherit_rot=False,
        inherit_trans=False,
        has_fixedaxis=False,
        has_localaxis=False,
        has_externalparent=False,
    )
    insert_single_bone(pmx_file_obj, base_bone_4_obj, 0)

    base_bone_3_obj = pmxstruct.PmxBone(
        name_jp=base_bone_3_name,
        name_en="",
        pos=base_bone_3_pos,
        parent_idx=0,
        deform_layer=0,
        deform_after_phys=False,
        has_rotate=True,
        has_translate=True,
        has_visible=True,
        has_enabled=True,
        has_ik=False,
        tail_usebonelink=False,
        tail=[0, -3, 0],
        inherit_rot=False,
        inherit_trans=False,
        has_fixedaxis=False,
        has_localaxis=False,
        has_externalparent=False,
    )
    insert_single_bone(pmx_file_obj, base_bone_3_obj, 1)

    base_bone_2_obj = pmxstruct.PmxBone(
        name_jp=base_bone_2_name,
        name_en="",
        pos=base_bone_2_pos,
        parent_idx=1,
        deform_layer=0,
        deform_after_phys=False,
        has_rotate=True,
        has_translate=True,
        has_visible=True,
        has_enabled=True,
        has_ik=False,
        tail_usebonelink=False,
        tail=[0, 0, 1.5],
        inherit_rot=False,
        inherit_trans=False,
        has_fixedaxis=False,
        has_localaxis=False,
        has_externalparent=False,
    )
    insert_single_bone(pmx_file_obj, base_bone_2_obj, 2)

    base_bone_1_obj = pmxstruct.PmxBone(
        name_jp=base_bone_1_name,
        name_en="",
        pos=base_bone_1_pos,
        parent_idx=2,
        deform_layer=0,
        deform_after_phys=False,
        has_rotate=True,
        has_translate=True,
        has_visible=True,
        has_enabled=True,
        has_ik=False,
        tail_usebonelink=False,
        tail=[0, 0, 0],
        inherit_rot=False,
        inherit_trans=False,
        has_fixedaxis=False,
        has_localaxis=False,
        has_externalparent=False,
    )
    insert_single_bone(pmx_file_obj, base_bone_1_obj, 3)

    #########################################################################
    # phase 2: translate Source names to MMD names
    #########################################################################

    # for each mapping of source-name to mmd-name,
    for mmd_name, source_possible_names in big_dict.items():
        # for each bone,
        for index, bone_object in enumerate(pmx_file_obj.bones):
            # if it has a source-name, replace with mmd-name
            if bone_object.name_jp in source_possible_names:
                pmx_file_obj.bones[index].name_jp = mmd_name

    # next, fix the lowerbody bone
    # find lowerbod
    lowerbod_obj = core.my_list_search(pmx_file_obj.bones,
                                       lambda x: x.name_jp == "下半身",
                                       getitem=True)
    # elif bone_object.name_jp in ["ValveBiped.Bip01_Pelvis", "bip_pelvis"]:
    if lowerbod_obj is not None:
        # should not be translatable
        lowerbod_obj.has_translate = False
        # parent should be waist
        lowerbod_obj.parent_idx = 3
    # next, fix the upperbody bone
    upperbod_obj = core.my_list_search(pmx_file_obj.bones,
                                       lambda x: x.name_jp == "上半身",
                                       getitem=True)
    if upperbod_obj is not None:
        # should not be translatable
        upperbod_obj.has_translate = False
        # parent should be waist
        upperbod_obj.parent_idx = 3

    #########################################################################
    # phase 3: create & insert IK bones for leg/toe
    #########################################################################
    # find the last leg item index
    # when creating IK bones, want to insert the IK bones after both legs
    r_l_index = core.my_list_search(pmx_file_obj.bones,
                                    lambda x: x.name_jp == "右足")
    r_k_index = core.my_list_search(pmx_file_obj.bones,
                                    lambda x: x.name_jp == "右ひざ")
    r_a_index = core.my_list_search(pmx_file_obj.bones,
                                    lambda x: x.name_jp == "右足首")
    r_t_index = core.my_list_search(pmx_file_obj.bones,
                                    lambda x: x.name_jp == "右つま先")
    l_l_index = core.my_list_search(pmx_file_obj.bones,
                                    lambda x: x.name_jp == "左足")
    l_k_index = core.my_list_search(pmx_file_obj.bones,
                                    lambda x: x.name_jp == "左ひざ")
    l_a_index = core.my_list_search(pmx_file_obj.bones,
                                    lambda x: x.name_jp == "左足首")
    l_t_index = core.my_list_search(pmx_file_obj.bones,
                                    lambda x: x.name_jp == "左つま先")
    # if somehow they aren't found, default to 0
    if r_l_index is None: r_l_index = 0
    if r_k_index is None: r_k_index = 0
    if r_a_index is None: r_a_index = 0
    if r_t_index is None: r_t_index = 0
    if l_l_index is None: l_l_index = 0
    if l_k_index is None: l_k_index = 0
    if l_a_index is None: l_a_index = 0
    if l_t_index is None: l_t_index = 0

    if r_t_index > l_t_index:
        last_leg_item_index = r_t_index
    else:
        last_leg_item_index = l_t_index

    leg_left_ik_name = "左足IK"
    leg_left_toe_ik_name = "左つま先IK"
    leg_right_ik_name = "右足IK"
    leg_right_toe_ik_name = "右つま先IK"

    # these limits in degrees
    knee_limit_1 = [-180, 0.0, 0.0]
    knee_limit_2 = [-0.5, 0.0, 0.0]
    # other parameters
    ik_loops = 40
    ik_toe_loops = 8
    ik_angle = 114.5916  # degrees, =2 radians
    ik_toe_angle = 229.1831  # degrees, =4 radians

    # adding IK and such
    leg_left_ankle_obj = pmx_file_obj.bones[l_a_index]
    leg_left_toe_obj = pmx_file_obj.bones[l_t_index]
    leg_right_ankle_obj = pmx_file_obj.bones[r_a_index]
    leg_right_toe_obj = pmx_file_obj.bones[r_t_index]

    leg_left_ankle_pos = leg_left_ankle_obj.pos
    leg_left_toe_pos = leg_left_toe_obj.pos
    leg_right_ankle_pos = leg_right_ankle_obj.pos
    leg_right_toe_pos = leg_right_toe_obj.pos

    # toe /// some of these values won't match the struct layout /// taken from hololive's Korone model
    # name, name, [-0.823277473449707, 0.2155265510082245, -1.8799238204956055], 112, 0, False,
    # True, True, True, True,
    # False, [0.0, -1.3884940147399902, 1.2653569569920364e-07] /// This is offset, False, False, None,
    # None, False, None, False, None, None, False, None, True,
    # 111, 160, 1.0, [[110, None, None]]

    # leg
    # 右足IK, en_name, [-0.8402935862541199, 1.16348397731781, 0.3492986857891083], 0, 0, False,
    # True, True, True, True,
    # False, [0.0, -2.53071505085245e-07, 1.3884940147399902], False, False, None,
    # None, False, None, False, None, None, False, None, True,
    # 110, 85, 1.9896754026412964, [[109, [-3.1415927410125732, 0.0, 0.0], [-0.008726646192371845, 0.0, 0.0]]
    # /// These ik_links are in radians /// , [108, None, None]]

    leg_left_ik_obj = pmxstruct.PmxBone(
        name_jp=leg_left_ik_name,
        name_en="",
        pos=leg_left_ankle_pos,
        parent_idx=0,
        deform_layer=0,
        deform_after_phys=False,
        has_rotate=True,
        has_translate=True,
        has_visible=True,
        has_enabled=True,
        has_ik=True,
        tail_usebonelink=False,
        tail=[0.0, 0.0, 1.0],
        inherit_rot=False,
        inherit_trans=False,
        has_fixedaxis=False,
        has_localaxis=False,
        has_externalparent=False,
        ik_target_idx=l_a_index,
        ik_numloops=ik_loops,
        ik_angle=ik_angle,
        ik_links=[
            pmxstruct.PmxBoneIkLink(idx=l_k_index,
                                    limit_min=knee_limit_1,
                                    limit_max=knee_limit_2),
            pmxstruct.PmxBoneIkLink(idx=l_l_index)
        ],
    )
    insert_single_bone(pmx_file_obj, leg_left_ik_obj, last_leg_item_index + 1)

    leg_left_toe_ik_obj = pmxstruct.PmxBone(
        name_jp=leg_left_toe_ik_name,
        name_en="",
        pos=leg_left_toe_pos,
        parent_idx=last_leg_item_index + 1,
        deform_layer=0,
        deform_after_phys=False,
        has_rotate=True,
        has_translate=True,
        has_visible=True,
        has_enabled=True,
        has_ik=True,
        tail_usebonelink=False,
        tail=[0.0, -1.0, 0.0],
        inherit_rot=False,
        inherit_trans=False,
        has_fixedaxis=False,
        has_localaxis=False,
        has_externalparent=False,
        ik_target_idx=l_t_index,
        ik_numloops=ik_toe_loops,
        ik_angle=ik_toe_angle,
        ik_links=[pmxstruct.PmxBoneIkLink(idx=l_a_index)],
    )
    insert_single_bone(pmx_file_obj, leg_left_toe_ik_obj,
                       last_leg_item_index + 2)

    leg_right_ik_obj = pmxstruct.PmxBone(
        name_jp=leg_right_ik_name,
        name_en="",
        pos=leg_right_ankle_pos,
        parent_idx=0,
        deform_layer=0,
        deform_after_phys=False,
        has_rotate=True,
        has_translate=True,
        has_visible=True,
        has_enabled=True,
        has_ik=True,
        tail_usebonelink=False,
        tail=[0.0, 0.0, 1.0],
        inherit_rot=False,
        inherit_trans=False,
        has_fixedaxis=False,
        has_localaxis=False,
        has_externalparent=False,
        ik_target_idx=r_a_index,
        ik_numloops=ik_loops,
        ik_angle=ik_angle,
        ik_links=[
            pmxstruct.PmxBoneIkLink(idx=r_k_index,
                                    limit_min=knee_limit_1,
                                    limit_max=knee_limit_2),
            pmxstruct.PmxBoneIkLink(idx=r_l_index)
        ],
    )
    insert_single_bone(pmx_file_obj, leg_right_ik_obj, last_leg_item_index + 3)

    leg_right_toe_ik_obj = pmxstruct.PmxBone(
        name_jp=leg_right_toe_ik_name,
        name_en="",
        pos=leg_right_toe_pos,
        parent_idx=last_leg_item_index + 3,
        deform_layer=0,
        deform_after_phys=False,
        has_rotate=True,
        has_translate=True,
        has_visible=True,
        has_enabled=True,
        has_ik=True,
        tail_usebonelink=False,
        tail=[0.0, -1.0, 0.0],
        inherit_rot=False,
        inherit_trans=False,
        has_fixedaxis=False,
        has_localaxis=False,
        has_externalparent=False,
        ik_target_idx=r_t_index,
        ik_numloops=ik_toe_loops,
        ik_angle=ik_toe_angle,
        ik_links=[pmxstruct.PmxBoneIkLink(idx=r_a_index)],
    )
    insert_single_bone(pmx_file_obj, leg_right_toe_ik_obj,
                       last_leg_item_index + 4)

    # output the file
    output_filename_pmx = input_filename_pmx[0:-4] + "_sourcetrans.pmx"
    pmxlib.write_pmx(output_filename_pmx, pmx_file_obj, moreinfo=moreinfo)
    core.MY_PRINT_FUNC("Done!")
    return None
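
# The hard-coded IK angle constants above are documented as 2 and 4 radians expressed in degrees;
# a quick standalone check of those values:
import math
print(math.degrees(2.0))  # ~114.5916, matches ik_angle
print(math.degrees(4.0))  # ~229.1831, matches ik_toe_angle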
Example #9
def normalize_weights(pmx: pmxstruct.Pmx) -> int:
    """
	Normalize weights for verts in the PMX object. Also "clean" the weights by removing bones with 0 weight, reducing
	weight type to lowest possible, and sorting them by greatest weight. Return the # of vertices that were modified.
	
	:param pmx: PMX object
	:return: int, # of vertices that were modified
	"""
    # number of vertices fixed
    weight_fix = 0

    # for each vertex:
    for d, vert in enumerate(pmx.verts):
        # clean/normalize the weights
        weighttype = vert.weighttype
        w = vert.weight
        # type0=BDEF1, one bone has 100% weight
        # do nothing
        # type1=BDEF2, 2 bones 1 weight (other is implicit)
        # merge, see if it can reduce to BDEF1
        # type2=BDEF4, 4 bones 4 weights
        # normalize, merge, see if it can reduce to BDEF1/2
        # type3=SDEF, 2 bones 1 weight and 12 values i don't understand.
        # nothing
        # type4=QDEF, 4 bones 4 weights
        # normalize, merge
        if weighttype == 0:  # BDEF1
            # nothing to be fixed here
            continue
        elif weighttype == 1:  # BDEF2
            # no need to normalize because the 2nd weight is implicit, not explicit
            # only merge dupes, look for reason to reduce to BDEF1: bones are same, weight is 0/1
            if w[0] == w[1] or w[2] == 1:  # same bones handled the same way as firstbone with weight 1
                weight_fix += 1
                vert.weighttype = 0  # set to BDEF1
                vert.weight = [w[0]]
            elif w[2] == 0:  # firstbone has weight 0
                weight_fix += 1
                vert.weighttype = 0  # set to BDEF1
                vert.weight = [w[1]]
            continue
        elif weighttype == 2 or weighttype == 4:  # BDEF4/QDEF
            # qdef: check for dupes and also normalize
            bones = w[0:4]
            weights = w[4:8]
            is_modified = False

            # unify dupes
            usedbones = []
            for i in range(4):
                if not (bones[i] == 0 and weights[i] == 0.0) and (bones[i] in usedbones):
                    is_modified = True  # then this is a duplicate bone!
                    where = usedbones.index(bones[i])  # find the index where it was first used
                    weights[where] += weights[i]  # combine its weight with the first place it was used
                    bones[i] = 0  # set this bone to null
                    weights[i] = 0.0  # set this weight to 0
                # add to list of usedbones regardless of whether first or dupe
                usedbones.append(bones[i])

            # sort by greatest weight
            before = tuple(bones)
            together = list(zip(bones, weights))  # zip
            together.sort(reverse=True, key=lambda x: x[1])  # sort
            a, b = zip(*together)  # unzip
            if hash(before) != hash(a):  # did the order change?
                is_modified = True
                bones = list(a)
                weights = list(b)

            # normalize if needed
            s = sum(weights)
            if round(s, 6) != 1.0:
                if s == 0:
                    core.MY_PRINT_FUNC(
                        "Error: vert %d has BDEF4 weights that sum to 0, cannot normalize"
                        % d)
                    continue
                # it needs it, do it
                weights = [t / s for t in weights]
                is_modified = True

                try:
                    # where is the first 0 in the weight list? i know it is sorted descending
                    i = weights.index(0)
                    if i == 1:  # first zero at 1, therefore has 1 entry, therefore force to be BDEF1!
                        weight_fix += 1
                        vert.weighttype = 0  # set to BDEF1
                        vert.weight = [bones[0]]
                        continue
                    if weighttype == 2:  # BDEF4 ONLY: check if it can be reduced to BDEF2
                        if i == 2:  # first zero at 2, therefore has 2 nonzero entries, therefore force to be BDEF2!
                            weight_fix += 1
                            vert.weighttype = 1  # set to BDEF2
                            vert.weight = [bones[0], bones[1], weights[0]]
                            continue
                        # if i == 3, fall thru
                except ValueError:
                    pass  # if '0' not found in list, it is using all 4, fall thru

            # is QDEF, or was BDEF and determined to still be BDEF4
            # type stays the same, but have i changed the values? if so store and increment
            if is_modified:
                w[0:4] = bones
                w[4:8] = weights
                weight_fix += 1
        elif weighttype == 3:  # SDEF
            # the order of the bones makes a very very slight difference, so dont try to reorder them
            # do try to compress to BDEF1 if the bones are the same or if one has 100 or 0 weight
            if w[0] == w[1] or w[2] == 1:  # same bones handled the same way as firstbone with weight 1
                weight_fix += 1
                vert.weighttype = 0  # set to BDEF1
                vert.weight = [w[0]]
            elif w[2] == 0:  # firstbone has weight 0
                weight_fix += 1
                vert.weighttype = 0  # set to BDEF1
                vert.weight = [w[1]]
            continue
        else:
            core.MY_PRINT_FUNC("ERROR: invalid weight type for vertex %d" % d)
        pass  # close the for-each-vert loop
    # how many did I change? printing is handled outside
    return weight_fix
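
# The BDEF4/QDEF branch above merges duplicate bones, sorts by descending weight, and renormalizes.
# A small standalone sketch of the same idea on plain lists (illustrative only, not the pmxstruct
# vertex layout):
def normalize_bdef4_sketch(bones, weights):
    # merge duplicate bone indices by summing their weights (ignoring zero-weight entries)
    merged = {}
    for b, w in zip(bones, weights):
        if w != 0.0:
            merged[b] = merged.get(b, 0.0) + w
    # sort by descending weight, then renormalize so the weights sum to 1
    pairs = sorted(merged.items(), key=lambda x: x[1], reverse=True)
    total = sum(w for _, w in pairs)
    if total == 0:
        return bones, weights  # cannot normalize, leave unchanged
    pairs = [(b, w / total) for b, w in pairs]
    # pad back out to 4 slots with null bone 0 / weight 0
    pairs += [(0, 0.0)] * (4 - len(pairs))
    return [b for b, _ in pairs], [w for _, w in pairs]

# e.g. normalize_bdef4_sketch([5, 5, 7, 0], [0.3, 0.3, 0.2, 0.0]) -> ([5, 7, 0, 0], [0.75, 0.25, 0.0, 0.0])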


def main():
    showhelp()
    pmx, name = showprompt()
    pmx, is_changed = prune_unused_vertices(pmx)
    if is_changed:
        end(pmx, name)
    core.pause_and_quit("Done with everything! Goodbye!")


if __name__ == '__main__':
    core.MY_PRINT_FUNC("Nuthouse01 - 12/20/2020 - v5.04")
    if DEBUG:
        main()
    else:
        try:
            main()
        except (KeyboardInterrupt, SystemExit):
            # this is normal and expected, do nothing and die normally
            pass
        except Exception as ee:
            # if an unexpected error occurs, catch it and print it and call pause_and_quit so the window stays open for a bit
            core.MY_PRINT_FUNC(ee)
            core.pause_and_quit(
                "ERROR: something truly strange and unexpected has occurred, sorry, good luck figuring out what tho"
            )
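

# prune_unused_vertices() below relies on two helpers that aren't shown in this snippet:
# delme_list_to_rangemap() and newval_from_range_map(). Based on the comments and usage below
# (indices to delete are grouped into contiguous blocks, then kept indices are shifted down by the
# cumulative number of deletions beneath them), here is a minimal sketch of what such helpers could
# look like -- the real implementations in the original library may differ:
import bisect

def delme_list_to_rangemap_sketch(delme_sorted):
    # group a sorted list of indices-to-delete into contiguous blocks,
    # stored as parallel lists of (block start, cumulative count deleted up through this block)
    starts, cumcounts = [], []
    running = 0
    i = 0
    while i < len(delme_sorted):
        start = delme_sorted[i]
        length = 1
        while i + length < len(delme_sorted) and delme_sorted[i + length] == start + length:
            length += 1
        running += length
        starts.append(start)
        cumcounts.append(running)
        i += length
    return starts, cumcounts

def newval_from_range_map_sketch(v, rangemap):
    # remap a kept index (or list of kept indices) by subtracting the number of deleted entries below it
    starts, cumcounts = rangemap
    if isinstance(v, (list, tuple)):
        return [newval_from_range_map_sketch(x, rangemap) for x in v]
    pos = bisect.bisect_right(starts, v) - 1
    if pos < 0:
        return v  # nothing deleted below this index
    return v - cumcounts[pos]
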
def prune_unused_vertices(pmx: pmxstruct.Pmx, moreinfo=False):
    #############################
    # ready for logic

    # vertices are referenced in faces, morphs (uv and vertex morphs), and soft bodies (should be handled just for completeness' sake)

    # find individual vertices to delete
    #		build set of vertices used in faces
    #		build set of all vertices (just a range)
    #		subtract
    #		convert to sorted list
    # convert to list of [begin, length]
    #		iterate over delvertlist, identify contiguous blocks
    # convert to list of [begin, cumulative size]

    # build set of USED vertices
    used_verts = set()
    for face in pmx.faces:
        used_verts.add(face[0])
        used_verts.add(face[1])
        used_verts.add(face[2])
    # build set of ALL vertices
    all_verts = set(list(range(len(pmx.verts))))
    # derive set of UNUSED vertices
    unused_verts = all_verts.difference(used_verts)
    # convert to ordered list
    delme_verts = sorted(list(unused_verts))

    numdeleted = len(delme_verts)
    prevtotal = len(pmx.verts)
    if numdeleted == 0:
        core.MY_PRINT_FUNC("No changes are required")
        return pmx, False

    delme_range = delme_list_to_rangemap(delme_verts)

    if moreinfo:
        core.MY_PRINT_FUNC(
            "Detected %d orphan vertices arranged in %d contiguous blocks" %
            (len(delme_verts), len(delme_range[0])))

    # need to update places that reference vertices: faces, morphs, softbody
    # first get the total # of iterations I need to do, for progress purposes: #faces + sum of len of all UV and vert morphs
    totalwork = len(pmx.faces) + sum([
        len(m.items) for m in pmx.morphs if (m.morphtype in (1, 3, 4, 5, 6, 7))
    ])

    # faces:
    d = 0
    for d, face in enumerate(pmx.faces):
        # vertices in a face are not guaranteed sorted, and sorting them is a Very Bad Idea
        # therefore they must be remapped individually
        face[0] = newval_from_range_map(face[0], delme_range)
        face[1] = newval_from_range_map(face[1], delme_range)
        face[2] = newval_from_range_map(face[2], delme_range)
        # display progress printouts
        core.print_progress_oneline(d / totalwork)

    # core.MY_PRINT_FUNC("Done updating vertex references in faces")

    # morphs:
    orphan_vertex_references = 0
    for morph in pmx.morphs:
        # if not a vertex morph or UV morph, skip it
        if morph.morphtype not in (1, 3, 4, 5, 6, 7): continue
        lenbefore = len(morph.items)
        # it is plausible that vertex/uv morphs could reference orphan vertices, so I should check for and delete those
        i = 0
        while i < len(morph.items):
            # if the vertex being manipulated is in the list of verts being deleted,
            if core.binary_search_isin(morph.items[i].vert_idx, delme_verts):
                # delete it here too
                morph.items.pop(i)
                orphan_vertex_references += 1
            else:
                # otherwise, remap it
                # but don't remap it here, wait until I'm done deleting vertices and then tackle them all at once
                i += 1

        # morphs usually contain vertexes in sorted order, but not guaranteed!!! MAKE it sorted, nobody will mind
        morph.items.sort(key=lambda x: x.vert_idx)

        # separate the vertices from the morph entries into a list of their own, for more efficient remapping
        vertlist = [x.vert_idx for x in morph.items]
        # remap
        remappedlist = newval_from_range_map(vertlist, delme_range)
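        # (note) newval_from_range_map is apparently happy with either a single index or a whole list -- it is used both ways in this file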
        # write the remapped values back into where they came from
        for x, newval in zip(morph.items, remappedlist):
            x.vert_idx = newval
        # display progress printouts
        d += lenbefore
        core.print_progress_oneline(d / totalwork)

    # core.MY_PRINT_FUNC("Done updating vertex references in morphs")

    # softbody: probably not relevant but eh
    for soft in pmx.softbodies:
        # anchors
        # first, delete any references to delme verts in the anchors
        i = 0
        while i < len(soft.anchors_list):
            # if the vertex referenced is in the list of verts being deleted,
            if core.binary_search_isin(soft.anchors_list[i][1], delme_verts):
                # delete it here too
                soft.anchors_list.pop(i)
            else:
                # otherwise, remap it
                # but don't remap it here, wait until I'm done deleting vertices and then tackle them all at once
                i += 1

        #  MAKE it sorted, nobody will mind
        soft.anchors_list.sort(key=lambda x: x[1])
        # extract the vert indices into a list of their own
        anchorlist = [x[1] for x in soft.anchors_list]
        # remap
        newanchorlist = newval_from_range_map(anchorlist, delme_range)
        # write the remapped values back into where they came from
        for x, newval in zip(soft.anchors_list, newanchorlist):
            x[1] = newval

        # vertex pins
        # first, delete any references to delme verts
        i = 0
        while i < len(soft.vertex_pin_list):
            # if the vertex referenced is in the list of verts being deleted,
            if core.binary_search_isin(soft.vertex_pin_list[i], delme_verts):
                # delete it here too
                soft.vertex_pin_list.pop(i)
            else:
                # otherwise, remap it
                # but don't remap it here, wait until I'm done deleting vertices and then tackle them all at once
                i += 1
        #  MAKE it sorted, nobody will mind
        soft.vertex_pin_list.sort()
        # remap
        soft.vertex_pin_list = newval_from_range_map(soft.vertex_pin_list,
                                                     delme_range)
        # done with softbodies!

    # now, finally, actually delete the vertices from the vertex list
    delme_verts.reverse()
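    # (note) popping from the highest index down to the lowest, so each pop doesn't shift the indices still waiting to be removed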
    for f in delme_verts:
        pmx.verts.pop(f)

    core.MY_PRINT_FUNC(
        "Identified and deleted {} / {} = {:.1%} vertices for being unused".
        format(numdeleted, prevtotal, numdeleted / prevtotal))

    return pmx, True
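
# illustrative call pattern for prune_unused_vertices (placeholder filenames, not from the original):
#     pmx = pmxlib.read_pmx("model.pmx")
#     pmx, is_changed = prune_unused_vertices(pmx, moreinfo=True)
#     if is_changed:
#         pmxlib.write_pmx("model_pruned.pmx", pmx)
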
def main(moreinfo=True):
	# prompt PMX name
	core.MY_PRINT_FUNC("Please enter name of PMX input file:")
	input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
	pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
	
	##################################
	# user flow:
	# ask for the helper bone (to be merged)
	# ask for the destination bone (merged onto)
	# try to infer proper merge factor, if it cannot infer then prompt user
	# then write out to file
	##################################
		
	dest_idx = 0
	while True:
		# any input is considered valid
		s = core.MY_GENERAL_INPUT_FUNC(lambda x: True,
									   ["Please specify the DESTINATION bone that weights will be transferred to.",
										"Enter bone #, JP name, or EN name (names are case sensitive).",
										"Empty input will quit the script."])
		# if empty, leave & do nothing
		if s == "":
			dest_idx = -1
			break
		# then get the bone index from this
		# search JP names first
		dest_idx = core.my_list_search(pmx.bones, lambda x: x.name_jp == s)
		if dest_idx is not None: break  # did i find a match?
		# search EN names next
		dest_idx = core.my_list_search(pmx.bones, lambda x: x.name_en == s)
		if dest_idx is not None: break  # did i find a match?
		# try to cast to int next
		try:
			dest_idx = int(s)
			if 0 <= dest_idx < len(pmx.bones):
				break  # is this within the proper bounds?
			else:
				core.MY_PRINT_FUNC("valid bone indexes are 0-%d" % (len(pmx.bones) - 1))
		except ValueError:
			pass
		core.MY_PRINT_FUNC("unable to find matching bone for name '%s'" % s)
	
	if dest_idx == -1:
		core.MY_PRINT_FUNC("quitting")
		return None
	
	dest_tag = "bone #{} JP='{}' / EN='{}'".format(dest_idx, pmx.bones[dest_idx].name_jp, pmx.bones[dest_idx].name_en)
	source_idx = 0
	while True:
		# any input is considered valid
		s = core.MY_GENERAL_INPUT_FUNC(lambda x: True,
									   ["Please specify the SOURCE bone that will be merged onto %s." % dest_tag,
										"Enter bone #, JP name, or EN name (names are case sensitive).",
										"Empty input will quit the script."])
		# if empty, leave & do nothing
		if s == "":
			source_idx = -1
			break
		# then get the morph index from this
		# search JP names first
		source_idx = core.my_list_search(pmx.bones, lambda x: x.name_jp == s)
		if source_idx is not None: break  # did i find a match?
		# search EN names next
		source_idx = core.my_list_search(pmx.bones, lambda x: x.name_en == s)
		if source_idx is not None: break  # did i find a match?
		# try to cast to int next
		try:
			source_idx = int(s)
			if 0 <= source_idx < len(pmx.bones):
				break  # is this within the proper bounds?
			else:
				core.MY_PRINT_FUNC("valid bone indexes are 0-%d" % (len(pmx.bones) - 1))
		except ValueError:
			pass
		core.MY_PRINT_FUNC("unable to find matching bone for name '%s'" % s)
	
	if source_idx == -1:
		core.MY_PRINT_FUNC("quitting")
		return None
	
	# print to confirm
	core.MY_PRINT_FUNC("Merging bone #{} JP='{}' / EN='{}' ===> bone #{} JP='{}' / EN='{}'".format(
		source_idx, pmx.bones[source_idx].name_jp, pmx.bones[source_idx].name_en,
		dest_idx, pmx.bones[dest_idx].name_jp, pmx.bones[dest_idx].name_en
	))
	# now try to infer the merge factor
	
	f = 0.0
	if pmx.bones[source_idx].inherit_rot and pmx.bones[source_idx].inherit_parent_idx == dest_idx and pmx.bones[source_idx].inherit_ratio != 0:
		# if using partial rot inherit AND inheriting from dest_idx AND ratio != 0, use that
		# think this is good, if twistbones exist they should be children of preferred
		f = pmx.bones[source_idx].inherit_ratio
	elif pmx.bones[source_idx].parent == dest_idx:
		# if they have a direct parent-child relationship, then factor is 1
		f = 1
	else:
		# otherwise, prompt for the factor
		factor_str = core.MY_GENERAL_INPUT_FUNC(is_float, "Unable to infer relationship, please specify a merge factor:")
		if factor_str == "":
			core.MY_PRINT_FUNC("quitting")
			return None
		f = float(factor_str)
		
	# do the actual transfer
	transfer_bone_weights(pmx, dest_idx, source_idx, f)
	
	# run the weight-cleanup function
	dummy = normalize_weights(pmx)
	
	# write out
	output_filename_pmx = input_filename_pmx[0:-4] + "_weightmerge.pmx"
	output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
	pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=moreinfo)
	core.MY_PRINT_FUNC("Done!")
	return None
Example #13
def main(moreinfo=True):
	# prompt PMX name
	core.MY_PRINT_FUNC("Please enter name of PMX input file:")
	input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
	pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
	
	core.MY_PRINT_FUNC("")
	# valid input is any string that can be matched against a morph index
	s = core.MY_GENERAL_INPUT_FUNC(lambda x: morph_scale.get_idx_in_pmxsublist(x, pmx.morphs) is not None,
	   ["Please specify the target morph: morph #, JP name, or EN name (names are not case sensitive).",
		"Empty input will quit the script."])
	# do it again, cuz the lambda only returns true/false
	target_index = morph_scale.get_idx_in_pmxsublist(s, pmx.morphs)
	
	# when given empty text, done!
	if target_index == -1 or target_index is None:
		core.MY_PRINT_FUNC("quitting")
		return None
	
	morphtype = pmx.morphs[target_index].morphtype
	# 1=vert
	# 3=UV
	# 8=material
	core.MY_PRINT_FUNC("Found {} morph #{}: '{}' / '{}'".format(
		mtype_dict[morphtype], target_index, pmx.morphs[target_index].name_jp, pmx.morphs[target_index].name_en))
	
	if morphtype == 1: # vertex
		# for each item in this morph:
		item:pmxstruct.PmxMorphItemVertex  # type annotation for pycharm
		for d, item in enumerate(pmx.morphs[target_index].items):
			# apply the offset
			pmx.verts[item.vert_idx].pos[0] += item.move[0]
			pmx.verts[item.vert_idx].pos[1] += item.move[1]
			pmx.verts[item.vert_idx].pos[2] += item.move[2]
			# invert the morph
		morph_scale.morph_scale(pmx.morphs[target_index], -1)
	elif morphtype == 3: # UV
		item:pmxstruct.PmxMorphItemUV  # type annotation for pycharm
		for d, item in enumerate(pmx.morphs[target_index].items):
			# (vert_idx, A, B, C, D)
			# apply the offset
			pmx.verts[item.vert_idx].uv[0] += item.move[0]
			pmx.verts[item.vert_idx].uv[1] += item.move[1]
			# invert the morph
		morph_scale.morph_scale(pmx.morphs[target_index], -1)
	elif morphtype in (4,5,6,7): # UV1 UV2 UV3 UV4
		whichuv = morphtype - 4
		item:pmxstruct.PmxMorphItemUV  # type annotation for pycharm
		for d, item in enumerate(pmx.morphs[target_index].items):
			# apply the offset
			pmx.verts[item.vert_idx].addl_vec4s[whichuv][0] += item.move[0]
			pmx.verts[item.vert_idx].addl_vec4s[whichuv][1] += item.move[1]
			pmx.verts[item.vert_idx].addl_vec4s[whichuv][2] += item.move[2]
			pmx.verts[item.vert_idx].addl_vec4s[whichuv][3] += item.move[3]
			# invert the morph
		morph_scale.morph_scale(pmx.morphs[target_index], -1)
	elif morphtype == 8: # material
		core.MY_PRINT_FUNC("WIP")
		# todo
		# to invert a material morph means inverting the material's visible/notvisible state as well as flipping the morph
		# hide morph add -> show morph add
		# hide morph mult -> show morph add
		# show morph add -> hide morph mult
		core.MY_PRINT_FUNC("quitting")
		return None
	else:
		core.MY_PRINT_FUNC("Unhandled morph type")
		core.MY_PRINT_FUNC("quitting")
		return None
	
	# write out
	output_filename_pmx = input_filename_pmx[0:-4] + ("_%dinv.pmx" % target_index)
	output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
	pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=moreinfo)
	core.MY_PRINT_FUNC("Done!")
	return None
    output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
    pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=True)
    return None


def main():
    showhelp()
    pmx, name = showprompt()
    pmx, is_changed = morph_winnow(pmx, PRINT_AFFECTED_MORPHS)
    if is_changed:
        end(pmx, name)
    core.pause_and_quit("Done with everything! Goodbye!")


if __name__ == '__main__':
    core.MY_PRINT_FUNC("Nuthouse01 - 10/10/2020 - v5.03")
    if DEBUG:
        main()
    else:
        try:
            main()
        except (KeyboardInterrupt, SystemExit):
            # this is normal and expected, do nothing and die normally
            pass
        except Exception as ee:
            # if an unexpected error occurs, catch it and print it and call pause_and_quit so the window stays open for a bit
            core.MY_PRINT_FUNC(ee)
            core.pause_and_quit(
                "ERROR: something truly strange and unexpected has occurred, sorry, good luck figuring out what tho"
            )
def check3_match_keystr(rawlist_text: list, keystr: list):
    if rawlist_text[readfrom_line] != keystr:
        core.MY_PRINT_FUNC(
            "Err3: on line %d, incomplete or malformed .txt file: expected keyline '%s'"
            % (readfrom_line + 1, keystr))
        raise RuntimeError()
Example #16
def main(moreinfo=True):
    # prompt PMX name
    core.MY_PRINT_FUNC("Please enter name of PMX input file:")
    input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
    pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)

    # usually want to add/remove endpoints for many bones at once, so put all this in a loop
    num_changed = 0
    while True:
        core.MY_PRINT_FUNC("")
        # valid input is any string that can be matched against a bone index
        s = core.MY_GENERAL_INPUT_FUNC(
            lambda x:
            (morph_scale.get_idx_in_pmxsublist(x, pmx.bones) is not None), [
                "Please specify the target bone: bone #, JP name, or EN name (names are not case sensitive).",
                "Empty input will quit the script."
            ])
        # do it again, cuz the lambda only returns true/false
        target_index = morph_scale.get_idx_in_pmxsublist(s, pmx.bones)

        # when given empty text, done!
        if target_index == -1 or target_index is None:
            core.MY_PRINT_FUNC("quitting")
            break
        target_bone = pmx.bones[target_index]

        # print the bone it found
        core.MY_PRINT_FUNC("Found bone #{}: '{}' / '{}'".format(
            target_index, target_bone.name_jp, target_bone.name_en))

        if target_bone.tail_usebonelink:
            core.MY_PRINT_FUNC(
                "Was tailmode 'bonelink', changing to mode 'offset'")
            if target_bone.tail == -1:
                core.MY_PRINT_FUNC(
                    "Error: bone is not linked to anything, skipping")
                continue
            # find the location of the bone currently pointing at
            endpos = pmx.bones[target_bone.tail].pos
            # determine the equivalent offset vector
            offset = [endpos[i] - target_bone.pos[i] for i in range(3)]
            # write it into the bone
            target_bone.tail_usebonelink = False
            target_bone.tail = offset
            # done unlinking endpoint!
            pass

        else:
            core.MY_PRINT_FUNC(
                "Was tailmode 'offset', changing to mode 'bonelink' and adding new endpoint bone"
            )
            if target_bone.tail == [0, 0, 0]:
                core.MY_PRINT_FUNC(
                    "Error: bone has offset of [0,0,0], skipping")
                continue
            # determine the position of the new endpoint bone
            endpos = [
                target_bone.pos[i] + target_bone.tail[i] for i in range(3)
            ]
            # create the new bone
            newbone = pmxstruct.PmxBone(
                name_jp=target_bone.name_jp + endpoint_suffix_jp,
                name_en=target_bone.name_en + endpoint_suffix_en,
                pos=endpos,
                parent_idx=target_index,
                deform_layer=target_bone.deform_layer,
                deform_after_phys=target_bone.deform_after_phys,
                has_rotate=False,
                has_translate=False,
                has_visible=False,
                has_enabled=True,
                has_ik=False,
                has_localaxis=False,
                has_fixedaxis=False,
                has_externalparent=False,
                inherit_rot=False,
                inherit_trans=False,
                tail_usebonelink=True,
                tail=-1)
            # set the target to point at the new bone
            target_bone.tail_usebonelink = True
            target_bone.tail = len(pmx.bones)
            # append the new bone
            pmx.bones.append(newbone)
            # done adding endpoint!
            pass

        num_changed += 1
        pass

    if num_changed == 0:
        core.MY_PRINT_FUNC("Nothing was changed")
        return None

    core.MY_PRINT_FUNC("")

    # write out
    output_filename_pmx = input_filename_pmx[0:-4] + "_endpoints.pmx"
    output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
    pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=moreinfo)
    core.MY_PRINT_FUNC("Done!")
    return None
Example #17
    # that's it? that's it!

    # write out
    output_filename_pmx = input_filename_pmx[0:-4] + "_scale.pmx"
    output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
    pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=moreinfo)
    core.MY_PRINT_FUNC("Done!")
    return None


if __name__ == '__main__':
    print("Nuthouse01 - 10/10/2020 - v5.03")
    if DEBUG:
        # print info to explain the purpose of this file
        core.MY_PRINT_FUNC(helptext)
        core.MY_PRINT_FUNC("")

        main()
        core.pause_and_quit("Done with everything! Goodbye!")
    else:
        try:
            # print info to explain the purpose of this file
            core.MY_PRINT_FUNC(helptext)
            core.MY_PRINT_FUNC("")

            main()
            core.pause_and_quit("Done with everything! Goodbye!")
        except (KeyboardInterrupt, SystemExit):
            # this is normal and expected, do nothing and die normally
            pass
    output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
    pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=True)
    return None


def main():
    showhelp()
    pmx, name = showprompt()
    pmx, is_changed = prune_unused_bones(pmx, PRINT_FOUND_UNUSED_BONES)
    if is_changed:
        end(pmx, name)
    core.pause_and_quit("Done with everything! Goodbye!")


if __name__ == '__main__':
    print(_SCRIPT_VERSION)
    if DEBUG:
        main()
    else:
        try:
            main()
        except (KeyboardInterrupt, SystemExit):
            # this is normal and expected, do nothing and die normally
            pass
        except Exception as ee:
            # if an unexpected error occurs, catch it and print it and call pause_and_quit so the window stays open for a bit
            core.MY_PRINT_FUNC(ee.__class__.__name__, ee)
            core.pause_and_quit(
                "ERROR: something truly strange and unexpected has occurred, sorry, good luck figuring out what tho"
            )
Example #19
def google_translate(in_list: STR_OR_STRLIST, strategy=1) -> STR_OR_STRLIST:
    """
	Take a list of strings & get them all translated by asking Google. Can use per-line strategy or new 'chunkwise' strategy.
	
	:param in_list: list of JP or partially JP strings
	:param strategy: 0=old per-line strategy, 1=new chunkwise strategy, 2=auto choose whichever needs less Google traffic
	:return: list of strings probably pure EN, but sometimes odd unicode symbols show up
	"""
    input_is_str = isinstance(in_list, str)
    if input_is_str:
        in_list = [in_list]  # force it to be a list anyway so I don't have to change my structure

    use_chunk_strat = (strategy == 1)

    # in_list -> pretrans -> jp_chunks_set -> jp_chunks -> jp_chunks_packets -> results_packets -> results
    # jp_chunks + results -> google_dict
    # pretrans + google_dict -> outlist

    # 1. pre-translate to take care of common tasks
    indents, bodies, suffixes = translation_tools.pre_translate(in_list)

    # 2. identify chunks
    jp_chunks_set = set()
    # idea: walk & look for transition from en to jp?
    for s in bodies:  # for every string to translate,
        rstart = 0
        prev_islatin = True
        is_latin = True
        for i in range(len(s)):  # walk along its length one char at a time,
            # use "is_jp" here and not "is_latin" so chunks are defined to be only actual JP stuff and not unicode whatevers
            is_latin = not translation_tools.is_jp(s[i])
            # if char WAS latin but now is NOT latin, then this is the start of a range.
            if prev_islatin and not is_latin:
                rstart = i
            # if it was jp and is now latin, then this is the end of a range (not including here). save it!
            elif is_latin and not prev_islatin:
                jp_chunks_set.add(s[rstart:i])
            prev_islatin = is_latin
        # now outside the loop... if i ended with a non-latin char, grab the final range & add that too
        if not is_latin:
            jp_chunks_set.add(s[rstart:len(s)])
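    # illustrative: a body like "L腕2" yields the JP chunk "腕" -- each chunk is a maximal run of non-latin characters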

    # 3. remove chunks I can already solve
    # maybe localtrans can solve one chunk but not the whole string?
    # chunks are guaranteed to not be PART OF compound words. but they are probably compound words themselves.
    # run local trans on each chunk individually, and if it succeeds, then DON'T send it to google.
    localtrans_dict = dict()
    jp_chunks = []
    for chunk in list(jp_chunks_set):
        trans = translation_tools.piecewise_translate(
            chunk, translation_tools.words_dict)
        if translation_tools.is_jp(trans):
            # if the localtrans failed, then the chunk needs to be sent to google later
            jp_chunks.append(chunk)
        else:
            # if it passed, no need to ask google what they mean cuz I already have a good translation for this chunk
            # this will be added to the dict way later
            localtrans_dict[chunk] = trans

    # 4. packetize them into fewer requests (and if auto, choose whether to use chunks or not)
    jp_chunks_packets = packetize_translate_requests(jp_chunks)
    jp_bodies_packets = packetize_translate_requests(bodies)
    if strategy == 2:
        use_chunk_strat = (len(jp_chunks_packets) < len(jp_bodies_packets))

    # 5. check the translate budget to see if I can afford this
    if use_chunk_strat: num_calls = len(jp_chunks_packets)
    else: num_calls = len(jp_bodies_packets)

    global _DISABLE_INTERNET_TRANSLATE
    if check_translate_budget(num_calls) and not _DISABLE_INTERNET_TRANSLATE:
        core.MY_PRINT_FUNC(
            "... making %d requests to Google Translate web API..." %
            num_calls)
    else:
        # no need to print failing statement, the function already does
        # don't quit early, run thru the same full structure & eventually return a copy of the JP names
        core.MY_PRINT_FUNC(
            "Just copying JP -> EN while Google Translate is disabled")
        _DISABLE_INTERNET_TRANSLATE = True

    # 6. send chunks to Google
    results_packets = []
    if use_chunk_strat:
        for d, packet in enumerate(jp_chunks_packets):
            core.print_progress_oneline(d / len(jp_chunks_packets))
            r = _single_google_translate(packet)
            results_packets.append(r)

        # 7. assemble Google responses & re-associate with the chunks
        # order of inputs "jp_chunks" matches order of outputs "results"
        results = unpacketize_translate_requests(results_packets)  # unpack
        google_dict = dict(zip(jp_chunks, results))  # build dict

        print("#items=", len(in_list), "#chunks=", len(jp_chunks),
              "#requests=", len(jp_chunks_packets))
        print(google_dict)

        google_dict.update(
            localtrans_dict
        )  # add dict entries from things that succeeded localtrans
        google_dict.update(
            translation_tools.words_dict
        )  # add the full-blown words dict to the chunk-translate results
        # dict->list->sort->dict: sort the longest chunks first, VERY CRITICAL so things don't get undershadowed!!!
        google_dict = dict(
            sorted(list(google_dict.items()),
                   reverse=True,
                   key=lambda x: len(x[0])))

        # 8. piecewise translate using newly created dict
        outlist = translation_tools.piecewise_translate(bodies, google_dict)
    else:
        # old style: just translate the strings directly and return their results
        for d, packet in enumerate(jp_bodies_packets):
            core.print_progress_oneline(d / len(jp_bodies_packets))
            r = _single_google_translate(packet)
            results_packets.append(r)
        outlist = unpacketize_translate_requests(results_packets)

    # last, reattach the indents and suffixes
    outlist_final = [i + b + s for i, b, s in zip(indents, outlist, suffixes)]
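    # (note) pre_translate split each input into (indent, body, suffix) and only the bodies were translated,
    # so the untouched indent/suffix pieces are glued back around each translated body here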

    if not _DISABLE_INTERNET_TRANSLATE:
        # if i did use internet translate, print this line when done
        core.MY_PRINT_FUNC("... done!")

    # return
    if input_is_str:
        return outlist_final[0]  # if original input was a single string, then de-listify
    else:
        return outlist_final  # otherwise return as a list
def identify_unused_bones(pmx: pmxstruct.Pmx, moreinfo: bool) -> List[int]:
    """
	Process the PMX and return a list of all unused bone indices in the model.
	1. get bones used by a rigidbody.
	2. get bones that have weight on at least 1 vertex.
	3. mark "exception" bones, done here so parents of exception bones are kept too.
	4. inheritance, aka "bones used by bones", recursively climb the tree & get all bones the "true" used bones depend on.
	5. tails or point-ats.
	6. invert used to get set of unused.

	:param pmx: PMX list-of-lists object
	:param moreinfo: print extra info for debug or whatever
	:return: list of bone indices that are not used
	"""
    # python set: no duplicates! .add(newbone), "in", .discard(delbone)
    # true_used_bones is set of BONE INDEXES
    true_used_bones = set()  # exception bones + rigidbody bones + vertex bones
    vertex_ct = {}  # how many vertexes does each bone control? sometimes useful info

    # first: bones used by a rigidbody
    for body in pmx.rigidbodies:
        true_used_bones.add(body.bone_idx)

    # second: bones used by a vertex i.e. has nonzero weight
    # any vertex that has nonzero weight for that bone
    for vert in pmx.verts:
        for boneidx, weightval in vert.weight:
            if weightval != 0:
                true_used_bones.add(boneidx)
                core.increment_occurance_dict(vertex_ct, boneidx)

    # NOTE: some vertices/rigidbodies depend on "invalid" (-1) bones, clean that up here
    true_used_bones.discard(-1)

    # third: mark the "exception" bones as "used" if they are in the model
    for protect in BONES_TO_PROTECT:
        # get index from JP name
        i = core.my_list_search(pmx.bones, lambda x: x.name_jp == protect)
        if i is not None:
            true_used_bones.add(i)

    # build ik groups here
    # IKbone + chain + target are treated as a group... if any 1 is used, all of them are used. build those groups now.
    ik_groups = []  # list of sets
    for d, bone in enumerate(pmx.bones):
        if bone.has_ik:  # if ik enabled for this bone,
            ik_set = set()
            ik_set.add(d)  # this bone
            ik_set.add(bone.ik_target_idx)  # this bone's target
            for link in bone.ik_links:
                ik_set.add(link.idx)  # all this bone's IK links
            ik_groups.append(ik_set)

    # fourth: NEW APPROACH FOR SOLVING INHERITANCE: RECURSION!
    # for each bone that we know to be used, run UP the inheritance tree and collect everything that it depends on
    # recursion inputs: pmx bonelist, ik groups, set of already-known-used, and the bone to start from
    # bonelist is readonly, ik groups are readonly
    # set of already-known-used overlaps with set-being-built, probably just use one global ref to save time merging sets
    # standard way: input is set-of-already-known, return set-built-from-target, that requires merging results after each call tho
    # BUT each function level adds exactly 1 or 0 bones to the set, therefore can just modify the set that is being passed around

    def recursive_climb_inherit_tree(target: int, set_being_built):
        # implicitly inherits variables pmx, ik_groups from outer scope
        if target in set_being_built or target == -1:
            # stop condition: if this bone idx is already known to be used, i have already ran recursion from this node. don't do it again.
            # also abort if the target is -1 which means invalid bone
            return
        # if not already in the set, but recursion is being called on this, then this bone is a "parent" of a used bone and should be added.
        set_being_built.add(target)
        # now the parents of THIS bone are also used, so recurse into those.
        bb = pmx.bones[target]
        # actual parent
        recursive_climb_inherit_tree(bb.parent_idx, set_being_built)
        # partial inherit: if partial rot or partial move, and ratio is nonzero and parent is valid
        if (bb.inherit_rot or bb.inherit_trans
            ) and bb.inherit_ratio != 0 and bb.inherit_parent_idx != -1:
            recursive_climb_inherit_tree(bb.inherit_parent_idx,
                                         set_being_built)
        # IK groups: if in an IK group, recurse to all members of that IK group
        for group in ik_groups:
            if target in group:
                for ik_member in group:
                    recursive_climb_inherit_tree(ik_member, set_being_built)

    parent_used_bones = set()  # true_used_bones + parents + point-at links
    # now that the recursive function is defined, actually invoke the function from every truly-used bone
    for tu in true_used_bones:
        recursive_climb_inherit_tree(tu, parent_used_bones)

    # fifth: "tail" or point-at links
    # propagate DOWN the inheritance tree exactly 1 level, no more.
    # also get all bones these tails depend on, it shouldn't depend on anything new but it theoretically can.
    final_used_bones = set()
    for bidx in parent_used_bones:
        b = pmx.bones[bidx]
        # if this bone has a tail,
        if b.tail_usebonelink:
            # add it and anything it depends on to the set.
            recursive_climb_inherit_tree(b.tail, final_used_bones)
    # now merge the two sets
    final_used_bones = final_used_bones.union(parent_used_bones)

    # sixth: assemble the final "unused" set by inverting
    # set of all bones, for inverting purposes
    all_bones_list = list(range(len(pmx.bones)))
    all_bones_set = set(all_bones_list)

    unused_bones = all_bones_set.difference(final_used_bones)
    unused_bones_list = sorted(list(unused_bones))

    # print neat stuff
    if moreinfo:
        if unused_bones_list:
            core.MY_PRINT_FUNC(
                "Bones: total=%d, true_used=%d, parents=%d, tails=%d, unused=%d"
                %
                (len(pmx.bones), len(true_used_bones), len(parent_used_bones) -
                 len(true_used_bones), len(final_used_bones) -
                 len(parent_used_bones), len(unused_bones_list)))
        # debug aid
        if PRINT_VERTICES_CONTROLLED_BY_EACH_BONE:
            core.MY_PRINT_FUNC("Number of vertices controlled by each bone:")
            for bp in all_bones_list:
                if bp in vertex_ct:
                    core.MY_PRINT_FUNC("#: %d    ct: %d" % (bp, vertex_ct[bp]))

    return unused_bones_list
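
# illustrative usage of identify_unused_bones (hypothetical caller, not part of the original example):
#     unused = identify_unused_bones(pmx, moreinfo=True)
#     # a caller would then delete these bones and remap every remaining bone reference accordingly
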
Example #21
def main(moreinfo=False):
	core.MY_PRINT_FUNC("Please enter name of PMX model file:")
	input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
	
	# step zero: set up the translator thingy
	translate_to_english.init_googletrans()
	
	# texture sorting plan:
	# 1. get startpath = basepath of input PMX
	# 2. get lists of relevant files
	# 	2a. extract top-level 'neighbor' pmx files from all-set
	# 3. ask about modifying neighbor PMX
	# 4. read PMX: either target or target+all neighbor
	# 5. "categorize files & normalize usages within PMX", NEW FUNC!!!
	# 6. translate all names via Google Trans, don't even bother with local dict
	# 7. mask out invalid windows filepath chars just to be safe
	# 8. print proposed names & other findings
	# 	for unused files under a folder, combine & replace with ***
	# 9. ask for confirmation
	# 10. zip backup (NEW FUNC!)
	# 11. apply renaming, NEW FUNC! rename all including old PMXes on disk
	# 12. get new names for PMXes, write PMX from mem to disk if any of its contents changed
	#	i.e. of all FileRecord with a new name, create a set of all the PMX that use them

	
	# absolute path to directory holding the pmx
	input_filename_pmx_abs = os.path.normpath(os.path.abspath(input_filename_pmx))
	startpath, input_filename_pmx_rel = os.path.split(input_filename_pmx_abs)
	
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# first, build the list of ALL files that actually exist, then filter it down to neighbor PMXs and relevant files
	relative_all_exist_files = file_sort_textures.walk_filetree_from_root(startpath)
	core.MY_PRINT_FUNC("ALL EXISTING FILES:", len(relative_all_exist_files))
	# now fill "neighbor_pmx" by finding files without path separator that end in PMX
	# these are relative paths tho
	neighbor_pmx = [f for f in relative_all_exist_files if 
					(f.lower().endswith(".pmx")) and
					(os.path.sep not in f) and
					f != input_filename_pmx_rel]
	
	# no filtering, all files are relevant
	relevant_exist_files = relative_all_exist_files
	
	core.MY_PRINT_FUNC("NEIGHBOR PMX FILES:", len(neighbor_pmx))
	
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# now ask if I care about the neighbors and read the PMXes into memory
	
	pmx_filenames = [input_filename_pmx_rel]
	
	if neighbor_pmx:
		core.MY_PRINT_FUNC("")
		info = [
			"Detected %d top-level neighboring PMX files, these probably share the same filebase as the target." % len(neighbor_pmx),
			"If files are moved/renamed but the neighbors are not processed, the neighbor texture references will probably break.",
			"Do you want to process all neighbors in addition to the target? (highly recommended)",
			"1 = Yes, 2 = No"]
		r = core.MY_SIMPLECHOICE_FUNC((1, 2), info)
		if r == 1:
			core.MY_PRINT_FUNC("Processing target + all neighbor files")
			# append neighbor PMX files onto the list of files to be processed
			pmx_filenames += neighbor_pmx
		else:
			core.MY_PRINT_FUNC("WARNING: Processing only target, ignoring %d neighbor PMX files" % len(neighbor_pmx))
	# now read all the PMX objects & store in dict alongside the relative name
	# dictionary where keys are filename and values are resulting pmx objects
	all_pmx_obj = {}
	for this_pmx_name in pmx_filenames:
		this_pmx_obj = pmxlib.read_pmx(os.path.join(startpath, this_pmx_name), moreinfo=moreinfo)
		all_pmx_obj[this_pmx_name] = this_pmx_obj
	
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# 	for each pmx, for each file on disk, match against files used in textures (case-insensitive) and replace with canonical name-on-disk
	#	also fill out how much and how each file is used, and unify dupes between files, all that good stuff
	
	filerecord_list = file_sort_textures.categorize_files(all_pmx_obj, relevant_exist_files, moreinfo)
	
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# DETERMINE NEW NAMES FOR FILES
	
	# how to remap: build a list of all destinations (lowercase) to see if any proposed change would lead to collision
	all_new_names = set()
	
	# get new names via google
	# force it to use chunk-wise translate
	newname_list = translate_to_english.google_translate([p.name for p in filerecord_list], strategy=1)
	
	# now repair any windows-forbidden symbols that might have shown up after translation
	newname_list = [n.translate(invalid_windows_chars_ord) for n in newname_list]
	
	# iterate over the results in parallel with the FileRecord items
	for p, newname in zip(filerecord_list, newname_list):
		if newname != p.name:
			# resolve potential collisions by adding numbers suffix to file names
			# first need to make path absolute so get_unused_file_name can check the disk.
			# then check uniqueness against files on disk and files in namelist (files that WILL be on disk)
			newname = core.get_unused_file_name(os.path.join(startpath, newname), namelist=all_new_names)
			# now dest path is guaranteed unique against other existing files & other proposed name changes
			all_new_names.add(newname.lower())
			# make the path no longer absolute: undo adding "startpath" above
			newname = os.path.relpath(newname, startpath)
			p.newname = newname
	
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# NOW PRINT MY PROPOSED RENAMINGS and other findings
	
	# isolate the ones with proposed renaming
	translated_file = [u for u in filerecord_list if u.newname is not None]
	
	if translated_file:
		core.MY_PRINT_FUNC("="*60)
		core.MY_PRINT_FUNC("Found %d JP filenames to be translated:" % len(translated_file))
		oldname_list = core.MY_JUSTIFY_STRINGLIST([p.name for p in translated_file])
		newname_list = [p.newname for p in translated_file]
		zipped = list(zip(oldname_list, newname_list))
		zipped_and_sorted = sorted(zipped, key=lambda y: file_sort_textures.sortbydirdepth(y[0]))
		for o,n in zipped_and_sorted:
			# print 'from' with the case/separator it uses in the PMX
			core.MY_PRINT_FUNC("   {:s} --> {:s}".format(o, n))
		core.MY_PRINT_FUNC("="*60)
	else:
		core.MY_PRINT_FUNC("No proposed file changes")
		core.MY_PRINT_FUNC("Aborting: no files were changed")
		return None
	
	info = ["Do you accept these new names/locations?",
			"1 = Yes, 2 = No (abort)"]
	r = core.MY_SIMPLECHOICE_FUNC((1, 2), info)
	if r == 2:
		core.MY_PRINT_FUNC("Aborting: no files were changed")
		return None
	
	# =========================================================================================================
	# =========================================================================================================
	# =========================================================================================================
	# finally, do the actual renaming:
	
	# first, create a backup of the folder
	if MAKE_BACKUP_BEFORE_RENAMES:
		r = file_sort_textures.make_zipfile_backup(startpath, BACKUP_SUFFIX)
		if not r:
			# this happens if the backup failed somehow AND the user decided to quit
			core.MY_PRINT_FUNC("Aborting: no files were changed")
			return None
	
	# do all renaming on disk and in PMXes, and also handle the print statements
	file_sort_textures.apply_file_renaming(all_pmx_obj, filerecord_list, startpath)
	
	# write out
	for this_pmx_name, this_pmx_obj in all_pmx_obj.items():
		# what name do i write this pmx to? it may be different now! find it in the FileRecord!
		# this script does not filter filerecord_list so it is guaranteed to have a record
		rec = None
		for r in filerecord_list:
			if r.name == this_pmx_name:
				rec = r
				break
		if rec.newname is None:
			# if there is no new name, write back to the name it had previously
			new_pmx_name = rec.name
		else:
			# if there is a new name, write to the new name
			new_pmx_name = rec.newname
		# make the name absolute
		output_filename_pmx = os.path.join(startpath, new_pmx_name)
		# write it, overwriting the existing file at that name
		pmxlib.write_pmx(output_filename_pmx, this_pmx_obj, moreinfo=moreinfo)
	
	core.MY_PRINT_FUNC("Done!")
	return None
Example #22
    output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
    pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=True)
    return None


def main():
    showhelp()
    pmx, name = showprompt()
    pmx, is_changed = dispframe_fix(pmx)
    if is_changed:
        end(pmx, name)
    core.pause_and_quit("Done with everything! Goodbye!")


if __name__ == '__main__':
    print(_SCRIPT_VERSION)
    if DEBUG:
        main()
    else:
        try:
            main()
        except (KeyboardInterrupt, SystemExit):
            # this is normal and expected, do nothing and die normally
            pass
        except Exception as ee:
            # if an unexpected error occurs, catch it and print it and call pause_and_quit so the window stays open for a bit
            core.MY_PRINT_FUNC(ee)
            core.pause_and_quit(
                "ERROR: something truly strange and unexpected has occurred, sorry, good luck figuring out what tho"
            )
Example #23
def main(moreinfo=True):
    # TODO: actually load it in MMD and verify that the curves look how they should
    #  not 100% certain that the order of interpolation values is correct for bone/cam frames

    # TODO: some sort of additional stats somehow?

    # TODO: progress % trackers?

    # prompt VMD file name
    core.MY_PRINT_FUNC("Please enter name of VMD motion input file:")
    input_filename_vmd = core.MY_FILEPROMPT_FUNC(".vmd")

    # next, read/use/prune the dance vmd
    vmd = vmdlib.read_vmd(input_filename_vmd, moreinfo=moreinfo)

    core.MY_PRINT_FUNC("")

    # dictify the boneframes so i can deal with one bone at a time
    boneframe_dict = dictify_framelist(vmd.boneframes)
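    # (assumption) boneframe_dict maps bone name -> that bone's frames in chronological order;
    # the duplicate-timestep check below relies on adjacent entries being sorted by frame number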

    # add the camframes to the dict so I can process them at the same time
    # this makes the typechecker angry
    if len(vmd.camframes) != 0:
        boneframe_dict[NAME_FOR_CAMFRAMES] = vmd.camframes

    # >>>>>> part 0: verify that there are no "multiple frames on the same timestep" situations
    # the MMD GUI shouldn't let this happen, but apparently it has happened... how???
    # the only explanation I can think of is that it's due to physics bones with names that are too long and
    # get truncated, and the uniquifying numbers are in the part that got lost. they originally had one frame
    # per bone but because the names were truncated they look like they're all the same name so it looks like
    # there are many frames for that non-real bone at the same timestep.
    for bonename, boneframe_list in boneframe_dict.items():
        # if a bone has only 1 (or 0?) frames associated with it then there's definitely no overlap problem
        if len(boneframe_list) < 2:
            continue
        i = 0
        while i < len(boneframe_list) - 1:
            # look at all pairs of adjacent frames along a bone
            A = boneframe_list[i]
            B = boneframe_list[i + 1]
            # are they on the same timestep? if so, problem!
            if A.f == B.f:
                # are they setting the same pose?
                if A == B:
                    # if they are setting the same values at the same frame, just fix the problem silently
                    pass
                else:
                    # if they are trying to set different values at the same frame, this is a problem!
                    # gotta fix it to continue, but also gotta print some kind of warning
                    if bonename == NAME_FOR_CAMFRAMES:
                        core.MY_PRINT_FUNC(
                            "WARNING: at timestep t=%d, there are multiple cam frames trying to set different poses. How does this even happen???"
                            % A.f)
                    else:
                        core.MY_PRINT_FUNC(
                            "WARNING: at timestep t=%d, there are multiple frames trying to set bone '%s' to different poses. How does this even happen???"
                            % (A.f, bonename))
                    core.MY_PRINT_FUNC(
                        "I will delete one of them and continue.")
                # remove the 2nd one so that there is only one frame at each timestep
                boneframe_list.pop(i + 1)
                continue
            # otherwise, no problem at all
            i += 1

    # >>>>>> part 1: identify the desired slope for each metric of each frame
    core.MY_PRINT_FUNC("Finding smooth approach/depart slopes...")
    global CURRENT_BONENAME
    allbone_bezier_slopes = {}
    for bonename in sorted(boneframe_dict.keys()):
        CURRENT_BONENAME = bonename  # you're not supposed to pass info via global like this, but idgaf sue me
        boneframe_list = boneframe_dict[bonename]
        # this will hold all the resulting bezier slopes
        # each item corresponds to one frame and is stored as:
        # [approach posx,y,z,rot],[depart posx,y,z,rot]
        thisbone_bezier_slopes = []

        # for each sequence of frames on a single bone,
        for i in range(len(boneframe_list)):

            thisframe_bezier_approach = []
            thisframe_bezier_depart = []

            A = boneframe_list[i - 1] if i != 0 else None
            B = boneframe_list[i]
            C = boneframe_list[i + 1] if i != len(boneframe_list) - 1 else None
            # now i have the 3 frames I want to analyze
            # need to do the analysis for rotations & for positions

            # POSITION
            for j in range(3):
                A_point = (A.f, A.pos[j]) if (A is not None) else None
                B_point = (B.f, B.pos[j])
                C_point = (C.f, C.pos[j]) if (C is not None) else None
                # stuffed all operations into one function for encapsulation
                bez_a, bez_d = scalar_calculate_ideal_bezier_slope(
                    A_point, B_point, C_point)
                # store it
                thisframe_bezier_approach.append(bez_a)
                thisframe_bezier_depart.append(bez_d)

            # ROTATION
            A_point = (A.f, A.rot) if (A is not None) else None
            B_point = (B.f, B.rot)
            C_point = (C.f, C.rot) if (C is not None) else None
            # stuffed all operations into one function for encapsulation
            bez_a, bez_d = rotation_calculate_ideal_bezier_slope(
                A_point, B_point, C_point)
            # store it
            thisframe_bezier_approach.append(bez_a)
            thisframe_bezier_depart.append(bez_d)

            # CAMFRAME ONLY STUFF
            if bonename == NAME_FOR_CAMFRAMES:
                # the typechecker expects boneframes so it gets angry here
                # distance from camera to position
                A_point = (A.f, A.dist) if (A is not None) else None
                B_point = (B.f, B.dist)
                C_point = (C.f, C.dist) if (C is not None) else None
                # stuffed all operations into one function for encapsulation
                bez_a, bez_d = scalar_calculate_ideal_bezier_slope(
                    A_point, B_point, C_point)
                # store it
                thisframe_bezier_approach.append(bez_a)
                thisframe_bezier_depart.append(bez_d)
                # field of view
                A_point = (A.f, A.fov) if (A is not None) else None
                B_point = (B.f, B.fov)
                C_point = (C.f, C.fov) if (C is not None) else None
                # stuffed all operations into one function for encapsulation
                bez_a, bez_d = scalar_calculate_ideal_bezier_slope(
                    A_point, B_point, C_point)
                # store it
                thisframe_bezier_approach.append(bez_a)
                thisframe_bezier_depart.append(bez_d)

            # next i need to store them in some sensible manner
            # ..., [approach posx,y,z,rot], [depart posx,y,z,rot], ...
            thisbone_bezier_slopes.append(thisframe_bezier_approach)
            thisbone_bezier_slopes.append(thisframe_bezier_depart)
            pass  # end "for each frame in this bone"
        # now i have calculated all the desired bezier approach/depart slopes for both rotation and position
        # next i need to rearrange things slightly

        # currently the slopes are stored in "approach,depart" pairs associated with a single frame.
        # but the interpolation curves are stored as "depart, approach" associated with the segment leading up to a frame.
        # AKA, interpolation info stored with frame i is to interpolate from i-1 to i
        # therefore there is no place for the slope when interpolating away from the last frame, pop it
        thisbone_bezier_slopes.pop(-1)
        # the new list needs to start with 1,1,1,1 to interpolate up to the first frame, insert it
        if bonename == NAME_FOR_CAMFRAMES:
            thisbone_bezier_slopes.insert(0, [1] * 6)
        else:
            thisbone_bezier_slopes.insert(0, [1] * 4)
        # now every pair is a "depart,approach" associated with a single frame
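        # illustrative: for frames F0,F1,F2 the flat list [app0, dep0, app1, dep1, app2, dep2] becomes
        # [ones, app0, dep0, app1, dep1, app2] after the pop/insert above, and pairs up below as
        # (ones, app0), (dep0, app1), (dep1, app2) -- entry k = (depart of frame k-1, approach of frame k)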
        final = []
        for i in range(0, len(thisbone_bezier_slopes), 2):
            # now store as pairs
            final.append(
                [thisbone_bezier_slopes[i], thisbone_bezier_slopes[i + 1]])

        assert len(final) == len(boneframe_list)

        # save it!
        allbone_bezier_slopes[bonename] = final
        pass  # end of "for each bone"

    # >>>>>> part 2: calculate the x/y position of the control points for the curve, based on the slope
    core.MY_PRINT_FUNC("Calculating control points...")
    allbone_bezier_points = {}
    for bonename in sorted(allbone_bezier_slopes.keys()):
        bezier_for_one_frame = allbone_bezier_slopes[bonename]
        thisbone_bezier_points = []
        for depart_slopes, approach_slopes in bezier_for_one_frame:
            slopes_per_channel = list(zip(depart_slopes, approach_slopes))
            # print(slopes_per_channel)
            depart_points = []
            approach_points = []
            for depart_slope, approach_slope in slopes_per_channel:
                # now i have the approach & depart for one channel of one frame of one bone
                # 1. handle double-sided cutpoint
                if approach_slope == -1 and depart_slope == -1:
                    # this is a double-sided cutpoint!
                    # see where the global is declared to understand the modes
                    if HOW_TO_HANDLE_DOUBLE_CUTPOINT == 1:
                        approach_slope, depart_slope = 0, 0
                    else:  #elif HOW_TO_HANDLE_DOUBLE_CUTPOINT == 2:
                        approach_slope, depart_slope = 1, 1

                # 3a. in this mode the cutpoint is handled BEFORE normal calculation
                if HOW_TO_HANDLE_SINGLE_SIDE_CUTPOINT == 1:
                    if approach_slope == -1:
                        approach_slope = 0
                    if depart_slope == -1:
                        depart_slope = 0

                # 2. base case: calculate the point position based on the slope
                depart_point = (10, 10)
                approach_point = (117, 117)
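                # (assumption) the interpolation control points live on MMD's 0..127 grid; the depart point is measured
                # from the (0,0) corner and the approach point from the (127,127) corner, hence the "127 - p" mirroring below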
                if approach_slope != -1:
                    # note: the approach point is based on 127,127
                    approach_point = tuple(
                        127 - p for p in make_point_from_slope(approach_slope))
                if depart_slope != -1:
                    depart_point = make_point_from_slope(depart_slope)

                # 3b. handle the one-sided cutpoint
                if HOW_TO_HANDLE_SINGLE_SIDE_CUTPOINT == 2:
                    # fancy "point at the control point of the other side" idea
                    # define the slope via the opposing control point and re-run step 2
                    if approach_slope == -1:
                        # note: depart_point[0] can be 127, if so then this is divide by 0
                        if depart_point[0] == 127:
                            approach_slope = 1000
                        else:
                            approach_slope = (depart_point[1] -
                                              127) / (depart_point[0] - 127)
                        # note: the approach point is based on 127,127
                        approach_point = tuple(
                            127 - p
                            for p in make_point_from_slope(approach_slope))
                    if depart_slope == -1:
                        # note: approach_point[0] CAN BE 0, in theory.
                        if approach_point[0] == 0:
                            depart_slope = 1000
                        else:
                            depart_slope = approach_point[1] / approach_point[0]
                        depart_point = make_point_from_slope(depart_slope)

                # 4. accumulate the channels
                depart_points.append(depart_point)
                approach_points.append(approach_point)
                pass  # end "for one channel of one frame of one bone"
            # 5. accumulate all the frames
            thisbone_bezier_points.append([depart_points, approach_points])
            pass  # end "for one frame of one bone"
        # 6. accumulate the bones
        allbone_bezier_points[bonename] = thisbone_bezier_points
        pass  # end "for one bone"

    # >>>>>> part 3: store this into the boneframe & un-dictify the frames to put it back into the VMD
    for bonename in sorted(boneframe_dict.keys()):
        boneframe_list = boneframe_dict[bonename]
        bezier_points_list = allbone_bezier_points[bonename]
        if bonename == NAME_FOR_CAMFRAMES:
            # this is a list of camframes!
            # for each frame & associated points,
            for camframe, b in zip(boneframe_list, bezier_points_list):
                # print(b)
                # interp = [x_ax, x_bx, x_ay, x_by, 	y_ax, y_bx, y_ay, y_by, 				z_ax, z_bx, z_ay, z_by,
                # 			r_ax, r_bx, r_ay, r_by,		dist_ax, dist_bx, dist_ay, dist_by, 	fov_ax, fov_bx, fov_ay, fov_by]
                interp = [
                    b[0][0][0],
                    b[1][0][0],
                    b[0][0][1],
                    b[1][0][1],
                    b[0][1][0],
                    b[1][1][0],
                    b[0][1][1],
                    b[1][1][1],
                    b[0][2][0],
                    b[1][2][0],
                    b[0][2][1],
                    b[1][2][1],
                    b[0][3][0],
                    b[1][3][0],
                    b[0][3][1],
                    b[1][3][1],
                    b[0][4][0],
                    b[1][4][0],
                    b[0][4][1],
                    b[1][4][1],
                    b[0][5][0],
                    b[1][5][0],
                    b[0][5][1],
                    b[1][5][1],
                ]
                camframe.interp = interp
        else:
            # for each frame & associated points,
            for boneframe, b in zip(boneframe_list, bezier_points_list):
                # print(b)
                # interp = [x_ax, y_ax, z_ax, r_ax, 	x_ay, y_ay, z_ay, r_ay,
                # 			x_bx, y_bx, z_bx, r_bx, 	x_by, y_by, z_by, r_by]
                interp = [
                    b[0][0][0],
                    b[0][1][0],
                    b[0][2][0],
                    b[0][3][0],
                    b[0][0][1],
                    b[0][1][1],
                    b[0][2][1],
                    b[0][3][1],
                    b[1][0][0],
                    b[1][1][0],
                    b[1][2][0],
                    b[1][3][0],
                    b[1][0][1],
                    b[1][1][1],
                    b[1][2][1],
                    b[1][3][1],
                ]
                # this goes into the actual boneframe object still in the lists in boneframe_dict
                boneframe.interp = interp

    # un-dictify it!
    # first, extract the camframes
    if NAME_FOR_CAMFRAMES in boneframe_dict:
        vmd.camframes = boneframe_dict.pop(NAME_FOR_CAMFRAMES)
    # then do the boneframes
    # the names dont matter, make a list of all the lists in the dict
    asdf = list(boneframe_dict.values())
    # flatten it
    flat_boneframe_list = [item for sublist in asdf for item in sublist]
    vmd.boneframes = flat_boneframe_list

    core.MY_PRINT_FUNC("")
    # write out the VMD
    output_filename_vmd = "%s_smoothed.vmd" % input_filename_vmd[0:-4]
    output_filename_vmd = core.get_unused_file_name(output_filename_vmd)
    vmdlib.write_vmd(output_filename_vmd, vmd, moreinfo=moreinfo)

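    # (debug) histogram of the accumulated angle-sharpness factors, useful for tuning the smoothing behavior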
    # H = plt.hist([j for j in ANGLE_SHARPNESS_FACTORS if j!=0 and j!=1], bins=40, density=True)
    print("factors=", len(ANGLE_SHARPNESS_FACTORS))
    H = plt.hist(ANGLE_SHARPNESS_FACTORS, bins=16, density=True)
    plt.show()

    core.MY_PRINT_FUNC("Done!")
    return None
Example #24
def dispframe_fix(pmx: pmxstruct.Pmx, moreinfo=False):
    # root group: "Root"/"Root"
    # facial group: "表情"/"Exp"

    fix_root = 0
    fix_center = 0
    hidden_morphs_removed = 0
    duplicate_entries_removed = 0
    empty_groups_removed = 0

    # find the ID# for motherbone... if not found, use whatever is at 0
    motherid = core.my_list_search(pmx.bones, lambda x: x.name_jp == "全ての親")
    if motherid is None:
        motherid = 0

    # ensure that "motherbone" and nothing else is in the root:
    for d, frame in enumerate(pmx.frames):
        # only operate on the root group
        if frame.name_jp == "Root" and frame.name_en == "Root" and frame.is_special:
            newframelist = [
                pmxstruct.PmxFrameItem(is_morph=False, idx=motherid)
            ]
            if frame.items != newframelist:
                # if the itemslist is not exactly only motherbone, make it exactly only motherbone
                frame.items = newframelist
                fix_root += 1
            break
    if fix_root and moreinfo:
        core.MY_PRINT_FUNC("fixing root group")

    # fix the contents of the "center"/"センター" group
    # first, find it, or if it does not exist, make it
    centerid = core.my_list_search(pmx.frames, lambda x: x.name_jp == "センター")
    if centerid is None:
        centerid = 2
        newframe = pmxstruct.PmxFrame(name_jp="センター",
                                      name_en="Center",
                                      is_special=False,
                                      items=[])
        pmx.frames.insert(2, newframe)
        fix_center += 1
    # if motherbone was just forced into the root group, remove it from the center group
    if fix_root:
        removeme = core.my_list_search(pmx.frames[centerid].items,
                                       lambda x: x.idx == motherid)
        if removeme is not None:
            pmx.frames[centerid].items.pop(removeme)
    # ensure center contains the proper semistandard contents: view/center/groove/waist
    # find bone IDs for each of these desired bones
    centerframeboneids = [
        core.my_list_search(pmx.bones, lambda x: x.name_jp == name)
        for name in CENTER_FRAME_BONES
    ]
    for boneid in centerframeboneids:
        # if this bone does not exist, skip
        if boneid is None: continue
        # if this bone already in center, skip
        if any(item.idx == boneid for item in pmx.frames[centerid].items):
            continue
        # add an item for this bone to the group
        newitem = pmxstruct.PmxFrameItem(is_morph=False, idx=boneid)
        pmx.frames[centerid].items.append(newitem)
        # do not count moving a bone from root to center
        fix_center += 1
    if fix_center and moreinfo:
        core.MY_PRINT_FUNC("fixing center group")

    displayed_morphs = set()
    displayed_bones = set()
    # build sets of all bones/morphs that are in the panels
    # delete bones that are in the panels more than once
    # remove all morphs that are group 0
    for d, frame in enumerate(pmx.frames):  # for each display group,
        i = 0
        while i < len(frame.items):  # for each item in that display group,
            item = frame.items[i]
            if item.is_morph:  # if it is a morph
                # look up the morph
                morph = pmx.morphs[item.idx]
                # figure out what panel of this morph is
                # if it has an invalid panel #, discard it
                if morph.panel == pmxstruct.MorphPanel.HIDDEN:
                    frame.items.pop(i)
                    hidden_morphs_removed += 1
                # if this is valid but already in the set of used morphs, discard it
                elif item.idx in displayed_morphs:
                    frame.items.pop(i)
                    duplicate_entries_removed += 1
                # otherwise, add it to set of used morphs
                else:
                    displayed_morphs.add(item.idx)
                    i += 1
            else:  # if it is a bone
                # if this is already in the set of used bones, delete it
                if item.idx in displayed_bones:
                    frame.items.pop(i)
                    duplicate_entries_removed += 1
                # otherwise, add it to set of used bones
                else:
                    displayed_bones.add(item.idx)
                    i += 1

    if hidden_morphs_removed:
        core.MY_PRINT_FUNC("removed %d hidden morphs (cause of crashes)" %
                           hidden_morphs_removed)
        # core.MY_PRINT_FUNC("!!! Warning: do not add 'hidden' morphs to the display group! MMD will crash!")
    if duplicate_entries_removed and moreinfo:
        core.MY_PRINT_FUNC("removed %d duplicate bones or morphs" %
                           duplicate_entries_removed)

    # have identified which bones/morphs are displayed: now identify which ones are NOT
    # want all bones not already in 'displayed_bones' that are also visible and enabled
    undisplayed_bones = [
        d for d, bone in enumerate(pmx.bones)
        if (d not in displayed_bones) and bone.has_visible and bone.has_enabled
    ]
    if undisplayed_bones:
        if moreinfo:
            core.MY_PRINT_FUNC(
                "added %d undisplayed bones to new group 'morebones'" %
                len(undisplayed_bones))
        # add a new frame to hold all bones
        newframelist = [
            pmxstruct.PmxFrameItem(is_morph=False, idx=x)
            for x in undisplayed_bones
        ]
        newframe = pmxstruct.PmxFrame(name_jp="morebones",
                                      name_en="morebones",
                                      is_special=False,
                                      items=newframelist)
        pmx.frames.append(newframe)

    # build list of which morphs are NOT shown
    # want all morphs not already in 'displayed_morphs' that are not hidden
    undisplayed_morphs = [
        d for d, morph in enumerate(pmx.morphs)
        if (d not in displayed_morphs) and (
            morph.panel != pmxstruct.MorphPanel.HIDDEN)
    ]
    if undisplayed_morphs:
        if moreinfo:
            core.MY_PRINT_FUNC("added %d undisplayed morphs to Facials group" %
                               len(undisplayed_morphs))
        newframelist = [
            pmxstruct.PmxFrameItem(is_morph=True, idx=x)
            for x in undisplayed_morphs
        ]
        # find the facials/expressions group and add only to it
        # it should always be at index 1, but search for it to be extra safe
        idx = core.my_list_search(
            pmx.frames, lambda x: (x.name_jp == "表情" and x.is_special))
        if idx is not None:
            # concatenate to end of item list
            pmx.frames[idx].items += newframelist
        else:
            core.MY_PRINT_FUNC(
                "ERROR: unable to find semistandard 'expressions' display frame"
            )

    # check if there are too many morphs among all frames... if so, trim and remake "displayed morphs"
    # morphs can technically be in any frame; they SHOULD only be in the "expressions" frame, but models sometimes misplace them
    total_num_morphs = 0
    for frame in pmx.frames:
        i = 0
        while i < len(frame.items):
            # if this is a bone, skip it
            if not frame.items[i].is_morph:
                i += 1
            else:
                # if it is a morph, count it
                total_num_morphs += 1
                # if i have already counted too many morphs, pop it
                if total_num_morphs > MAX_MORPHS_IN_DISPLAY:
                    frame.items.pop(i)
                else:
                    i += 1
    num_morphs_over_limit = max(total_num_morphs - MAX_MORPHS_IN_DISPLAY, 0)
    if num_morphs_over_limit:
        core.MY_PRINT_FUNC(
            "removed %d morphs to stay under the %d morph limit (cause of crashes)"
            % (num_morphs_over_limit, MAX_MORPHS_IN_DISPLAY))
        core.MY_PRINT_FUNC(
            "!!! Warning: do not add the remaining morphs to the display group! MMD will crash!"
        )

    # delete any groups that are empty
    i = 0
    while i < len(pmx.frames):
        frame = pmx.frames[i]
        # if it is empty AND it is not "special" then delete it
        if len(frame.items) == 0 and not frame.is_special:
            pmx.frames.pop(i)
            empty_groups_removed += 1
        else:
            i += 1
    if empty_groups_removed and moreinfo:
        core.MY_PRINT_FUNC("removed %d empty groups" % empty_groups_removed)

    overall = num_morphs_over_limit + \
        fix_center + \
        empty_groups_removed + \
        len(undisplayed_bones) + \
        len(undisplayed_morphs) + \
        duplicate_entries_removed + \
        hidden_morphs_removed + \
        fix_root
    if overall == 0:
        core.MY_PRINT_FUNC("No changes are required")
        return pmx, False

    core.MY_PRINT_FUNC("Fixed %d things related to display pane groups" %
                       overall)
    return pmx, True
Example #25
def main(moreinfo=True):
    # prompt PMX name
    core.MY_PRINT_FUNC("Please enter name of PMX input file:")
    input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
    pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)

    # detect whether arm ik exists
    r = core.my_list_search(pmx.bones, lambda x: x.name_jp == jp_r + jp_newik)
    if r is None:
        r = core.my_list_search(pmx.bones,
                                lambda x: x.name_jp == jp_r + jp_newik2)
    l = core.my_list_search(pmx.bones, lambda x: x.name_jp == jp_l + jp_newik)
    if l is None:
        l = core.my_list_search(pmx.bones,
                                lambda x: x.name_jp == jp_l + jp_newik2)

    # decide whether to create or remove arm ik
    if r is None and l is None:
        # add IK branch
        core.MY_PRINT_FUNC(">>>> Adding arm IK <<<<")
        # set output name
        if input_filename_pmx.lower().endswith(pmx_noik_suffix.lower()):
            output_filename = input_filename_pmx[0:-(
                len(pmx_noik_suffix))] + pmx_yesik_suffix
        else:
            output_filename = input_filename_pmx[0:-4] + pmx_yesik_suffix
        for side in [jp_l, jp_r]:
            # first find all 3 arm bones
            # even if i insert into the list, this will still be a valid reference i think
            bones: List[pmxstruct.PmxBone] = []
            for n in [jp_arm, jp_elbow, jp_wrist]:
                i = core.my_list_search(pmx.bones,
                                        lambda x: x.name_jp == side + n,
                                        getitem=True)
                if i is None:
                    core.MY_PRINT_FUNC(
                        "ERROR1: semistandard bone '%s' is missing from the model, unable to create attached arm IK"
                        % (side + n))
                    raise RuntimeError()
                bones.append(i)
            # get parent of arm bone
            shoulder_idx = bones[0].parent_idx

            # then do the "remapping" on all existing bone references, to make space for inserting 4 bones
            # don't delete any bones, just remap them
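            # shiftmap format (assumption about the helper): ([boundary indices], [deltas]); the negative delta
            # marks an insertion, so every existing reference at or above shoulder_idx+1 gets pushed up by 4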
            bone_shiftmap = ([shoulder_idx + 1], [-4])
            apply_bone_remapping(pmx, [], bone_shiftmap)
            # new bones will be inserted AFTER shoulder_idx
            # newarm_idx = shoulder_idx+1
            # newelbow_idx = shoulder_idx+2
            # newwrist_idx = shoulder_idx+3
            # newik_idx = shoulder_idx+4

            # make copies of the 3 armchain bones
            for i, b in enumerate(bones):
                b: pmxstruct.PmxBone

                # newarm = b[0:5] + [shoulder_idx + i] + b[6:8]  # copy names/pos, parent, copy deform layer
                # newarm += [1, 0, 0, 0]  # rotateable, not translateable, not visible, not enabled(?)
                # newarm += [1, [shoulder_idx + 2 + i], 0, 0, [], 0, []]  # tail type, no inherit, no fixed axis,
                # newarm += b[19:21] + [0, [], 0, []]  # copy local axis, no ext parent, no ik
                # newarm[0] += jp_ikchainsuffix  # add suffix to jp name
                # newarm[1] += jp_ikchainsuffix  # add suffix to en name
                newarm = pmxstruct.PmxBone(
                    name_jp=b.name_jp + jp_ikchainsuffix,
                    name_en=b.name_en + jp_ikchainsuffix,
                    pos=b.pos,
                    parent_idx=b.parent_idx,
                    deform_layer=b.deform_layer,
                    deform_after_phys=b.deform_after_phys,
                    has_rotate=True,
                    has_translate=False,
                    has_visible=False,
                    has_enabled=True,
                    tail_type=True,
                    tail=shoulder_idx + 2 + i,
                    inherit_rot=False,
                    inherit_trans=False,
                    has_fixedaxis=False,
                    has_localaxis=b.has_localaxis,
                    localaxis_x=b.localaxis_x,
                    localaxis_z=b.localaxis_z,
                    has_externalparent=False,
                    has_ik=False,
                )
                pmx.bones.insert(shoulder_idx + 1 + i, newarm)
                # then change the existing arm/elbow (not the wrist) to inherit rot from them
                if i != 2:
                    b.inherit_rot = True
                    b.inherit_parent_idx = shoulder_idx + 1 + i
                    b.inherit_ratio = 1

            # copy the wrist to make the IK bone
            en_suffix = "_L" if side == jp_l else "_R"
            # get index of "upperbody" to use as parent of hand IK bone
            ikpar = core.my_list_search(pmx.bones,
                                        lambda x: x.name_jp == jp_upperbody)
            if ikpar is None:
                core.MY_PRINT_FUNC(
                    "ERROR1: semistandard bone '%s' is missing from the model, unable to create attached arm IK"
                    % jp_upperbody)
                raise RuntimeError()

            # newik = [side + jp_newik, en_newik + en_suffix] + bones[2][2:5] + [ikpar]  # new names, copy pos, new par
            # newik += bones[2][6:8] + [1, 1, 1, 1]  + [0, [0,1,0]] # copy deform layer, rot/trans/vis/en, tail type
            # newik += [0, 0, [], 0, [], 0, [], 0, []]  # no inherit, no fixed axis, no local axis, no ext parent, yes IK
            # # add the ik info: [is_ik, [target, loops, anglelimit, [[link_idx, []]], [link_idx, []]]] ] ]
            # newik += [1, [shoulder_idx+3, newik_loops, newik_angle, [[shoulder_idx+2,[]],[shoulder_idx+1,[]]] ] ]
            newik = pmxstruct.PmxBone(
                name_jp=side + jp_newik,
                name_en=en_newik + en_suffix,
                pos=bones[2].pos,
                parent_idx=ikpar,
                deform_layer=bones[2].deform_layer,
                deform_after_phys=bones[2].deform_after_phys,
                has_rotate=True,
                has_translate=True,
                has_visible=True,
                has_enabled=True,
                tail_type=False,
                tail=[0, 1, 0],
                inherit_rot=False,
                inherit_trans=False,
                has_fixedaxis=False,
                has_localaxis=False,
                has_externalparent=False,
                has_ik=True,
                ik_target_idx=shoulder_idx + 3,
                ik_numloops=newik_loops,
                ik_angle=newik_angle,
                ik_links=[
                    pmxstruct.PmxBoneIkLink(idx=shoulder_idx + 2),
                    pmxstruct.PmxBoneIkLink(idx=shoulder_idx + 1)
                ])
            pmx.bones.insert(shoulder_idx + 4, newik)

            # then add to dispframe
            # first, does the frame already exist?
            f = core.my_list_search(pmx.frames,
                                    lambda x: x.name_jp == jp_newik,
                                    getitem=True)
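            # note: the bare [is_morph, idx] list form used for frame items below matches an older
            # pmxstruct layout; newer versions may expect pmxstruct.PmxFrameItem objects instead (assumption)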
            if f is None:
                # need to create the new dispframe! easy
                newframe = pmxstruct.PmxFrame(name_jp=jp_newik,
                                              name_en=en_newik,
                                              is_special=False,
                                              items=[[0, shoulder_idx + 4]])
                pmx.frames.append(newframe)
            else:
                # frame already exists, also easy
                f.items.append([0, shoulder_idx + 4])
    else:
        # remove IK branch
        core.MY_PRINT_FUNC(">>>> Removing arm IK <<<<")
        # set output name
        if input_filename_pmx.lower().endswith(pmx_yesik_suffix.lower()):
            output_filename = input_filename_pmx[0:-(
                len(pmx_yesik_suffix))] + pmx_noik_suffix
        else:
            output_filename = input_filename_pmx[0:-4] + pmx_noik_suffix
        # identify all bones in ik chain of hand ik bones
        bone_dellist = []
        for b in [r, l]:
            # skip this side if it has no arm IK bone (the model may only have IK on one side)
            if b is None: continue
            bone_dellist.append(b)  # this IK bone
            bone_dellist.append(
                pmx.bones[b].ik_target_idx)  # the target of the bone
            for v in pmx.bones[b].ik_links:
                bone_dellist.append(v.idx)  # each link along the bone
        bone_dellist.sort()
        # build the remap thing
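        # (assumption about the helper) the rangemap pairs sorted boundary indices with cumulative
        # deletion counts, so all surviving bone references can be shifted down in a single pass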
        bone_shiftmap = delme_list_to_rangemap(bone_dellist)
        # do the actual delete & shift
        apply_bone_remapping(pmx, bone_dellist, bone_shiftmap)

        # delete dispframe for hand ik
        # first, does the frame already exist?
        f = core.my_list_search(pmx.frames, lambda x: x.name_jp == jp_newik)
        if f is not None:
            # frame already exists, delete it
            pmx.frames.pop(f)

        pass

    # write out
    output_filename = core.get_unused_file_name(output_filename)
    pmxlib.write_pmx(output_filename, pmx, moreinfo=moreinfo)
    core.MY_PRINT_FUNC("Done!")
    return None
Example #26
def main(moreinfo=True):
    # prompt PMX name
    core.MY_PRINT_FUNC("Please enter name of PMX input file:")
    input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
    pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)

    core.MY_PRINT_FUNC("")
    # valid input is any string that can be matched against a morph idx
    s = core.MY_GENERAL_INPUT_FUNC(
        lambda x: get_idx_in_pmxsublist(x, pmx.morphs) is not None, [
            "Please specify the target morph: morph #, JP name, or EN name (names are not case sensitive).",
            "Empty input will quit the script."
        ])
    # run the lookup again, because the validation lambda only returns True/False
    target_index = get_idx_in_pmxsublist(s, pmx.morphs)

    # when given empty text, done!
    if target_index == -1 or target_index is None:
        core.MY_PRINT_FUNC("quitting")
        return None

    # determine the morph type
    morphtype = pmx.morphs[target_index].morphtype
    core.MY_PRINT_FUNC("Found {} morph #{}: '{}' / '{}'".format(
        mtype_dict[morphtype], target_index, pmx.morphs[target_index].name_jp,
        pmx.morphs[target_index].name_en))

    # if it is a bone morph, ask for translation/rotation/both
    bone_mode = 0
    if morphtype == 2:
        bone_mode = core.MY_SIMPLECHOICE_FUNC((1, 2, 3), [
            "Bone morph detected: do you want to scale the motion(translation), rotation, or both?",
            "1 = motion(translation), 2 = rotation, 3 = both"
        ])

    # ask for factor: keep looping this prompt until getting a valid float
    def is_float(x):
        try:
            v = float(x)
            return True
        except ValueError:
            core.MY_PRINT_FUNC("Please enter a decimal number")
            return False

    factor_str = core.MY_GENERAL_INPUT_FUNC(
        is_float, "Enter the factor that you want to scale this morph by:")
    if factor_str == "":
        core.MY_PRINT_FUNC("quitting")
        return None
    factor = float(factor_str)

    # important values: target_index, factor, morphtype, bone_mode
    # first create the new morph that is a copy of current
    if SCALE_MORPH_IN_PLACE:
        newmorph = pmx.morphs[target_index]
    else:
        newmorph = copy.deepcopy(pmx.morphs[target_index])
        # then modify the names
        name_suffix = "*" + (str(factor)[0:6])
        newmorph.name_jp += name_suffix
        newmorph.name_en += name_suffix
    # now scale the actual values

    r = morph_scale(newmorph, factor, bone_mode)

    if not r:
        core.MY_PRINT_FUNC("quitting")
        return None

    pmx.morphs.append(newmorph)
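    # note (assumption): when SCALE_MORPH_IN_PLACE is True, this appends a second reference to the
    # already-scaled morph; presumably that flag is normally left False so a renamed copy is added instead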

    # write out
    output_filename_pmx = input_filename_pmx[0:-4] + ("_%dscal.pmx" %
                                                      target_index)
    output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
    pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=moreinfo)
    core.MY_PRINT_FUNC("Done!")
    return None
Example #27
def showhelp():
    # print info to explain the purpose of this file
    core.MY_PRINT_FUNC(helptext)
Example #28
def morph_scale(morph: pmxstruct.PmxMorph,
                scale: Union[List[float], float],
                bone_mode=0) -> bool:
    # note: this function allows for X/Y/Z dimensions to be scaled by different values, but the interface still only allows
    # scaling all 3 by the same value.
    # bone_mode: 1 = motion(translation), 2 = rotation, 3 = both
    # return false if it somehow has an invalid morph type, return true otherwise

    # independent x/y/z scale for bone & vertex morphs
    # UV and UV# morphs have independent x/y/z/w
    # material morphs only use one value

    # accept scale as a single int/float or as a list of up to 4 numbers (padded to length 4 with 1s)
    if isinstance(scale, int) or isinstance(scale, float):
        scale = [scale] * 4
    if len(scale) < 4:
        scale.extend([1] * (4 - len(scale)))

    if morph.morphtype == 2:  # bone
        # bone_mode: 1 = motion(translation), 2 = rotation, 3 = both
        if bone_mode in (2, 3):  # if ==2 or ==3, then do rotation
            for d, item in enumerate(morph.items):
                item: pmxstruct.PmxMorphItemBone  # type annotation for pycharm
                # i guess scaling in euclid-space is good enough? assuming all resulting components are <180
                # most bone morphs only rotate around one axis anyways
                item.rot = [x * s for x, s in zip(item.rot, scale)]
        if bone_mode in (1, 3):  # if ==1 or ==3, then do translation
            for d, item in enumerate(morph.items):
                item: pmxstruct.PmxMorphItemBone  # type annotation for pycharm
                # scale the morph XYZ
                item.move = [x * s for x, s in zip(item.move, scale)]
    elif morph.morphtype == 1:  # vertex
        # for each item in this morph:
        for d, item in enumerate(morph.items):
            item: pmxstruct.PmxMorphItemVertex  # type annotation for pycharm
            # scale the morph XYZ
            item.move = [x * s for x, s in zip(item.move, scale)]
    elif morph.morphtype in (3, 4, 5, 6, 7):  # UV  UV1 UV2 UV3 UV4
        for d, item in enumerate(morph.items):
            item: pmxstruct.PmxMorphItemUV  # type annotation for pycharm
            # scale the morph UV
            item.move = [x * s for x, s in zip(item.move, scale)]
    elif morph.morphtype == 8:  # material
        core.MY_PRINT_FUNC("material morph is WIP")
        for d, item in enumerate(morph.items):
            item: pmxstruct.PmxMorphItemMaterial  # type annotation for pycharm
            if item.is_add:
                # to scale additive morphs, just scale like normal
                item.alpha *= scale[0]
                item.specpower *= scale[0]
                item.edgealpha *= scale[0]
                item.edgesize *= scale[0]
                item.diffRGB = [d * scale[0] for d in item.diffRGB]
                item.specRGB = [d * scale[0] for d in item.specRGB]
                item.ambRGB = [d * scale[0] for d in item.ambRGB]
                item.edgeRGB = [d * scale[0] for d in item.edgeRGB]
                item.texRGBA = [d * scale[0] for d in item.texRGBA]
                item.toonRGBA = [d * scale[0] for d in item.toonRGBA]
                item.sphRGBA = [d * scale[0] for d in item.sphRGBA]
            else:
                # but to scale multiplicative morphs, scale around 1! meaning subtract 1, then scale, then add 1
                item.alpha = ((item.alpha - 1) * scale[0]) + 1
                item.specpower = ((item.specpower - 1) * scale[0]) + 1
                item.edgealpha = ((item.edgealpha - 1) * scale[0]) + 1
                item.edgesize = ((item.edgesize - 1) * scale[0]) + 1
                item.diffRGB = [((d - 1) * scale[0]) + 1 for d in item.diffRGB]
                item.specRGB = [((d - 1) * scale[0]) + 1 for d in item.specRGB]
                item.ambRGB = [((d - 1) * scale[0]) + 1 for d in item.ambRGB]
                item.edgeRGB = [((d - 1) * scale[0]) + 1 for d in item.edgeRGB]
                item.texRGBA = [((d - 1) * scale[0]) + 1 for d in item.texRGBA]
                item.toonRGBA = [((d - 1) * scale[0]) + 1
                                 for d in item.toonRGBA]
                item.sphRGBA = [((d - 1) * scale[0]) + 1 for d in item.sphRGBA]
    else:
        core.MY_PRINT_FUNC("Unhandled morph type")
        return False
    return True
Example #29
	
	if input_filename.lower().endswith((".vmd", ".vmd.bak")):
		# activate correct function
		convert_vmd_to_txt(input_filename, moreinfo=moreinfo)
	else:
		# activate correct function
		convert_txt_to_vmd(input_filename, moreinfo=moreinfo)
	core.MY_PRINT_FUNC("Done!")
	return
	
########################################################################################################################
# after all the functions are defined, actually execute main()
########################################################################################################################

if __name__ == '__main__':
	core.MY_PRINT_FUNC("Nuthouse01 - 10/10/2020 - v5.03")
	if DEBUG:
		# print info to explain the purpose of this file
		core.MY_PRINT_FUNC(helptext)
		core.MY_PRINT_FUNC("")
		
		main()
		core.pause_and_quit("Done with everything! Goodbye!")
	else:
		try:
			# print info to explain the purpose of this file
			core.MY_PRINT_FUNC(helptext)
			core.MY_PRINT_FUNC("")
			
			main()
			core.pause_and_quit("Done with everything! Goodbye!")
		except (KeyboardInterrupt, SystemExit):
			# this is normal and expected, do nothing and die normally
			pass
		except Exception as ee:
			# minimal handler sketch (the original except block was truncated in this snippet)
			print(ee)
			core.pause_and_quit("ERROR: something unexpected has occurred, sorry")
Example #30
def main(moreinfo=False):
    # prompt PMX name
    core.MY_PRINT_FUNC("Please enter name of PMX model file:")
    input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
    pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)

    #### how should these operations be ordered?
    # faces before verts, because faces define what verts are used
    # verts before weights, so i operate on fewer vertices & run faster
    # weights before bones, because weights determine what bones are used
    # verts before morph winnow, so i operate on fewer vertices & run faster
    # translate after bones/disp groups/morph winnow because they reduce the # of things to translate
    # uniquify after translate, because translate can map multiple different JP to same EN names
    # alphamorphs after translate, so it uses post-translate names for printing
    # deform order after translate, so it uses post-translate names for printing

    # if ANY stage returns True then it has made changes
    # final file-write is skipped only if NO stage has made changes
    is_changed = False
    core.MY_PRINT_FUNC("\n>>>> Deleting invalid & duplicate faces <<<<")
    pmx, is_changed_t = _prune_invalid_faces.prune_invalid_faces(pmx, moreinfo)
    is_changed |= is_changed_t  # or-equals: if any component returns true, then ultimately this func returns true
    core.MY_PRINT_FUNC("\n>>>> Deleting orphaned/unused vertices <<<<")
    pmx, is_changed_t = _prune_unused_vertices.prune_unused_vertices(
        pmx, moreinfo)
    is_changed |= is_changed_t
    core.MY_PRINT_FUNC("\n>>>> Deleting unused bones <<<<")
    pmx, is_changed_t = _prune_unused_bones.prune_unused_bones(pmx, moreinfo)
    is_changed |= is_changed_t
    core.MY_PRINT_FUNC("\n>>>> Normalizing vertex weights & normals <<<<")
    pmx, is_changed_t = _weight_cleanup.weight_cleanup(pmx, moreinfo)
    is_changed |= is_changed_t
    core.MY_PRINT_FUNC("\n>>>> Pruning imperceptible vertex morphs <<<<")
    pmx, is_changed_t = _morph_winnow.morph_winnow(pmx, moreinfo)
    is_changed |= is_changed_t
    core.MY_PRINT_FUNC(
        "\n>>>> Fixing display groups: duplicates, empty groups, missing items <<<<"
    )
    pmx, is_changed_t = _dispframe_fix.dispframe_fix(pmx, moreinfo)
    is_changed |= is_changed_t
    core.MY_PRINT_FUNC("\n>>>> Adding missing English names <<<<")
    pmx, is_changed_t = _translate_to_english.translate_to_english(
        pmx, moreinfo)
    is_changed |= is_changed_t
    core.MY_PRINT_FUNC(
        "\n>>>> Ensuring all names in the model are unique <<<<")
    pmx, is_changed_t = _uniquify_names.uniquify_names(pmx, moreinfo)
    is_changed |= is_changed_t
    core.MY_PRINT_FUNC("\n>>>> Fixing bone deform order <<<<")
    pmx, is_changed_t = _bonedeform_fix.bonedeform_fix(pmx, moreinfo)
    is_changed |= is_changed_t
    core.MY_PRINT_FUNC(
        "\n>>>> Standardizing alphamorphs and accounting for edging <<<<")
    pmx, is_changed_t = _alphamorph_correct.alphamorph_correct(pmx, moreinfo)
    is_changed |= is_changed_t

    core.MY_PRINT_FUNC("")
    core.MY_PRINT_FUNC(
        "++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
    core.MY_PRINT_FUNC(
        "++++      Scanning for other potential issues      ++++")
    core.MY_PRINT_FUNC("")

    num_badnames = find_shiftjis_unsupported_names(pmx, input_filename_pmx)
    if num_badnames:
        core.MY_PRINT_FUNC(
            "WARNING: found %d JP names that cannot be encoded with SHIFT-JIS, please replace the bad characters in the strings printed above!"
            % num_badnames)
        core.MY_PRINT_FUNC(
            "If the filepath contains bad characters, then MMD project files (.pmm .emm) will not properly store/load model data between sessions."
        )
        core.MY_PRINT_FUNC(
            "If the modelname/bones/morphs contain bad characters, then they will work just fine in MMD but will not properly save/load in VMD motion files."
        )

    longbone, longmorph = find_toolong_bonemorph(pmx)
    # also checks that bone/morph names can be stored in shift_jis for VMD usage
    if longmorph or longbone:
        core.MY_PRINT_FUNC("")
        core.MY_PRINT_FUNC(
            "Minor warning: this model contains bones/morphs with JP names that are too long (>15 bytes)."
        )
        core.MY_PRINT_FUNC(
            "These will work just fine in MMD but will not properly save/load in VMD motion files."
        )
        if longbone:
            ss = "[" + ", ".join(longbone[0:MAX_WARNING_LIST]) + "]"
            if len(longbone) > MAX_WARNING_LIST:
                ss = ss[0:-1] + ", ...]"
            core.MY_PRINT_FUNC(
                "These %d bones are too long (index[length]): %s" %
                (len(longbone), ss))
        if longmorph:
            ss = "[" + ", ".join(longmorph[0:MAX_WARNING_LIST]) + "]"
            if len(longmorph) > MAX_WARNING_LIST:
                ss = ss[0:-1] + ", ...]"
            core.MY_PRINT_FUNC(
                "These %d morphs are too long (index[length]): %s" %
                (len(longmorph), ss))

    shadowy_mats = find_shadowy_materials(pmx)
    if shadowy_mats:
        core.MY_PRINT_FUNC("")
        core.MY_PRINT_FUNC(
            "Minor warning: this model contains transparent materials with visible edging."
        )
        core.MY_PRINT_FUNC(
            "Edging is visible even if the material is transparent, so this will look like an ugly silhouette."
        )
        core.MY_PRINT_FUNC(
            "Either disable edging in MMD when using this model, or reduce the edge parameters to 0 and re-add them in the morph that restores its opacity."
        )
        ss = str(shadowy_mats[0:MAX_WARNING_LIST])
        if len(shadowy_mats) > MAX_WARNING_LIST:
            ss = ss[0:-1] + ", ...]"
        core.MY_PRINT_FUNC(
            "These %d materials need edging disabled (index): %s" %
            (len(shadowy_mats), ss))

    boneless_bodies = find_boneless_bonebodies(pmx)
    if boneless_bodies:
        core.MY_PRINT_FUNC("")
        core.MY_PRINT_FUNC(
            "WARNING: this model has bone-type rigidbodies that aren't anchored to any bones."
        )
        core.MY_PRINT_FUNC(
            "This won't crash MMD, but it is probably a mistake that needs to be corrected."
        )
        ss = str(boneless_bodies[0:MAX_WARNING_LIST])
        if len(boneless_bodies) > MAX_WARNING_LIST:
            ss = ss[0:-1] + ", ...]"
        core.MY_PRINT_FUNC("These %d bodies are boneless (index): %s" %
                           (len(boneless_bodies), ss))

    jointless_bodies = find_jointless_physbodies(pmx)
    if jointless_bodies:
        core.MY_PRINT_FUNC("")
        core.MY_PRINT_FUNC(
            "WARNING: this model has physics-type rigidbodies that aren't constrained by joints."
        )
        core.MY_PRINT_FUNC(
            "These will just roll around on the floor wasting processing power in MMD."
        )
        ss = str(jointless_bodies[0:MAX_WARNING_LIST])
        if len(jointless_bodies) > MAX_WARNING_LIST:
            ss = ss[0:-1] + ", ...]"
        core.MY_PRINT_FUNC("These %d bodies are jointless (index): %s" %
                           (len(jointless_bodies), ss))

    crashing_joints = find_crashing_joints(pmx)
    if crashing_joints:
        # make the biggest alert possible, because this is a critical issue
        core.MY_PRINT_FUNC("")
        core.MY_PRINT_FUNC(
            "! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ")
        core.MY_PRINT_FUNC(
            "CRITICAL WARNING: this model contains invalid joints which WILL cause MMD to crash!"
        )
        core.MY_PRINT_FUNC(
            "These must be manually deleted or repaired using PMXE.")
        core.MY_PRINT_FUNC("These %d joints are invalid (index): %s" %
                           (len(crashing_joints), crashing_joints))

    core.MY_PRINT_FUNC("")
    core.MY_PRINT_FUNC(
        "++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
    if not is_changed:
        core.MY_PRINT_FUNC(
            "++++             No writeback required              ++++")
        core.MY_PRINT_FUNC("Done!")
        return

    core.MY_PRINT_FUNC(
        "++++ Done with cleanup, saving improvements to file ++++")

    # write out
    # output_filename_pmx = "%s_better.pmx" % core.get_clean_basename(input_filename_pmx)
    output_filename_pmx = input_filename_pmx[0:-4] + "_better.pmx"
    output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
    pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=moreinfo)
    core.MY_PRINT_FUNC("Done!")
    return None