def read_vmd(vmd_filename: str, moreinfo=False) -> vmdstruct.Vmd:
	"""
	Read and parse a binary VMD motion file into a Vmd object.

	:param vmd_filename: path to the .vmd file on disk
	:param moreinfo: if True, print extra per-section statistics while parsing
	:return: vmdstruct.Vmd holding header, boneframes, morphframes, camframes,
		lightframes, shadowframes, and ikdispframes
	"""
	vmd_filename_clean = core.get_clean_basename(vmd_filename) + ".vmd"
	# creates object (header, boneframe_list, morphframe_list, camframe_list, lightframe_list, shadowframe_list, ikdispframe_list)
	# assumes the calling function already verified correct file extension
	core.MY_PRINT_FUNC("Begin reading VMD file '%s'" % vmd_filename_clean)
	vmd_bytes = core.read_binfile_to_bytes(vmd_filename)
	core.MY_PRINT_FUNC("...total size = %sKB" % round(len(vmd_bytes) / 1024))
	core.MY_PRINT_FUNC("Begin parsing VMD file '%s'" % vmd_filename_clean)
	# reset the module-level read cursor so parsing starts from byte 0
	core.reset_unpack()
	# VMD strings are stored as shift_jis
	core.set_encoding("shift_jis")
	# !!!! this does eliminate all the garbage data MMD used to pack strings so this isnt 100% reversable !!!
	# read the bytes object and return all the data from teh VMD broken up into a list of lists
	# also convert things from packed formats to human-readable scales
	# (quaternion to euler, radians to degrees, floats to ints, etc)
	# also generate the bonedict and morphdict
	core.print_progress_oneline(0)
	# the sections must be parsed in this exact order: they are concatenated
	# in the file and each parse advances the shared read cursor
	A = parse_vmd_header(vmd_bytes, moreinfo)
	B = parse_vmd_boneframe(vmd_bytes, moreinfo)
	C = parse_vmd_morphframe(vmd_bytes, moreinfo)
	D = parse_vmd_camframe(vmd_bytes, moreinfo)
	E = parse_vmd_lightframe(vmd_bytes, moreinfo)
	F = parse_vmd_shadowframe(vmd_bytes, moreinfo)
	G = parse_vmd_ikdispframe(vmd_bytes, moreinfo)
	if moreinfo: core.print_failed_decodes()
	bytes_remain = len(vmd_bytes) - core.get_readfrom_byte()
	if bytes_remain != 0:
		# padding with my SIGNATURE is acceptable, anything else is strange
		leftover = vmd_bytes[core.get_readfrom_byte():]
		if leftover == bytes(SIGNATURE, encoding="shift_jis"):
			core.MY_PRINT_FUNC("...note: this VMD file was previously modified with this tool!")
		else:
			core.MY_PRINT_FUNC("Warning: finished parsing but %d bytes are left over at the tail!" % bytes_remain)
			core.MY_PRINT_FUNC("The file may be corrupt or maybe it contains unknown/unsupported data formats")
			core.MY_PRINT_FUNC(leftover)
	core.MY_PRINT_FUNC("Done parsing VMD file '%s'" % vmd_filename_clean)
	vmd = vmdstruct.Vmd(A, B, C, D, E, F, G)
	# this is where sorting happens, if it happens
	if GUARANTEE_FRAMES_SORTED:
		# bones & morphs: primarily sorted by NAME, with FRAME# as tiebreaker. the second sort is the primary one.
		# (relies on list.sort being stable, so sorting by frame then by name
		# yields name-major, frame-minor ordering)
		vmd.boneframes.sort(key=lambda x: x.f)  # frame#
		vmd.boneframes.sort(key=lambda x: x.name)  # name
		vmd.morphframes.sort(key=lambda x: x.f)
		vmd.morphframes.sort(key=lambda x: x.name)
		# all of these only sort by frame number.
		vmd.camframes.sort(key=lambda x: x.f)  # frame#
		vmd.lightframes.sort(key=lambda x: x.f)
		vmd.shadowframes.sort(key=lambda x: x.f)
		vmd.ikdispframes.sort(key=lambda x: x.f)
	return vmd
def encode_vmd_camframe(nice: List[vmdstruct.VmdCamFrame], moreinfo: bool) -> bytearray:
	"""Pack a list of camera frames into VMD binary form: frame count, then each frame record."""
	output = bytearray()
	###########################################
	# cam frames
	# first, the number of frames
	if moreinfo:
		core.MY_PRINT_FUNC("...# of camframes = %d" % len(nice))
	output += core.my_pack(fmt_number, len(nice))
	total = len(nice)
	# then, all the actual frames
	for idx, cf in enumerate(nice):
		# the binary format stores rotation in radians, the struct holds degrees
		rot_rads = list(map(math.radians, cf.rot))
		try:
			fields = [cf.f, cf.dist]
			fields.extend(cf.pos)
			fields.extend(rot_rads)
			fields.extend(cf.interp)
			fields.append(cf.fov)
			fields.append(cf.perspective)
			output += core.my_pack(fmt_camframe, fields)
		except Exception as e:
			core.MY_PRINT_FUNC(e.__class__.__name__, e)
			core.MY_PRINT_FUNC("line=", idx)
			core.MY_PRINT_FUNC("section=camframe")
			core.MY_PRINT_FUNC("Err: something went wrong while synthesizing binary output, probably the wrong type/order of values on a line")
			raise RuntimeError()
		# progress thing just because
		core.print_progress_oneline(idx / total)
	return output
def encode_vmd_morphframe(nice: List[vmdstruct.VmdMorphFrame], moreinfo: bool) -> bytearray:
	"""Pack a list of morph frames into VMD binary form: frame count, then each frame record."""
	output = bytearray()
	###########################################
	# morph frames
	# first, the number of frames
	if moreinfo:
		core.MY_PRINT_FUNC("...# of morphframes = %d" % len(nice))
	output += core.my_pack(fmt_number, len(nice))
	total = len(nice)
	# then, all the actual frames
	for idx, mf in enumerate(nice):
		try:
			output += core.my_pack(fmt_morphframe, [mf.name, mf.f, mf.val])
		except Exception as e:
			core.MY_PRINT_FUNC(e.__class__.__name__, e)
			core.MY_PRINT_FUNC("line=", idx)
			core.MY_PRINT_FUNC("section=morphframe")
			core.MY_PRINT_FUNC("Err: something went wrong while synthesizing binary output, probably the wrong type/order of values on a line")
			raise RuntimeError()
		# print a progress update every so often just because
		# (morph progress is offset past the bone section's share of the total work)
		core.print_progress_oneline(ENCODE_PERCENT_BONE + (ENCODE_PERCENT_MORPH * idx / total))
	return output
def parse_vmd_morphframe(raw: bytearray, moreinfo: bool) -> List[vmdstruct.VmdMorphFrame]:
	"""Read the morph-frame section from the VMD bytes, starting at the current module read cursor."""
	morphframe_list = []
	# is there enough file left to read a single number?
	if (len(raw) - core.get_readfrom_byte()) < struct.calcsize(fmt_number):
		core.MY_PRINT_FUNC("Warning: expected morphframe_ct field but file ended unexpectedly! Assuming 0 morphframes and continuing...")
		return morphframe_list
	############################
	# get the number of morph frames
	morphframe_ct = core.my_unpack(fmt_number, raw)
	if moreinfo:
		core.MY_PRINT_FUNC("...# of morphframes = %d" % morphframe_ct)
	for idx in range(morphframe_ct):
		try:
			# each record is name + frame number + morph value
			name, framenum, value = core.my_unpack(fmt_morphframe, raw)
			morphframe_list.append(vmdstruct.VmdMorphFrame(name=name, f=framenum, val=value))
			# display progress printouts
			core.print_progress_oneline(core.get_readfrom_byte() / len(raw))
		except Exception as e:
			core.MY_PRINT_FUNC(e.__class__.__name__, e)
			core.MY_PRINT_FUNC("frame=", idx)
			core.MY_PRINT_FUNC("totalframes=", morphframe_ct)
			core.MY_PRINT_FUNC("section=morphframe")
			core.MY_PRINT_FUNC("Err: something went wrong while parsing, file is probably corrupt/malformed")
			raise RuntimeError()
	return morphframe_list
def write_vmd(vmd_filename: str, vmd: vmdstruct.Vmd, moreinfo=False):
	"""
	Encode a Vmd object to binary and write it to disk.

	:param vmd_filename: destination path for the .vmd file
	:param vmd: the Vmd object to serialize (may be re-sorted in place if GUARANTEE_FRAMES_SORTED)
	:param moreinfo: if True, print extra per-section statistics while encoding
	"""
	vmd_filename_clean = core.get_clean_basename(vmd_filename) + ".vmd"
	# recives object (header, boneframe_list, morphframe_list, camframe_list, lightframe_list, shadowframe_list, ikdispframe_list)
	# first, verify that the data is valid before trying to write
	vmd.validate()
	# assumes the calling function already verified correct file extension
	core.MY_PRINT_FUNC("Begin encoding VMD file '%s'" % vmd_filename_clean)
	# VMD strings are stored as shift_jis
	core.set_encoding("shift_jis")
	core.print_progress_oneline(0)
	# this is where sorting happens, if it happens
	if GUARANTEE_FRAMES_SORTED:
		# bones & morphs: primarily sorted by NAME, with FRAME# as tiebreaker. the second sort is the primary one.
		# (relies on list.sort being stable)
		vmd.boneframes.sort(key=lambda x: x.f)  # frame#
		vmd.boneframes.sort(key=lambda x: x.name)  # name
		vmd.morphframes.sort(key=lambda x: x.f)
		vmd.morphframes.sort(key=lambda x: x.name)
		# all of these only sort by frame number.
		vmd.camframes.sort(key=lambda x: x.f)  # frame#
		vmd.lightframes.sort(key=lambda x: x.f)
		vmd.shadowframes.sort(key=lambda x: x.f)
		vmd.ikdispframes.sort(key=lambda x: x.f)
	# set the module-level progress weights that encode_vmd_boneframe/morphframe read
	global ENCODE_PERCENT_BONE
	global ENCODE_PERCENT_MORPH
	# cam is not included cuz a file contains only bone+morph OR cam
	total_bone = len(vmd.boneframes) * ENCODE_FACTOR_BONE
	total_morph = len(vmd.morphframes) * ENCODE_FACTOR_MORPH
	ALLENCODE = total_bone + total_morph
	if ALLENCODE == 0: ALLENCODE = 1  # just a bandaid to avoid zero-div error when writing empty VMD
	ENCODE_PERCENT_BONE = total_bone / ALLENCODE
	ENCODE_PERCENT_MORPH = total_morph / ALLENCODE
	# arg "vmd" is the same structure created by "parse_vmd()"
	# assume the object is perfect, no sanity-checking needed, it will all be done when parsing the text input
	# sections must be emitted in this exact order to match the VMD file layout
	output_bytes = bytearray()
	output_bytes += encode_vmd_header(vmd.header, moreinfo)
	output_bytes += encode_vmd_boneframe(vmd.boneframes, moreinfo)
	output_bytes += encode_vmd_morphframe(vmd.morphframes, moreinfo)
	output_bytes += encode_vmd_camframe(vmd.camframes, moreinfo)
	output_bytes += encode_vmd_lightframe(vmd.lightframes, moreinfo)
	output_bytes += encode_vmd_shadowframe(vmd.shadowframes, moreinfo)
	output_bytes += encode_vmd_ikdispframe(vmd.ikdispframes, moreinfo)
	# done encoding!!
	# add a cheeky little binary stamp just to prove that people actually used my tool :)
	if APPEND_SIGNATURE:
		# signature to prove that this file was created with this tool
		output_bytes += bytes(SIGNATURE, encoding="shift_jis")
	core.MY_PRINT_FUNC("Begin writing VMD file '%s'" % vmd_filename_clean)
	core.MY_PRINT_FUNC("...total size = %s" % core.prettyprint_file_size(len(output_bytes)))
	core.write_bytes_to_binfile(vmd_filename, output_bytes)
	core.MY_PRINT_FUNC("Done writing VMD file '%s'" % vmd_filename_clean)
	# done with everything!
	return
def read_vmdtext_camframe(rawlist_text: List[list]) -> List[vmdstruct.VmdCamFrame]:
	"""Consume the cam-frame section of a VMD-as-text list, advancing the module read pointer."""
	###########################################
	# cam frames
	global readfrom_line
	cam_list = []
	# header line must announce the camframe count
	check2_match_first_item(rawlist_text, keystr_camframect)
	camframe_ct = rawlist_text[readfrom_line][1]
	core.MY_PRINT_FUNC("...# of camframes = %d" % camframe_ct)
	readfrom_line += 1
	if camframe_ct <= 0:
		return cam_list
	# ensure the key-line is where i think it is
	check3_match_keystr(rawlist_text, keystr_camframekey)
	# if it is indeed here, then inc the readpointer
	readfrom_line += 1
	for idx in range(camframe_ct):
		# ensure it has the right # of items on the line
		check1_match_len(rawlist_text, len(keystr_camframekey))
		row = rawlist_text[readfrom_line]
		cam_list.append(vmdstruct.VmdCamFrame(f=row[0],
											  dist=row[1],
											  pos=row[2:5],
											  rot=row[5:8],
											  interp=row[8:32],
											  fov=row[32],
											  perspective=row[33]))
		# increment the readfrom_line pointer
		readfrom_line += 1
		# progress tracker just because
		core.print_progress_oneline(idx / camframe_ct)
	return cam_list
def read_vmdtext_morphframe(rawlist_text: List[list]) -> List[vmdstruct.VmdMorphFrame]:
	"""Consume the morph-frame section of a VMD-as-text list, advancing the module read pointer."""
	###########################################
	# morph frames
	global readfrom_line
	morph_list = []
	# first check for bad format
	check2_match_first_item(rawlist_text, keystr_morphframect)
	morphframe_ct = rawlist_text[readfrom_line][1]
	core.MY_PRINT_FUNC("...# of morphframes = %d" % morphframe_ct)
	readfrom_line += 1
	if morphframe_ct <= 0:
		return morph_list
	# ensure the key-line is where i think it is
	check3_match_keystr(rawlist_text, keystr_morphframekey)
	# if it is indeed here, then inc the readpointer
	readfrom_line += 1
	for idx in range(morphframe_ct):
		# ensure it has the right # of items on the line
		check1_match_len(rawlist_text, len(keystr_morphframekey))
		row = rawlist_text[readfrom_line]
		morph_list.append(vmdstruct.VmdMorphFrame(name=row[0], f=row[1], val=row[2]))
		# increment the readfrom_line pointer
		readfrom_line += 1
		# progress tracker just because
		core.print_progress_oneline(idx / morphframe_ct)
	return morph_list
def read_vmdtext_boneframe(rawlist_text: List[list]) -> List[vmdstruct.VmdBoneFrame]:
	"""Consume the bone-frame section of a VMD-as-text list, advancing the module read pointer."""
	#############################
	# bone frames
	global readfrom_line
	bone_list = []
	# first, check for bad format
	check2_match_first_item(rawlist_text, keystr_boneframect)
	boneframe_ct = rawlist_text[readfrom_line][1]
	readfrom_line += 1
	core.MY_PRINT_FUNC("...# of boneframes = %d" % boneframe_ct)
	if boneframe_ct <= 0:
		return bone_list
	# ensure the key-line is where i think it is
	check3_match_keystr(rawlist_text, keystr_boneframekey)
	# if it is indeed here, then inc the readpointer
	readfrom_line += 1
	for idx in range(boneframe_ct):
		# ensure it has the right # of items on the line
		check1_match_len(rawlist_text, len(keystr_boneframekey))
		# the nicelist has angles in euler format, don't convert the values here
		row = rawlist_text[readfrom_line]
		bone_list.append(vmdstruct.VmdBoneFrame(name=row[0],
												f=row[1],
												pos=row[2:5],
												rot=row[5:8],
												phys_off=row[8],
												interp=row[9:]))
		# increment the readfrom_line pointer
		readfrom_line += 1
		# progress tracker just because
		core.print_progress_oneline(idx / boneframe_ct)
	return bone_list
def parse_vmd_boneframe(raw:bytearray, moreinfo:bool) -> List[vmdstruct.VmdBoneFrame]:
	"""
	Read the bone-frame section from the VMD bytes, starting at the current module read cursor.

	:param raw: full file contents as bytes
	:param moreinfo: if True, print the frame count
	:return: list of VmdBoneFrame objects (angles converted to euler degrees)
	:raises RuntimeError: if unpacking any frame fails (file corrupt/malformed)
	"""
	# get all the bone-frames, store in a list of lists
	boneframe_list = []
	# verify that there is enough file left to read a single number
	if (len(raw) - core.get_readfrom_byte()) < struct.calcsize(fmt_number):
		core.MY_PRINT_FUNC("Warning: expected boneframe_ct field but file ended unexpectedly! Assuming 0 boneframes and continuing...")
		return boneframe_list
	############################
	# get the number of bone-frames
	boneframe_ct = core.my_unpack(fmt_number, raw)
	if moreinfo: core.MY_PRINT_FUNC("...# of boneframes = %d" % boneframe_ct)
	for z in range(boneframe_ct):
		try:
			# unpack the bone-frame into variables
			(bname_str, f, xp, yp, zp, xrot_q, yrot_q, zrot_q, wrot_q) = core.my_unpack(fmt_boneframe_no_interpcurve, raw)
			# break inter_curve into its individual pieces, knowing that the 3rd and 4th bytes in line1 are overwritten with phys
			# therefore we need to get their data from line2 which is left-shifted by 1 byte, but otherwise a copy
			(x_ax, y_ax, phys1, phys2, x_ay, y_ay, z_ay, r_ay,
			 x_bx, y_bx, z_bx, r_bx, x_by, y_by, z_by, r_by,
			 z_ax, r_ax) = core.my_unpack(fmt_boneframe_interpcurve, raw)
			# convert the quaternion angles to euler angles
			# note: quaternion passed in WXYZ order
			(xrot, yrot, zrot) = core.quaternion_to_euler([wrot_q, xrot_q, yrot_q, zrot_q])
			# interpret the physics enable/disable bytes
			if (phys1, phys2) == (z_ax, r_ax):
				# if they match the values they should be, they were never overwritten in the first place???
				phys_off = False
			elif (phys1, phys2) == (0, 0):
				# phys stays on
				phys_off = False
			elif (phys1, phys2) == (99, 15):
				# phys turns off
				phys_off = True
			else:
				# unexpected byte pair: warn, and conservatively treat as phys off
				core.MY_PRINT_FUNC("Warning: found unusual values where I expected to find physics enable/disable! Assuming this means physics off")
				core.MY_PRINT_FUNC(bname_str, "f=", str(f), "(phys1,phys2)=", str((phys1, phys2)))
				phys_off = True
			# store them all on the list
			# create a list to hold all the boneframe data, then append it onto the return-list
			# interp order: (x,y,z,r) ax values, then ay, then bx, then by
			interp_list = [x_ax, y_ax, z_ax, r_ax, x_ay, y_ay, z_ay, r_ay,
						   x_bx, y_bx, z_bx, r_bx, x_by, y_by, z_by, r_by]
			this_boneframe = vmdstruct.VmdBoneFrame(
				name=bname_str, f=f, pos=[xp,yp,zp], rot=[xrot,yrot,zrot], phys_off=phys_off, interp=interp_list
			)
			boneframe_list.append(this_boneframe)
			# display progress printouts
			core.print_progress_oneline(core.get_readfrom_byte() / len(raw))
		except Exception as e:
			core.MY_PRINT_FUNC(e.__class__.__name__, e)
			core.MY_PRINT_FUNC("frame=", z)
			core.MY_PRINT_FUNC("totalframes=", boneframe_ct)
			core.MY_PRINT_FUNC("section=boneframe")
			core.MY_PRINT_FUNC("Err: something went wrong while parsing, file is probably corrupt/malformed")
			raise RuntimeError()
	return boneframe_list
def parse_vmd_camframe(raw: bytearray, moreinfo: bool) -> List[vmdstruct.VmdCamFrame]:
	"""Read the cam-frame section from the VMD bytes, starting at the current module read cursor."""
	camframe_list = []
	# is there enough file left to read a single number?
	if (len(raw) - core.get_readfrom_byte()) < struct.calcsize(fmt_number):
		core.MY_PRINT_FUNC("Warning: expected camframe_ct field but file ended unexpectedly! Assuming 0 camframes and continuing...")
		return camframe_list
	############################
	# get the number of cam frames
	camframe_ct = core.my_unpack(fmt_number, raw)
	if moreinfo:
		core.MY_PRINT_FUNC("...# of camframes = %d" % camframe_ct)
	for idx in range(camframe_ct):
		try:
			# unpack the whole fixed-layout record, then carve it up by position:
			# [0]=frame#, [1]=distance, [2:5]=position, [5:8]=rotation (radians),
			# [8:32]=24 interpolation bytes (x/y/z/rot/dist/fov, each ax,bx,ay,by),
			# [32]=fov, [33]=perspective flag
			fields = core.my_unpack(fmt_camframe, raw)
			framenum, dist = fields[0], fields[1]
			position = list(fields[2:5])
			# angle comes in as radians, convert radians to degrees
			rotation = [math.degrees(r) for r in fields[5:8]]
			interp_list = list(fields[8:32])
			fov, perspective = fields[32], fields[33]
			camframe_list.append(vmdstruct.VmdCamFrame(f=framenum,
													   dist=dist,
													   pos=position,
													   rot=rotation,
													   interp=interp_list,
													   fov=fov,
													   perspective=perspective))
			# display progress printouts
			core.print_progress_oneline(core.get_readfrom_byte() / len(raw))
		except Exception as e:
			core.MY_PRINT_FUNC(e.__class__.__name__, e)
			core.MY_PRINT_FUNC("frame=", idx)
			core.MY_PRINT_FUNC("totalframes=", camframe_ct)
			core.MY_PRINT_FUNC("section=camframe")
			core.MY_PRINT_FUNC("Err: something went wrong while parsing, file is probably corrupt/malformed")
			raise RuntimeError()
	return camframe_list
def encode_vmd_boneframe(nice: List[vmdstruct.VmdBoneFrame], moreinfo: bool) -> bytearray: output = bytearray() ############################# # bone frames # first, the number of frames if moreinfo: core.MY_PRINT_FUNC("...# of boneframes = %d" % len(nice)) output += core.my_pack(fmt_number, len(nice)) # then, all the actual frames for i, frame in enumerate(nice): # assemble the boneframe # first, gotta convert from euler to quaternion! euler = frame.rot # x y z (w, x, y, z) = core.euler_to_quaternion(euler) # w x y z quat = [x, y, z, w] # x y z w # then, do the part that isn't the interpolation curve (first 9 values in binary, 8 things in frame), save as frame try: # now encode/pack/append the non-interp, non-phys portion packme = [frame.name, frame.f, *frame.pos, *quat] # packme.extend(frame.pos) # packme.extend(quat) output += core.my_pack(fmt_boneframe_no_interpcurve, packme) # then, create one line of the interpolation curve (last 16 values of frame obj) interp = core.my_pack(fmt_boneframe_interpcurve_oneline, frame.interp) except Exception as e: core.MY_PRINT_FUNC(e.__class__.__name__, e) core.MY_PRINT_FUNC("line=", i) core.MY_PRINT_FUNC("section=boneframe") core.MY_PRINT_FUNC( "Err: something went wrong while synthesizing binary output, probably the wrong type/order of values on a line" ) raise RuntimeError() # do the dumb copy-and-shift thing to rebuild the original 4-line structure of redundant bytes interp += interp[1:] + bytes(1) + interp[2:] + bytes( 2) + interp[3:] + bytes(3) # now overwrite the odd missing bytes with physics enable/disable data if frame.phys_off is True: interp[2] = 99 interp[3] = 15 else: interp[2] = 0 interp[3] = 0 # append the interpolation data onto the real output output += interp # progress thing just because core.print_progress_oneline(ENCODE_PERCENT_BONE * i / len(nice)) return output
def main(moreinfo=True):
	"""
	Build foot/toe IK boneframes for a non-IK dance VMD by running forward
	kinematics over the leg bone-chains of a PMX model, then write the result
	out as a new VMD containing only the generated IK frames.

	:param moreinfo: passed through to the PMX/VMD read/write helpers for verbose output
	:raises RuntimeError: if the model's foot/toe (IK) bone structure is not semistandard
	"""
	# prompt PMX name
	core.MY_PRINT_FUNC("Please enter name of PMX input file:")
	input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
	pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
	# get bones
	realbones = pmx.bones
	# then, make 2 lists: one starting from jp_righttoe, one starting from jp_lefttoe
	# start from each "toe" bone (names are known), go parent-find-parent-find until reaching no-parent
	bonechain_r = build_bonechain(realbones, jp_righttoe)
	bonechain_l = build_bonechain(realbones, jp_lefttoe)
	# assert that the bones were found, have correct names, and are in the correct positions
	# also verifies that they are direct parent-child with nothing in between
	try:
		assert bonechain_r[-1].name == jp_righttoe
		assert bonechain_r[-2].name == jp_rightfoot
		assert bonechain_l[-1].name == jp_lefttoe
		assert bonechain_l[-2].name == jp_leftfoot
	except AssertionError:
		core.MY_PRINT_FUNC("ERROR: unexpected structure found for foot/toe bones, verify semistandard names and structure")
		raise RuntimeError()
	# then walk down these 2 lists, add each name to a set: build union of all relevant bones
	relevant_bones = set()
	for b in bonechain_r + bonechain_l:
		relevant_bones.add(b.name)
	# check if waist-cancellation bones are in "relevant_bones", print a warning if they are
	if jp_left_waistcancel in relevant_bones or jp_right_waistcancel in relevant_bones:
		# TODO LOW: i probably could figure out how to support them but this whole script is useless so idgaf
		core.MY_PRINT_FUNC("Warning: waist-cancellation bones found in the model! These are not supported, tool may produce bad results! Attempting to continue...")
	# also need to find initial positions of ik bones (names are known)
	# build a full parentage-chain for each leg
	bonechain_ikr = build_bonechain(realbones, jp_righttoe_ik)
	bonechain_ikl = build_bonechain(realbones, jp_lefttoe_ik)
	# verify that the ik bones were found, have correct names, and are in the correct positions
	try:
		assert bonechain_ikr[-1].name == jp_righttoe_ik
		assert bonechain_ikr[-2].name == jp_rightfoot_ik
		assert bonechain_ikl[-1].name == jp_lefttoe_ik
		assert bonechain_ikl[-2].name == jp_leftfoot_ik
	except AssertionError:
		core.MY_PRINT_FUNC("ERROR: unexpected structure found for foot/toe IK bones, verify semistandard names and structure")
		raise RuntimeError()
	# verify that the bonechains are symmetric in length
	try:
		assert len(bonechain_l) == len(bonechain_r)
		assert len(bonechain_ikl) == len(bonechain_ikr)
	except AssertionError:
		core.MY_PRINT_FUNC("ERROR: unexpected structure found, model is not left-right symmetric")
		raise RuntimeError()
	# determine how many levels of parentage, this value "t" should hold the first level where they are no longer shared
	t = 0
	while bonechain_l[t].name == bonechain_ikl[t].name:
		t += 1
	# back off one level
	lowest_shared_parent = t - 1
	# now i am completely done with the bones CSV, all the relevant info has been distilled down to:
	# !!! bonechain_r, bonechain_l, bonechain_ikr, bonechain_ikl, relevant_bones
	core.MY_PRINT_FUNC("...identified " + str(len(bonechain_l)) + " bones per leg-chain, " + str(len(relevant_bones)) + " relevant bones total")
	core.MY_PRINT_FUNC("...identified " + str(len(bonechain_ikl)) + " bones per IK leg-chain")
	###################################################################################
	# prompt VMD file name
	core.MY_PRINT_FUNC("Please enter name of VMD dance input file:")
	input_filename_vmd = core.MY_FILEPROMPT_FUNC(".vmd")
	nicelist_in = vmdlib.read_vmd(input_filename_vmd, moreinfo=moreinfo)
	# check if this VMD uses IK or not, print a warning if it does
	any_ik_on = False
	for ikdispframe in nicelist_in.ikdispframes:
		for ik_bone in ikdispframe.ikbones:
			if ik_bone.enable is True:
				any_ik_on = True
				break
	if any_ik_on:
		core.MY_PRINT_FUNC("Warning: the input VMD already has IK enabled, there is no point in running this script. Attempting to continue...")
	# reduce down to only the boneframes for the relevant bones
	# also build a list of each framenumber with a frame for a bone we care about
	relevant_framenums = set()
	boneframe_list = []
	for boneframe in nicelist_in.boneframes:
		if boneframe.name in relevant_bones:
			boneframe_list.append(boneframe)
			relevant_framenums.add(boneframe.f)
	# sort the boneframes by frame number
	boneframe_list.sort(key=lambda x: x.f)
	# make the relevant framenumbers also an ascending list
	relevant_framenums = sorted(list(relevant_framenums))
	boneframe_dict = dict()
	# now restructure the data from a list to a dictionary, keyed by bone name. also discard excess data when i do
	for b in boneframe_list:
		if b.name not in boneframe_dict:
			boneframe_dict[b.name] = []
		# only storing the frame#(1) + position(234) + rotation values(567)
		saveme = [b.f, *b.pos, *b.rot]
		boneframe_dict[b.name].append(saveme)
	core.MY_PRINT_FUNC("...running interpolation to rectangularize the frames...")
	has_warned = False
	# now fill in the blanks by using interpolation, if needed
	# goal: every bone gets exactly one entry per relevant framenum ("rectangular")
	for key, bone in boneframe_dict.items():  # for each bone,
		# start a list of frames generated by interpolation
		interpframe_list = []
		i = 0  # index into this bone's actual frames
		j = 0  # index into relevant_framenums
		while j < len(relevant_framenums):  # for each frame it should have,
			if i == len(bone):
				# if i is beyond end of bone, then copy the values from the last frame and use as a new frame
				newframe = [relevant_framenums[j]] + bone[-1][1:7]
				interpframe_list.append(newframe)
				j += 1
			elif bone[i][0] == relevant_framenums[j]:  # does it have it?
				i += 1
				j += 1
			else:
				# TODO LOW: i could modify this to include my interpolation curve math now that I understand it, but i dont care
				if not has_warned:
					core.MY_PRINT_FUNC("Warning: interpolation is needed but interpolation curves are not fully tested! Assuming linear interpolation...")
					has_warned = True
				# if there is a mismatch then the target framenum is less than the boneframe framenum
				# build a frame that has frame# + position(123) + rotation values(456)
				newframe = [relevant_framenums[j]]
				# if target is less than the current boneframe, interp between here and prev boneframe
				for p in range(1, 4):
					# interpolate for each position offset
					newframe.append(core.linear_map(bone[i][0], bone[i][p], bone[i - 1][0], bone[i - 1][p], relevant_framenums[j]))
				# rotation interpolation must happen in the quaternion-space
				quat1 = core.euler_to_quaternion(bone[i - 1][4:7])
				quat2 = core.euler_to_quaternion(bone[i][4:7])
				# desired frame is relevant_framenums[j] = d
				# available frames are bone[i-1][0] = s and bone[i][0] = e
				# percentage = (d - s) / (e - s)
				percentage = (relevant_framenums[j] - bone[i - 1][0]) / (bone[i][0] - bone[i - 1][0])
				quat_slerp = core.my_slerp(quat1, quat2, percentage)
				euler_slerp = core.quaternion_to_euler(quat_slerp)
				newframe += euler_slerp
				interpframe_list.append(newframe)
				j += 1
		bone += interpframe_list
		bone.sort(key=core.get1st)
	# the dictionary should be fully filled out and rectangular now
	for bone in boneframe_dict:
		assert len(boneframe_dict[bone]) == len(relevant_framenums)
	# now i am completely done reading the VMD file and parsing its data! everything has been distilled down to:
	# relevant_framenums, boneframe_dict
	###################################################################################
	# begin the actual calculations
	core.MY_PRINT_FUNC("...beginning forward kinematics computation for " + str(len(relevant_framenums)) + " frames...")
	# output array
	ikframe_list = []
	# have list of bones, parentage, initial pos
	# have list of frames
	# now i "run the dance" and build the ik frames
	# for each relevant frame,
	for I in range(len(relevant_framenums)):
		# for each side,
		for (thisik, this_chain) in zip([bonechain_ikr, bonechain_ikl], [bonechain_r, bonechain_l]):
			# for each bone in this_chain (ordered, start with root!),
			for J in range(len(this_chain)):
				# reset the current to be the inital position again
				this_chain[J].reset()
			# for each bone in this_chain (ordered, start with toe! do children before parents!)
			# also, don't read/use root! because the IK are also children of root, they inherit the same root transformations
			# count backwards from end to lowest_shared_parent, not including lowest_shared_parent
			for J in range(len(this_chain) - 1, lowest_shared_parent, -1):
				# get bone J within this_chain, translate to name
				name = this_chain[J].name
				# get bone [name] at index I: position & rotation
				try:
					xpos, ypos, zpos, xrot, yrot, zrot = boneframe_dict[name][I][1:7]
				except KeyError:
					continue
				# apply position offset to self & children
				# also resets the currposition when changing frames
				for K in range(J, len(this_chain)):
					# set this_chain[K].current456 = current456 + position
					this_chain[K].xcurr += xpos
					this_chain[K].ycurr += ypos
					this_chain[K].zcurr += zpos
				# apply rotation offset to all children, but not self
				_origin = [this_chain[J].xcurr, this_chain[J].ycurr, this_chain[J].zcurr]
				_angle = [xrot, yrot, zrot]
				_angle_quat = core.euler_to_quaternion(_angle)
				for K in range(J, len(this_chain)):
					# set this_chain[K].current456 = current rotated around this_chain[J].current456
					_point = [this_chain[K].xcurr, this_chain[K].ycurr, this_chain[K].zcurr]
					_newpoint = rotate3d(_origin, _angle_quat, _point)
					(this_chain[K].xcurr, this_chain[K].ycurr, this_chain[K].zcurr) = _newpoint
					# also rotate the angle of this bone
					curr_angle_euler = [this_chain[K].xrot, this_chain[K].yrot, this_chain[K].zrot]
					curr_angle_quat = core.euler_to_quaternion(curr_angle_euler)
					new_angle_quat = core.hamilton_product(_angle_quat, curr_angle_quat)
					new_angle_euler = core.quaternion_to_euler(new_angle_quat)
					(this_chain[K].xrot, this_chain[K].yrot, this_chain[K].zrot) = new_angle_euler
				pass
			pass
			# now i have cascaded this frame's pose data down the this_chain
			# grab foot/toe (-2 and -1) current position and calculate IK offset from that
			# first, foot:
			# footikend - footikinit = footikoffset
			xfoot = this_chain[-2].xcurr - thisik[-2].xinit
			yfoot = this_chain[-2].ycurr - thisik[-2].yinit
			zfoot = this_chain[-2].zcurr - thisik[-2].zinit
			# save as boneframe to be ultimately formatted for VMD:
			# need bonename = (known)
			# need frame# = relevantframe#s[I]
			# position = calculated
			# rotation = 0
			# phys = not disabled
			# interp = default (20/107)
			#
			# then, foot-angle: just copy the angle that the foot has
			if STORE_IK_AS_FOOT_ONLY:
				ikframe = [thisik[-2].name, relevant_framenums[I], xfoot, yfoot, zfoot, this_chain[-2].xrot, this_chain[-2].yrot, this_chain[-2].zrot, False]
			else:
				ikframe = [thisik[-2].name, relevant_framenums[I], xfoot, yfoot, zfoot, 0.0, 0.0, 0.0, False]
			# default interpolation curve: 20/20 and 107/107 control points
			ikframe += [20] * 8
			ikframe += [107] * 8
			# append the freshly-built frame
			ikframe_list.append(ikframe)
			if not STORE_IK_AS_FOOT_ONLY:
				# then, toe:
				# toeikend - toeikinit - footikoffset = toeikoffset
				xtoe = this_chain[-1].xcurr - thisik[-1].xinit - xfoot
				ytoe = this_chain[-1].ycurr - thisik[-1].yinit - yfoot
				ztoe = this_chain[-1].zcurr - thisik[-1].zinit - zfoot
				ikframe = [thisik[-1].name, relevant_framenums[I], xtoe, ytoe, ztoe, 0.0, 0.0, 0.0, False]
				ikframe += [20] * 8
				ikframe += [107] * 8
				# append the freshly-built frame
				ikframe_list.append(ikframe)
		# now done with a timeframe for all bones on both sides
		# print progress updates
		core.print_progress_oneline(I / len(relevant_framenums))
	core.MY_PRINT_FUNC("...done with forward kinematics computation, now writing output...")
	if INCLUDE_IK_ENABLE_FRAME:
		# create a single ikdispframe that enables the ik bones at frame 0
		ikbones = [vmdstruct.VmdIkbone(name=jp_rightfoot_ik, enable=True),
				   vmdstruct.VmdIkbone(name=jp_righttoe_ik, enable=True),
				   vmdstruct.VmdIkbone(name=jp_leftfoot_ik, enable=True),
				   vmdstruct.VmdIkbone(name=jp_lefttoe_ik, enable=True)]
		ikdispframe_list = [vmdstruct.VmdIkdispFrame(f=0, disp=True, ikbones=ikbones)]
	else:
		ikdispframe_list = []
		core.MY_PRINT_FUNC("Warning: IK following will NOT be enabled when this VMD is loaded, you will need enable it manually!")
	# convert old-style bonelist ikframe_list to new object format
	ikframe_list = [vmdstruct.VmdBoneFrame(name=r[0], f=r[1], pos=r[2:5], rot=r[5:8], phys_off=r[8], interp=r[9:]) for r in ikframe_list]
	# build actual VMD object
	nicelist_out = vmdstruct.Vmd(
		vmdstruct.VmdHeader(2, "SEMISTANDARD-IK-BONES--------"),
		ikframe_list,  # bone
		[],  # morph
		[],  # cam
		[],  # light
		[],  # shadow
		ikdispframe_list  # ikdisp
	)
	# write out
	output_filename_vmd = "%s_ik_from_%s.vmd" % \
						  (input_filename_vmd[0:-4], core.get_clean_basename(input_filename_pmx))
	output_filename_vmd = core.get_unused_file_name(output_filename_vmd)
	vmdlib.write_vmd(output_filename_vmd, nicelist_out, moreinfo=moreinfo)
	core.MY_PRINT_FUNC("Done!")
	return None
def repair_invalid_normals(pmx: pmxstruct.Pmx, normbad: List[int]) -> int:
	"""
	Repair all 0,0,0 normals in the model by averaging the normal vector for each face that vertex is a member
	of. It is theoretically possible for a vertex to be a member in two faces with exactly opposite normals, and
	therefore the average would be zero; in this case one of the faces is arbitrarily chosen and its normal is used.
	Therefore, after this function all invalid normals are guaranteed to be fixed.
	Returns the number of times this fallback method was used.
	
	:param pmx: PMX list-of-lists object (modified in-place: vert.norm is overwritten for each index in normbad)
	:param normbad: list of vertex indices so I don't need to walk all vertices again
	:return: # times fallback method was used
	"""
	normbad_err = 0
	# create a list in parallel with the faces list for holding the perpendicular normal to each face
	# (lazy cache: each entry stays an empty list until that face's normal is actually needed)
	facenorm_list = [list() for i in pmx.faces]
	# create a list in paralle with normbad for holding the set of faces connected to each bad-norm vert
	normbad_linked_faces = [list() for i in normbad]
	
	# goal: build the sets of faces that are associated with each bad vertex
	# first, flatten the list of face-vertices, probably faster to search that way
	# (flatlist index d maps back to face d//3, since every face holds exactly 3 vertex indices)
	flatlist = [item for sublist in pmx.faces for item in sublist]
	# second, for each face-vertex, check if it is a bad vertex
	# (this takes 70% of time)
	for d, facevert in enumerate(flatlist):
		core.print_progress_oneline(.7 * d / len(flatlist))
		# bad vertices are unique and in sorted order, can use binary search to further optimize
		whereinlist = core.binary_search_wherein(facevert, normbad)
		if whereinlist != -1:
			# if it is a bad vertex, int div by 3 to get face ID
			(normbad_linked_faces[whereinlist]).append(d // 3)
	
	# for each bad vert:
	# (this takes 30% of time)
	for d, (badvert_idx, badvert_faces) in enumerate(zip(normbad, normbad_linked_faces)):
		newnorm = [0, 0, 0]  # default value in case something goes wrong
		core.print_progress_oneline(.7 + (.3 * d / len(normbad)))
		# iterate over the faces it is connected to
		for face_id in badvert_faces:
			# for each face, does the perpendicular normal already exist in the parallel list? if not, calculate and save it for reuse
			facenorm = facenorm_list[face_id]
			if not facenorm:
				# need to calculate it! use cross product or whatever
				# q,r,s order of vertices is important!
				q = pmx.verts[pmx.faces[face_id][0]].pos
				r = pmx.verts[pmx.faces[face_id][1]].pos
				s = pmx.verts[pmx.faces[face_id][2]].pos
				# qr, qs order of vertices is critically important!
				# (cross(qr, qs) direction depends on winding order, so swapping them would flip the normal)
				qr = [r[i] - q[i] for i in range(3)]
				qs = [s[i] - q[i] for i in range(3)]
				facenorm = core.my_cross_product(qr, qs)
				# then normalize the fresh normal
				norm_L = core.my_euclidian_distance(facenorm)
				try:
					facenorm = [n / norm_L for n in facenorm]
				except ZeroDivisionError:
					# this should never happen in normal cases
					# however it can happen when the verts are at the same position and therefore their face has zero surface area
					facenorm = [0, 1, 0]
				# then save the result so I don't have to do this again
				facenorm_list[face_id] = facenorm
			# once I have the perpendicular normal for this face, then accumulate it (will divide later to get avg)
			for i in range(3):
				newnorm[i] += facenorm[i]
		# error case check, theoretically possible for this to happen if there are no connected faces or their normals exactly cancel out
		if newnorm == [0, 0, 0]:
			if len(badvert_faces) == 0:
				# if there are no connected faces, set the normal to 0,1,0 (same handling as PMXE)
				pmx.verts[badvert_idx].norm = [0, 1, 0]
			else:
				# if there are faces that just so happened to perfectly cancel, choose the first face and use its normal
				pmx.verts[badvert_idx].norm = facenorm_list[badvert_faces[0]]
				normbad_err += 1
			continue
		# when done accumulating, divide by # to make an average
		# zerodiv err not possible: if there are no connected faces then it will hit [0,0,0] branch above
		newnorm = [n / len(badvert_faces) for n in newnorm]
		# then normalize this, again
		norm_L = core.my_euclidian_distance(newnorm)
		newnorm = [n / norm_L for n in newnorm]
		# finally, apply this new normal
		pmx.verts[badvert_idx].norm = newnorm
	return normbad_err
def apply_bone_remapping(pmx: pmxstruct.Pmx, bone_dellist: List[int], bone_shiftmap: Tuple[List[int], List[int]]):
	"""
	Given a list of bones to delete, delete them, and update the indices for all references to all remaining bones.
	PMX is modified in-place. Behavior is undefined if the dellist bones are still in use somewhere!
	References include: vertex weight, bone morph, display frame, rigidbody anchor, bone tail, bone partial inherit,
	bone IK target, bone IK link.

	:param pmx: PMX object
	:param bone_dellist: list of bone indices to delete
	:param bone_shiftmap: created by delme_list_to_rangemap() before calling
	"""
	# ----- stage 1 of 5: vertices -----
	core.print_progress_oneline(0 / 5)
	# just remap whatever bones each vertex is weighted to; any reference to a bone
	# being deleted necessarily carries zero weight, so its post-remap value is irrelevant
	for vertex in pmx.verts:
		for weightpair in vertex.weight:
			weightpair[0] = newval_from_range_map(int(weightpair[0]), bone_shiftmap)

	# ----- stage 2 of 5: bone morphs -----
	core.print_progress_oneline(1 / 5)
	for morph in pmx.morphs:
		if morph.morphtype != pmxstruct.MorphType.BONE:
			continue  # only bone morphs hold bone references
		# bone morphs could plausibly reference otherwise-unused bones: drop entries
		# that point at deleted bones, remap the rest (order preserved)
		keep = []
		for entry in morph.items:
			entry: pmxstruct.PmxMorphItemBone
			if core.binary_search_isin(entry.bone_idx, bone_dellist):
				continue
			entry.bone_idx = newval_from_range_map(entry.bone_idx, bone_shiftmap)
			keep.append(entry)
		morph.items[:] = keep  # slice-assign so the list object itself is reused

	# ----- stage 3 of 5: display frames -----
	core.print_progress_oneline(2 / 5)
	for frame in pmx.frames:
		keep = []
		for item in frame.items:
			if item.is_morph:
				keep.append(item)  # morph entries don't reference bones, leave untouched
			elif core.binary_search_isin(item.idx, bone_dellist):
				continue  # bone entry pointing at a deleted bone: delete it here too
			else:
				item.idx = newval_from_range_map(item.idx, bone_shiftmap)
				keep.append(item)
		frame.items[:] = keep

	# ----- stage 4 of 5: rigid bodies -----
	core.print_progress_oneline(3 / 5)
	# only remap, no possibility of one of these bones being deleted
	for body in pmx.rigidbodies:
		body.bone_idx = newval_from_range_map(body.bone_idx, bone_shiftmap)

	# ----- stage 5 of 5: the bones themselves -----
	# point-at target, true parent, partial append, ik stuff
	core.print_progress_oneline(4 / 5)
	for bone in pmx.bones:
		# point-at link:
		if bone.tail_usebonelink:
			if core.binary_search_isin(bone.tail, bone_dellist):
				# pointing at a bone that will be deleted: switch to offset mode with offset 0,0,0
				bone.tail_usebonelink = False
				bone.tail = [0, 0, 0]
			else:
				bone.tail = newval_from_range_map(bone.tail, bone_shiftmap)
		# true parent: remap only
		bone.parent_idx = newval_from_range_map(bone.parent_idx, bone_shiftmap)
		# partial append:
		if (bone.inherit_rot or bone.inherit_trans) and bone.inherit_parent_idx != -1:
			if core.binary_search_isin(bone.inherit_parent_idx, bone_dellist):
				# inheriting from a bone getting deleted: break that relationship
				# shouldn't be possible but whatever, support the case anyway
				bone.inherit_rot = False
				bone.inherit_trans = False
				bone.inherit_parent_idx = -1
			else:
				bone.inherit_parent_idx = newval_from_range_map(bone.inherit_parent_idx, bone_shiftmap)
		# ik stuff:
		if bone.has_ik:
			bone.ik_target_idx = newval_from_range_map(bone.ik_target_idx, bone_shiftmap)
			for ik_link in bone.ik_links:
				ik_link.idx = newval_from_range_map(ik_link.idx, bone_shiftmap)
	return
def prune_unused_vertices(pmx: pmxstruct.Pmx, moreinfo=False):
	"""
	Delete all vertices that are not referenced by any face, and remap every remaining
	vertex reference (faces, vertex/UV morphs, softbody anchors & pins) accordingly.
	PMX is modified in-place.

	:param pmx: PMX object
	:param moreinfo: if True, print extra diagnostic info
	:return: tuple (pmx, True) if anything was deleted, (pmx, False) if no changes were needed
	"""
	#############################
	# ready for logic
	# vertices are referenced in faces, morphs (uv and vertex morphs), and soft bodies (should be handled just for completeness' sake)
	# plan:
	#   find individual vertices to delete: used-in-faces set vs all-vertices set, subtract, sort
	#   convert to rangemap of [begin, cumulative size] via delme_list_to_rangemap()
	#   remap all references, then actually pop the vertices

	# build set of USED vertices
	used_verts = set()
	for face in pmx.faces:
		used_verts.add(face[0])
		used_verts.add(face[1])
		used_verts.add(face[2])
	# build set of ALL vertices (just a range)
	all_verts = set(range(len(pmx.verts)))
	# derive set of UNUSED vertices & convert to ordered list
	delme_verts = sorted(all_verts.difference(used_verts))

	numdeleted = len(delme_verts)
	prevtotal = len(pmx.verts)
	if numdeleted == 0:
		core.MY_PRINT_FUNC("No changes are required")
		return pmx, False

	delme_range = delme_list_to_rangemap(delme_verts)

	if moreinfo:
		core.MY_PRINT_FUNC(
			"Detected %d orphan vertices arranged in %d contiguous blocks" %
			(len(delme_verts), len(delme_range[0])))

	# need to update places that reference vertices: faces, morphs, softbody
	# first get the total # of iterations I need to do, for progress purposes:
	# #faces + sum of len of all UV and vert morphs
	# NOTE(review): these int literals presumably match the values of the corresponding
	# pmxstruct.MorphType members (vertex + UV morph types) used elsewhere — confirm
	totalwork = len(pmx.faces) + sum(
		len(m.items) for m in pmx.morphs if m.morphtype in (1, 3, 4, 5, 6, 7))

	# faces:
	d = 0  # init so "d" is defined below even if pmx.faces is empty
	for d, face in enumerate(pmx.faces):
		# vertices in a face are not guaranteed sorted, and sorting them is a Very Bad Idea
		# therefore they must be remapped individually
		face[0] = newval_from_range_map(face[0], delme_range)
		face[1] = newval_from_range_map(face[1], delme_range)
		face[2] = newval_from_range_map(face[2], delme_range)
		# display progress printouts
		core.print_progress_oneline(d / totalwork)

	# morphs:
	orphan_vertex_references = 0
	for morph in pmx.morphs:
		# if not a vertex morph or UV morph, skip it
		if morph.morphtype not in (1, 3, 4, 5, 6, 7): continue
		lenbefore = len(morph.items)
		# it is plausible that vertex/uv morphs could reference orphan vertices, so I should check for and delete those
		i = 0
		while i < len(morph.items):
			# if the vertex being manipulated is in the list of verts being deleted,
			if core.binary_search_isin(morph.items[i].vert_idx, delme_verts):
				# delete it here too
				morph.items.pop(i)
				orphan_vertex_references += 1
			else:
				# otherwise keep it; remapping happens in bulk below for efficiency
				i += 1
		# morphs usually contain vertexes in sorted order, but not guaranteed!!! MAKE it sorted, nobody will mind
		morph.items.sort(key=lambda x: x.vert_idx)
		# separate the vertices from the morph entries into a list of their own, for more efficient remapping
		vertlist = [x.vert_idx for x in morph.items]
		# remap
		remappedlist = newval_from_range_map(vertlist, delme_range)
		# write the remapped values back into where they came from
		for x, newval in zip(morph.items, remappedlist):
			x.vert_idx = newval
		# display progress printouts
		d += lenbefore
		core.print_progress_oneline(d / totalwork)

	# softbody: probably not relevant but eh
	for soft in pmx.softbodies:
		# anchors
		# first, delete any references to delme verts in the anchors
		i = 0
		while i < len(soft.anchors_list):
			# if the vertex referenced is in the list of verts being deleted,
			if core.binary_search_isin(soft.anchors_list[i][1], delme_verts):
				# delete it here too
				soft.anchors_list.pop(i)
			else:
				# otherwise keep it; remapping happens in bulk below
				i += 1
		# MAKE it sorted, nobody will mind
		soft.anchors_list.sort(key=lambda x: x[1])
		# extract the vert indices into a list of their own
		anchorlist = [x[1] for x in soft.anchors_list]
		# remap
		newanchorlist = newval_from_range_map(anchorlist, delme_range)
		# write the remapped values back into where they came from
		for x, newval in zip(soft.anchors_list, newanchorlist):
			x[1] = newval

		# vertex pins
		# first, delete any references to delme verts
		i = 0
		while i < len(soft.vertex_pin_list):
			# if the vertex referenced is in the list of verts being deleted,
			if core.binary_search_isin(soft.vertex_pin_list[i], delme_verts):
				# delete it here too
				soft.vertex_pin_list.pop(i)
			else:
				# otherwise keep it; remapping happens in bulk below
				i += 1
		# MAKE it sorted, nobody will mind
		# BUGFIX: previously this sorted soft.anchors_list again (copy-paste error);
		# it's the vertex pin list that must be sorted before bulk remapping
		soft.vertex_pin_list.sort()
		# remap
		soft.vertex_pin_list = newval_from_range_map(soft.vertex_pin_list, delme_range)
	# done with softbodies!

	# now, finally, actually delete the vertices from the vertex list
	# (reverse order so earlier pops don't shift the later indices)
	delme_verts.reverse()
	for f in delme_verts:
		pmx.verts.pop(f)

	core.MY_PRINT_FUNC(
		"Identified and deleted {} / {} = {:.1%} vertices for being unused".
		format(numdeleted, prevtotal, numdeleted / prevtotal))

	return pmx, True
def main(moreinfo=True):
	"""
	Decompose arm/elbow rotation in a VMD into "swing" (kept on the source bone) plus
	"twist" (moved onto the corresponding armtwist bone), and write a new VMD.
	Prompts for a PMX (to learn each twist bone's axis) and a VMD (the dance to process).

	:param moreinfo: if True, passed through to the PMX/VMD read/write for extra printouts
	:return: None
	"""
	# the goal: extract rotation around the "arm" bone local X? axis and transfer it to rotation around the "armtwist" bone local axis
	# prompt PMX name
	core.MY_PRINT_FUNC("Please enter name of PMX input file:")
	input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
	pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
	core.MY_PRINT_FUNC("")
	# get bones
	realbones = pmx.bones
	twistbone_axes = []
	# then, grab the "twist" bones & save their fixed-rotate axes, if they have them
	# fallback plan: find the arm-to-elbow and elbow-to-wrist unit vectors and use those
	for i in range(len(jp_twistbones)):
		r = core.my_list_search(realbones, lambda x: x.name_jp == jp_twistbones[i], getitem=True)
		if r is None:
			core.MY_PRINT_FUNC("ERROR1: twist bone '{}'({}) cannot be found model, unable to continue. Ensure they use the correct semistandard names, or edit the script to change the JP names it is looking for.".format(jp_twistbones[i], eng_twistbones[i]))
			raise RuntimeError()
		if r.has_fixedaxis:
			# this bone DOES have fixed-axis enabled! use the unit vector in r[18]
			twistbone_axes.append(r.fixedaxis)
		else:
			# i can infer local axis by angle from arm-to-elbow or elbow-to-wrist
			start = core.my_list_search(realbones, lambda x: x.name_jp == jp_sourcebones[i], getitem=True)
			if start is None:
				core.MY_PRINT_FUNC("ERROR2: semistandard bone '%s' is missing from the model, unable to infer axis of rotation" % jp_sourcebones[i])
				raise RuntimeError()
			end = core.my_list_search(realbones, lambda x: x.name_jp == jp_pointat_bones[i], getitem=True)
			if end is None:
				core.MY_PRINT_FUNC("ERROR3: semistandard bone '%s' is missing from the model, unable to infer axis of rotation" % jp_pointat_bones[i])
				raise RuntimeError()
			start_pos = start.pos
			end_pos = end.pos
			# now have both startpoint and endpoint! find the delta!
			delta = [b - a for a,b in zip(start_pos, end_pos)]
			# normalize to length of 1
			length = core.my_euclidian_distance(delta)
			unit = [t / length for t in delta]
			twistbone_axes.append(unit)
	# done extracting axes limits from bone CSV, in list "twistbone_axes"
	core.MY_PRINT_FUNC("...done extracting axis limits from PMX...")
	
	###################################################################################
	# prompt VMD file name
	core.MY_PRINT_FUNC("Please enter name of VMD dance input file:")
	input_filename_vmd = core.MY_FILEPROMPT_FUNC(".vmd")
	# next, read/use/prune the dance vmd
	nicelist_in = vmdlib.read_vmd(input_filename_vmd, moreinfo=moreinfo)
	
	# sort boneframes into individual lists: one for each [Larm + Lelbow + Rarm + Relbow] and remove them from the master boneframelist
	# frames for all other bones stay in the master boneframelist
	all_sourcebone_frames = []
	for sourcebone in jp_sourcebones:
		# partition & writeback
		temp, nicelist_in.boneframes = core.my_list_partition(nicelist_in.boneframes, lambda x: x.name == sourcebone)
		# all frames for "sourcebone" get their own sublist here
		all_sourcebone_frames.append(temp)
	
	# verify that there is actually arm/elbow frames to process
	sourcenumframes = sum([len(x) for x in all_sourcebone_frames])
	if sourcenumframes == 0:
		core.MY_PRINT_FUNC("No arm/elbow bone frames are found in the VMD, nothing for me to do!")
		core.MY_PRINT_FUNC("Aborting: no files were changed")
		return None
	else:
		core.MY_PRINT_FUNC("...source contains " + str(sourcenumframes) + " arm/elbow bone frames to decompose...")
	
	if USE_OVERKEY_BANDAID:
		# to fix the path that the arms take during interpolation we need to overkey the frames
		# i.e. create intermediate frames that they should have been passing through already, to FORCE it to take the right path
		# i'm replacing the interpolation curves with actual frames
		for sublist in all_sourcebone_frames:
			newframelist = []
			sublist.sort(key=lambda x: x.f)  # ensure they are sorted by frame number
			# for each frame
			for i in range(1, len(sublist)):
				this = sublist[i]
				prev = sublist[i-1]
				# use interpolation curve i to interpolate from i-1 to i
				# first: do i need to do anything or are they already close on the timeline?
				thisframenum = this.f
				prevframenum = prev.f
				if (thisframenum - prevframenum) <= OVERKEY_FRAME_SPACING:
					continue
				# if they are far enough apart that i need to do something,
				thisframequat = core.euler_to_quaternion(this.rot)
				prevframequat = core.euler_to_quaternion(prev.rot)
				# 3, 7, 11, 15 = r_ax, r_ay, r_bx, r_by (rotation curve control points within the 16-value interp block)
				bez = core.MyBezier((this.interp[3], this.interp[7]), (this.interp[11], this.interp[15]), resolution=50)
				# create new frames at these frame numbers, spacing is OVERKEY_FRAME_SPACING
				for interp_framenum in range(prevframenum + OVERKEY_FRAME_SPACING, thisframenum, OVERKEY_FRAME_SPACING):
					# calculate the x time percentage from prev frame to this frame
					x = (interp_framenum - prevframenum) / (thisframenum - prevframenum)
					# apply the interpolation curve to translate X to Y
					y = bez.approximate(x)
					# interpolate from prev to this by amount Y
					interp_quat = core.my_slerp(prevframequat, thisframequat, y)
					# begin building the new frame
					newframe = vmdstruct.VmdBoneFrame(
						name=this.name,  # same name
						f=interp_framenum,  # overwrite frame num
						pos=list(this.pos),  # same pos (but make a copy)
						rot=list(core.quaternion_to_euler(interp_quat)),  # overwrite euler angles
						phys_off=this.phys_off,  # same phys_off
						interp=list(core.bone_interpolation_default_linear)  # overwrite interpolation
					)
					newframelist.append(newframe)
				# overwrite thisframe interp curve with default too
				this.interp = list(core.bone_interpolation_default_linear)  # overwrite custom interpolation
			# concat the new frames onto the existing frames for this sublist
			sublist += newframelist
	
	# re-count the number of frames for printing purposes
	totalnumframes = sum([len(x) for x in all_sourcebone_frames])
	overkeyframes = totalnumframes - sourcenumframes
	if overkeyframes != 0:
		core.MY_PRINT_FUNC("...overkeying added " + str(overkeyframes) + " arm/elbow bone frames...")
	core.MY_PRINT_FUNC("...beginning decomposition of " + str(totalnumframes) + " arm/elbow bone frames...")
	
	# now i am completely done reading the VMD file and parsing its data! everything has been distilled down to:
	# all_sourcebone_frames = [Larm, Lelbow, Rarm, Relbow] plus nicelist_in[1]
	
	###################################################################################
	# begin the actual calculations
	# output array
	new_twistbone_frames = []
	# progress tracker
	curr_progress = 0
	
	# for each sourcebone & corresponding twistbone,
	for (twistbone, axis_orig, sourcebone_frames) in zip(jp_twistbones, twistbone_axes, all_sourcebone_frames):
		# for each frame of the sourcebone,
		for frame in sourcebone_frames:
			# XYZrot = 567 euler
			quat_in = core.euler_to_quaternion(frame.rot)
			axis = list(axis_orig)  # make a copy to be safe
			# "swing twist decomposition"
			# swing = "local" x rotation and nothing else
			# swing = sourcebone, twist = twistbone
			(swing, twist) = swing_twist_decompose(quat_in, axis)
			# modify "frame" in-place
			# only modify the XYZrot to use new values
			new_sourcebone_euler = core.quaternion_to_euler(swing)
			frame.rot = list(new_sourcebone_euler)
			# create & store new twistbone frame
			# name=twistbone, framenum=copy, XYZpos=copy, XYZrot=new, phys=copy, interp16=copy
			new_twistbone_euler = core.quaternion_to_euler(twist)
			newframe = vmdstruct.VmdBoneFrame(
				name=twistbone,
				f=frame.f,
				pos=list(frame.pos),
				rot=list(new_twistbone_euler),
				phys_off=frame.phys_off,
				interp=list(frame.interp)
			)
			new_twistbone_frames.append(newframe)
			# print progress updates
			curr_progress += 1
			core.print_progress_oneline(curr_progress / totalnumframes)
	
	######################################################################
	# done with calculations!
	core.MY_PRINT_FUNC("...done with decomposition, now reassembling output...")
	# attach the list of newly created boneframes, modify the original input
	for sublist in all_sourcebone_frames:
		nicelist_in.boneframes += sublist
	nicelist_in.boneframes += new_twistbone_frames
	
	core.MY_PRINT_FUNC("")
	# write out the VMD
	output_filename_vmd = "%s_twistbones_for_%s.vmd" % \
		(input_filename_vmd[0:-4], core.get_clean_basename(input_filename_pmx))
	output_filename_vmd = core.get_unused_file_name(output_filename_vmd)
	vmdlib.write_vmd(output_filename_vmd, nicelist_in, moreinfo=moreinfo)
	
	core.MY_PRINT_FUNC("Done!")
	return None
def google_translate(in_list: STR_OR_STRLIST, strategy=1) -> STR_OR_STRLIST:
	"""
	Take a list of strings & get them all translated by asking Google. Can use per-line
	strategy or new 'chunkwise' strategy.

	:param in_list: list of JP or partially JP strings (a bare string is also accepted,
		and a bare string is returned in that case)
	:param strategy: 0=old per-line strategy, 1=new chunkwise strategy, 2=auto choose
		whichever needs less Google traffic
	:return: list of strings probably pure EN, but sometimes odd unicode symbols show up
	"""
	input_is_str = isinstance(in_list, str)
	if input_is_str:
		in_list = [in_list]  # force it to be a list anyway so I don't have to change my structure
	use_chunk_strat = (strategy == 1)
	
	# data flow:
	# in_list -> pretrans -> jp_chunks_set -> jp_chunks -> jp_chunks_packets -> results_packets -> results
	# jp_chunks + results -> google_dict
	# pretrans + google_dict -> outlist
	
	# 1. pre-translate to take care of common tasks
	indents, bodies, suffixes = translation_tools.pre_translate(in_list)
	
	# 2. identify chunks
	jp_chunks_set = set()
	# idea: walk & look for transition from en to jp?
	for s in bodies:  # for every string to translate,
		rstart = 0
		prev_islatin = True
		is_latin = True
		for i in range(len(s)):  # walk along its length one char at a time,
			# use "is_jp" here and not "is_latin" so chunks are defined to be only actual JP stuff and not unicode whatevers
			is_latin = not translation_tools.is_jp(s[i])
			# if char WAS latin but now is NOT latin, then this is the start of a range.
			if prev_islatin and not is_latin:
				rstart = i
			# if it was jp and is now latin, then this is the end of a range (not including here). save it!
			elif is_latin and not prev_islatin:
				jp_chunks_set.add(s[rstart:i])
			prev_islatin = is_latin
		# now outside the loop... if it ended with a non-latin char, grab the final range & add that too
		if not is_latin:
			jp_chunks_set.add(s[rstart:len(s)])
	
	# 3. remove chunks I can already solve
	# maybe localtrans can solve one chunk but not the whole string?
	# chunks are guaranteed to not be PART OF compound words. but they are probably compound words themselves.
	# run local trans on each chunk individually, and if it succeeds, then DON'T send it to google.
	localtrans_dict = dict()
	jp_chunks = []
	for chunk in list(jp_chunks_set):
		trans = translation_tools.piecewise_translate(chunk, translation_tools.words_dict)
		if translation_tools.is_jp(trans):
			# if the localtrans failed, then the chunk needs to be sent to google later
			jp_chunks.append(chunk)
		else:
			# if it passed, no need to ask google what they mean cuz I already have a good translation for this chunk
			# this will be added to the dict way later
			localtrans_dict[chunk] = trans
	
	# 4. packetize them into fewer requests (and if auto, choose whether to use chunks or not)
	jp_chunks_packets = packetize_translate_requests(jp_chunks)
	jp_bodies_packets = packetize_translate_requests(bodies)
	if strategy == 2:
		use_chunk_strat = (len(jp_chunks_packets) < len(jp_bodies_packets))
	
	# 5. check the translate budget to see if I can afford this
	num_calls = len(jp_chunks_packets) if use_chunk_strat else len(jp_bodies_packets)
	
	global _DISABLE_INTERNET_TRANSLATE
	if check_translate_budget(num_calls) and not _DISABLE_INTERNET_TRANSLATE:
		core.MY_PRINT_FUNC("... making %d requests to Google Translate web API..." % num_calls)
	else:
		# no need to print failing statement, the function already does
		# don't quit early, run thru the same full structure & eventually return a copy of the JP names
		core.MY_PRINT_FUNC("Just copying JP -> EN while Google Translate is disabled")
		_DISABLE_INTERNET_TRANSLATE = True
	
	# 6. send chunks to Google
	results_packets = []
	if use_chunk_strat:
		for d, packet in enumerate(jp_chunks_packets):
			core.print_progress_oneline(d / len(jp_chunks_packets))
			r = _single_google_translate(packet)
			results_packets.append(r)
		
		# 7. assemble Google responses & re-associate with the chunks
		# order of inputs "jp_chunks" matches order of outputs "results"
		results = unpacketize_translate_requests(results_packets)  # unpack
		google_dict = dict(zip(jp_chunks, results))  # build dict
		# (BUGFIX: removed leftover debug print() calls here that dumped the chunk stats
		# and the entire google_dict to raw stdout, bypassing core.MY_PRINT_FUNC)
		google_dict.update(localtrans_dict)  # add dict entries from things that succeeded localtrans
		google_dict.update(translation_tools.words_dict)  # add the full-blown words dict to the chunk-translate results
		# dict->list->sort->dict: sort the longest chunks first, VERY CRITICAL so things don't get undershadowed!!!
		google_dict = dict(sorted(list(google_dict.items()), reverse=True, key=lambda x: len(x[0])))
		
		# 8. piecewise translate using newly created dict
		outlist = translation_tools.piecewise_translate(bodies, google_dict)
	else:
		# old style: just translate the strings directly and return their results
		for d, packet in enumerate(jp_bodies_packets):
			core.print_progress_oneline(d / len(jp_bodies_packets))
			r = _single_google_translate(packet)
			results_packets.append(r)
		outlist = unpacketize_translate_requests(results_packets)
	
	# last, reattach the indents and suffixes
	outlist_final = [i + b + s for i, b, s in zip(indents, outlist, suffixes)]
	
	if not _DISABLE_INTERNET_TRANSLATE:
		# if i did use internet translate, print this line when done
		core.MY_PRINT_FUNC("... done!")
	
	# if original input was a single string, then de-listify before returning
	if input_is_str:
		return outlist_final[0]
	else:
		return outlist_final