def main():
    # ask for the model file and parse it
    model_path = core.prompt_user_filename(".pmx")
    pmx = pmxlib.read_pmx(model_path)

    core.MY_PRINT_FUNC("")
    # any input that resolves to a morph index is accepted
    user_text = core.MY_GENERAL_INPUT_FUNC(
        lambda x: morph_scale.get_idx_in_pmxsublist(x, pmx.morphs) is not None,
        [
            "Please specify the target morph: morph #, JP name, or EN name (names are not case sensitive).",
            "Empty input will quit the script."
        ])
    # the validator lambda only yields a bool, so resolve the index a second time
    morph = morph_scale.get_idx_in_pmxsublist(user_text, pmx.morphs)
    print(pmx.morphs[morph].name_jp)

    newmorphitems = []
    print("target morph controls %d verts" % len(pmx.morphs[morph].items))
    count = 0

    for item in pmx.morphs[morph].items:
        item: pmxstruct.PmxMorphItemVertex
        vert = pmx.verts[item.vert_idx]
        w = vert.weight

        # accumulate a rotation only for weight modes 1/3 (BDEF2/SDEF);
        # any other mode leaves rot at 0, meaning no change
        rot = 0
        if vert.weighttype in (1, 3):
            for bone_idx, rot_amount in zip(matchbones, rotamt):
                # weight-% on the matched bone times how far that bone rotates
                if w[0] == bone_idx:
                    rot += rot_amount * w[2]
                elif w[1] == bone_idx:
                    rot += rot_amount * (1 - w[2])
            # tally how many verts actually get rotated
            if rot != 0:
                count += 1
        # rotate2d() expects radians, not degrees
        rot = math.radians(rot)

        # rotate the YZ components of the morph vector around the origin
        ny, nz = core.rotate2d((0, 0), rot, item.move[1:3])
        newmorphitems.append(
            pmxstruct.PmxMorphItemVertex(item.vert_idx,
                                         [item.move[0], ny, nz]))

    print("partial-rotated %d verts" % count)

    # attach the rebuilt items as a brand-new morph, then write the model out
    pmx.morphs.append(pmxstruct.PmxMorph("v-rot", "v-rot", 1, 1, newmorphitems))
    OUT = core.get_unused_file_name("NEW.pmx")
    pmxlib.write_pmx(OUT, pmx)
    print("done")
Esempio n. 2
0
def main():
    pmxname = core.prompt_user_filename(".pmx")
    pmx = pmxlib.read_pmx(pmxname, moreinfo=True)

    # pair every vertex with its original index so the before->after mapping
    # can be recovered after sorting
    idxlist = list(range(len(pmx.verts)))
    vertlist = list(zip(pmx.verts, idxlist))

    # sort key: u, then v, then x, then y, then z (list + list concatenates)
    print("sorting")
    vertlist.sort(key=lambda pair: pair[0].uv + pair[0].pos)

    # split the (vertex, old_index) pairs back apart
    new_vertlist = [vert for vert, _ in vertlist]
    old_idxs = [old for _, old in vertlist]

    # install the freshly sorted vertex list into the pmx struct
    pmx.verts = new_vertlist

    # map from old vertex index to new vertex index
    old_to_new = dict(zip(old_idxs, idxlist))

    # remap every face corner to the new vertex numbering
    print("doing faces")
    for face in pmx.faces:
        for corner in range(3):
            face[corner] = old_to_new[face[corner]]

    # remap vertex morphs and uv morphs the same way
    print("doing morphs")
    uv_types = (
        pmxstruct.MorphType.UV,
        pmxstruct.MorphType.UV_EXT1,
        pmxstruct.MorphType.UV_EXT2,
        pmxstruct.MorphType.UV_EXT3,
        pmxstruct.MorphType.UV_EXT4,
    )
    for morph in pmx.morphs:
        if morph.morphtype == pmxstruct.MorphType.VERTEX:  # vertex morph
            for item in morph.items:
                item.vert_idx = old_to_new[item.vert_idx]
        if morph.morphtype in uv_types:  # uv morph
            for item in morph.items:
                item.vert_idx = old_to_new[item.vert_idx]

    # softbodies: eh, who cares

    pmxname_done = pmxname[:-4] + "_Vsort.pmx"
    pmxlib.write_pmx(pmxname_done, pmx, moreinfo=True)
    print("done")
Esempio n. 3
0
def main():
    pmxname = core.prompt_user_filename(".pmx")
    pmxname_done = "edgeweightapplied.pmx"
    maskname = core.prompt_user_filename(".png")

    pmx1 = pmxlib.read_pmx(pmxname, moreinfo=True)

    # grayscale mask: every channel should hold the same value, so keep just R
    image = Image.open(maskname).convert('RGBA')
    r, g, b, a = image.split()
    px = r.load()

    # clamp so uv == 1.0 maps to the last valid pixel index (size - 1)
    print(image.size)
    s = (image.size[0] - 1, image.size[1] - 1)

    print("numverts =", len(pmx1.verts))
    for d, v in enumerate(pmx1.verts):
        progprint(d / len(pmx1.verts))

        # map this vertex's uv coords onto the nearest pixel
        # NOTE(review): which axis is which, and whether one needs inverting,
        # is unverified -- TODO confirm
        x = round(v.uv[0] * s[0])
        y = round(v.uv[1] * s[1])
        p = px[x, y]

        # scale the 0-255 pixel value down to a 0-1 edge factor and store it
        # (whether it needs inverting is also unverified)
        v.edgescale = p / 255

    pmxlib.write_pmx(pmxname_done, pmx1, moreinfo=True)
    print("done")
"""

import sys
try:
    sys.path.append("../")
    # pull in the shared helper modules from the sibling "python" package
    from python import nuthouse01_core as core
    from python import nuthouse01_pmx_parser as pmxlib
except ImportError as eee:
    print(eee)
    print(
        "ERROR: failed to import some of the necessary files, all my scripts must be together in the same folder!"
    )
    print("...press ENTER to exit...")
    input()
    exit()
    # unreachable after exit(); presumably kept to quiet IDE "undefined name"
    # warnings for core/pmxlib -- TODO confirm intent
    core = pmxlib = None

##################
# copy normals
###################

# read both models, then transfer per-vertex normals from the "flatrot"
# model onto the "point" model, pairing vertices index-for-index
pmx_flatrot = pmxlib.read_pmx("nano feetflatrot.pmx")
pmx_point = pmxlib.read_pmx("nano feetpoint3.pmx")

for src_vert, dst_vert in zip(pmx_flatrot.verts, pmx_point.verts):
    dst_vert.norm = src_vert.norm

pmxlib.write_pmx("FINAL.pmx", pmx_point)
def main():
    """Transfer a vertex morph from one model to another.

    Assumes the two models' geometry occupies the same positions in space.
    Prompts for a destination PMX and a source PMX, then loops asking for JP
    morph names to copy; each source morph vertex is matched to destination
    vertices by spatial proximity. Writes the result to "TRANSFER.pmx".

    NOTE(review): relies on a module-level THRESHOLD distance constant
    defined elsewhere in the file -- confirm its value before trusting the
    matching radius.
    """
    print(
        "Transfer a morph from one model to another, assuming the geometry is in the same position"
    )
    print("Needs source PMX, source morph, and dest PMX")

    # prompt PMX name
    print("Please enter name of DESTINATION PMX model file:")
    dest_name_pmx = core.prompt_user_filename(".pmx")
    dest_pmx = pmxlib.read_pmx(dest_name_pmx, moreinfo=True)

    # prompt PMX name
    print("Please enter name of SOURCE PMX model file:")
    source_name_pmx = core.prompt_user_filename(".pmx")
    source_pmx = pmxlib.read_pmx(source_name_pmx, moreinfo=True)

    while True:
        print("Please enter/paste JP name of morph to transfer:")
        s = input("name: >")
        # exit condition is empty input
        if s == "": break

        # find the morph with the matching name
        source_morph_idx = core.my_list_search(source_pmx.morphs,
                                               lambda x: x.name_jp == s)
        if source_morph_idx is None:
            print("err: could not find that name, try again")
            continue

        # verify vertex morph (morphtype 1 == vertex morph)
        source_morph = source_pmx.morphs[source_morph_idx]
        if source_morph.morphtype != 1:
            print("err: for now, only support vertex morphs")
            continue

        # clone the source morph's metadata but start with an empty item list
        newmorph = pmxstruct.PmxMorph(name_jp=source_morph.name_jp,
                                      name_en=source_morph.name_en,
                                      panel=source_morph.panel,
                                      morphtype=source_morph.morphtype,
                                      items=[])
        # have source, have dest, have morph
        # begin iterating!
        # for each vert ref in vertex morph, go to vert in source PMX to get position
        #
        # each dest vertex may be claimed by at most one source morph item
        already_used_verts = set()

        for asdf, morphitem in enumerate(source_morph.items):
            core.print_progress_oneline(asdf / len(source_morph.items))
            vertid = morphitem.vert_idx
            vertpos = source_pmx.verts[vertid].pos  # get vertex xyz
            # find the vert or verts in dest_pmx that correspond to this vert in source_pmx
            # problem: multiple vertices at the same location
            # in most cases, all verts at a location will move the same amount... but not always? how to handle?
            # TODO check thru source pmx morph for "diverging" vertices like this? same location in source but not same offset?
            # if some get left behind that's OK, that's usually material borders, easy to use morph editor, only see materials I don't want to morph, and remove those verts from the morph
            # solution: all verts within some radius? not perfect solution
            # radius is hardcoded... if no dest vert found within radius, then what? warn & report nearest?
            # maybe find nearest vertex, and then find all vertices within 110% of that radius?

            # calculate dist from here to each vert in dest_pmx
            # find all verts within this dist threshold
            # NOTE(review): this is an O(len(morph) * len(dest_verts)) scan
            matching_verts = []
            dist_list = []
            for d, v2 in enumerate(dest_pmx.verts):
                dist = core.my_euclidian_distance(
                    [vertpos[i] - v2.pos[i] for i in range(3)])
                dist_list.append(dist)
                if dist < THRESHOLD:
                    matching_verts.append(d)
            if not matching_verts:
                print(
                    "warning: unable to find any verts within the threshold for source vert ID %d"
                    % vertid)
                print("nearest vert is dist=%f" % min(dist_list))
            # copy this item's move vector onto every not-yet-claimed match
            for v in matching_verts:
                if v not in already_used_verts:
                    already_used_verts.add(v)
                    newitem = pmxstruct.PmxMorphItemVertex(vert_idx=v,
                                                           move=morphitem.move)
                    newmorph.items.append(newitem)

            pass  # end of for-each-morphitem loop

        # done building the new morph, hopefully
        # make the vertices sorted cuz i can
        newmorph.items.sort(key=lambda x: x.vert_idx)

        if len(newmorph.items) != len(source_morph.items):
            print(
                "warning: length mismatch! source has %d and new has %d, this requires closer attention"
                % (len(source_morph.items), len(newmorph.items)))

        # add it to the dest pmx
        dest_pmx.morphs.append(newmorph)

        pass  # end of while-loop

    print("DONE")
    pmxlib.write_pmx("TRANSFER.pmx", dest_pmx)

    return None
def main():
    """Find-and-replace a texture filepath across all PMX files in a folder.

    Prompts for one PMX file, then reads EVERY sibling .pmx in the same
    directory, asks for a path to find and a replacement path, and rewrites
    (in place, no backup!) each PMX that contained a match.

    NOTE(review): depends on module-level `core`, `pmxlib`, `os`, and
    `file_sort_textures` being imported elsewhere in this file.
    """
    print(
        "Open all PMX files at the selected level and replace usages of texure file XXXXX with YYYYY"
    )

    core.MY_PRINT_FUNC("Please enter name of PMX model file:")
    input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")

    # absolute path to directory holding the pmx
    input_filename_pmx_abs = os.path.normpath(
        os.path.abspath(input_filename_pmx))
    startpath, input_filename_pmx_rel = os.path.split(input_filename_pmx_abs)

    # first, build the list of ALL files that actually exist, then keep only
    # "neighbor" PMXs: relative names ending in .pmx with no path separator
    relative_all_exist_files = file_sort_textures.walk_filetree_from_root(
        startpath)
    pmx_filenames = [
        f for f in relative_all_exist_files
        if (f.lower().endswith(".pmx")) and (os.path.sep not in f)
    ]

    # read all the PMX objects; keys are relative filenames, values pmx objects
    all_pmx_obj = {}
    for this_pmx_name in pmx_filenames:
        this_pmx_obj = pmxlib.read_pmx(os.path.join(startpath, this_pmx_name),
                                       moreinfo=False)
        all_pmx_obj[this_pmx_name] = this_pmx_obj

    core.MY_PRINT_FUNC("ALL PMX FILES:")
    for pmxname in pmx_filenames:
        core.MY_PRINT_FUNC("    " + pmxname)

    core.MY_PRINT_FUNC("\n\n\n")
    core.MY_PRINT_FUNC(
        "WARNING: this script will overwrite all PMX files it operates on. This does NOT create a backup. Be very careful what you type!"
    )
    core.MY_PRINT_FUNC("\n\n\n")

    findme = core.MY_GENERAL_INPUT_FUNC(
        lambda x: True, "Please specify which filepath to find:")
    # BUGFIX: test for empty input BEFORE normpath -- os.path.normpath("")
    # returns ".", so the original post-normpath emptiness check never fired
    # (and `is None` after calling .strip() would already have raised)
    if findme is None or findme.strip() == "":
        core.MY_PRINT_FUNC("quitting")
        return None
    findme = os.path.normpath(findme.strip())  # sanitize it

    replacewith = core.MY_GENERAL_INPUT_FUNC(
        lambda x: True, "Please specify which filepath to replace it with:")
    # same fix as above: check emptiness on the raw stripped input first
    if replacewith is None or replacewith.strip() == "":
        core.MY_PRINT_FUNC("quitting")
        return None
    replacewith = os.path.normpath(replacewith.strip())  # sanitize it

    core.MY_PRINT_FUNC("Replacing '%s' with '%s'" % (findme, replacewith))

    # now do find & replace on each pmx, and report how many hits each had
    for this_pmx_name, this_pmx_obj in all_pmx_obj.items():
        howmany = file_sort_textures.texname_find_and_replace(this_pmx_obj,
                                                              findme,
                                                              replacewith,
                                                              sanitize=True)
        core.MY_PRINT_FUNC("")
        core.MY_PRINT_FUNC("'%s': replaced %d" % (this_pmx_name, howmany))

        if howmany != 0:
            # NOTE: this is OVERWRITING THE PREVIOUS PMX FILE, NOT CREATING A NEW ONE
            # because I make a zipfile backup I don't need to feel worried about preserving the old version
            output_filename_pmx = os.path.join(startpath, this_pmx_name)
            # output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
            pmxlib.write_pmx(output_filename_pmx, this_pmx_obj, moreinfo=False)

    core.MY_PRINT_FUNC("Done!")
    return None
Esempio n. 7
0
			for returnval in recursively_compare(A, B):
				# this yields and lets me run each time it finds something that does not match
				yield [idx] + returnval

		# if not float and not iterable, then use standard compare
		else:
			if A != B:
				yield [idx]
	return





f1 = "foobar.pmx"
pmx1 = pmxlib.read_pmx(f1)

f2 = "whatev.pmx"
pmx2 = pmxlib.read_pmx(f2)


# i am giving the function two lists to walk in parallel, NOT two items
alldiff = recursively_compare(pmx1.list(), pmx2.list())
# it's an iterator thing so i need to iterate on it before it becomes a true list
# aka cast it to a list
alldiff = list(alldiff)
print(len(alldiff))

noverts = [d for d in alldiff if not (d[0] == 1 or d[0] == 2)]
print(len(noverts))
# for diff in noverts:
def main(moreinfo=True):
    """Detect discrete mesh "fragments" and rig each with a bone + rigidbody.

    Assumes each fragment's vertices and faces are stored contiguously in the
    model. Flood-fills outward from a seed vert/face using a bounded lookahead
    window, then creates one new bone (100% BDEF1 weights) and one spherical
    rigidbody per fragment, and writes "<input>_fragfix.pmx".

    NOTE(review): relies on module-level LOOKAHEAD and MASS_FACTOR constants
    and a dist_to_nearest_vertex() helper defined elsewhere -- confirm.
    """
    # prompt PMX name
    core.MY_PRINT_FUNC("Please enter name of PMX input file:")
    input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
    pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)

    # coordinates are stored as list[x, y, z], convert this --> tuple --> hash for much faster comparing
    vert_coord_hashes = [hash(tuple(v.pos)) for v in pmx.verts]

    # one entry per fragment, filled in as fragments are discovered
    all_vert_sets = []
    all_face_sets = []
    all_bone_indices = []
    all_rigidbody_indices = []

    start_vert = 0
    start_face = 0
    # i know i'm done when i have consumed all verts & all faces
    while start_face < len(pmx.faces) and start_vert < len(pmx.verts):
        # 1. start a new sets for the vertices and faces
        vert_set = set()
        face_set = set()
        # 2. pick a vertex that hasn't been used yet and add it to the set, ez
        # 2b. optimization: a fragment is guaranteed to have at least 4 faces (to make a closed 3d solid) and therefore at least 4 verts
        # can i safely assume that they are "sharp" corners and therefore there are 12 verts?
        for i in range(12):
            vert_set.add(start_vert + i)
        # also, init the faces set with the minimum of 4 faces, and add any verts included in those faces to the vert set
        for i in range(4):
            face_set.add(start_face + i)
            for v in pmx.faces[start_face + i]:  # for each vert in this face,
                vert_set.add(v)  # add this vert to the vert set
        # guarantee that it is contiguous from start_vert to the highest index that was in the faces
        vert_set = set(list(range(start_vert, max(vert_set) + 1)))
        # now i have initialized the set with everything i know is guarnateed part of the fragment

        highest_known_vert = max(vert_set)
        highest_known_face = max(face_set)

        # print(str(len(vert_set)) + " ", end="")

        # begin looping & flooding until i don't detect any more
        while True:
            # 3. note the number of verts collected so far
            set_size_A = len(vert_set)

            # 4. find all faces that include any vertex in the "fragment set",
            # whenever i find one, add all verts that it includes to the "fragment set" as well
            '''
			# zero-assumption brute-force method:
			for f_id in range(len(pmx.faces)):
				face = pmx.faces[f_id]
				if face[0] in vert_set or face[1] in vert_set or face[2] in vert_set: # we got a hit!
					face_set.add(f_id)
					vert_set.add(face[0])
					vert_set.add(face[1])
					vert_set.add(face[2])
			'''
            # optimization: scan only faces index 'highest_known_face+1' thru 'highest_known_face'+LOOKAHEAD
            #	because 0 thru start_face is guaranteed to not be part of the group
            #	and start_face thru highest_known_face is already guaranteed to be part of the group
            #	if chunks are bigger than LOOKAHEAD, then it's not guaranteed to succeed or fail, could do either
            for f_id in range(
                    highest_known_face + 1,
                    min(highest_known_face + LOOKAHEAD, len(pmx.faces))):
                face = pmx.faces[f_id]
                if face[0] in vert_set or face[1] in vert_set or face[
                        2] in vert_set:
                    # we got a hit!
                    face_set.add(f_id)
                    vert_set.add(face[0])
                    vert_set.add(face[1])
                    vert_set.add(face[2])
                    # optimization: if this is farther than what i thought was the end, then everything before it should be added too
                    if f_id > highest_known_face:
                        for x in range(highest_known_face + 1, f_id):
                            face_set.add(x)
                            vert_set.add(pmx.faces[x][0])
                            vert_set.add(pmx.faces[x][1])
                            vert_set.add(pmx.faces[x][2])
                        highest_known_face = f_id

            set_size_B = len(vert_set)

            # update the set of vertex coord hashes for easier comparing
            vert_set_hashes = set([vert_coord_hashes[i] for i in vert_set])
            # 5. find all vertices that have the same exact coordinates as any vertex in the "fragment set",
            # then and add them to the "fragment set"
            '''
			# zero-assumption brute-force method:
			for v_id in range(len(vert_coord_hashes)):
				vert_hash = vert_coord_hashes[v_id]
				if vert_hash in vert_set_hashes: # we got a hit!
					vert_set.add(v_id)
			'''
            # optimization: scan only verts index 'highest_known_vert+1' thru 'highest_known_vert'+LOOKAHEAD
            #	because 0 thru start_vert is guaranteed to not be part of the group
            #	and start_vert thru highest_known_vert is already guaranteed to be part of the group
            #	if chunks are bigger than LOOKAHEAD, then it's not guaranteed to succeed or fail, could do either
            for v_id in range(
                    highest_known_vert + 1,
                    min(highest_known_vert + LOOKAHEAD, len(pmx.verts))):
                vert_hash = vert_coord_hashes[v_id]
                if vert_hash in vert_set_hashes:
                    # we got a hit!
                    vert_set.add(v_id)
                    # optimization: if this is farther than what i thought was the end, then everything before it should be added too
                    if v_id > highest_known_vert:
                        for x in range(highest_known_vert + 1, v_id):
                            vert_set.add(x)
                        highest_known_vert = v_id

            set_size_C = len(vert_set)

            # progress: verts gained by face-pass, then by coord-match pass
            print("+%d +%d, " %
                  (set_size_B - set_size_A, set_size_C - set_size_B),
                  end="")

            # 6. if the number of verts did not change, we are done
            if set_size_C == set_size_A:
                break
            pass
        print("")
        # 7. now i have a complete fragment in vert_set and face_set !! :)
        all_vert_sets.append(vert_set)
        all_face_sets.append(face_set)
        # increment the face-start and vert-start indices, this is my stop condition
        start_vert += len(vert_set)
        start_face += len(face_set)
        # move on to the next fragment if i still have more verts to parse
        pass
    # done with identifying all fragments!

    # double-check that all vertices got sorted into one and only one fragment
    assert sum([len(vs) for vs in all_vert_sets]) == len(pmx.verts)
    temp = set()
    for vs in all_vert_sets:
        temp.update(vs)
    assert len(temp) == len(pmx.verts)

    # double-check that all faces got sorted into one and only one fragment
    assert sum([len(fs) for fs in all_face_sets]) == len(pmx.faces)
    temp = set()
    for fs in all_face_sets:
        temp.update(fs)
    assert len(temp) == len(pmx.faces)

    print("")
    print("Identified %d discrete fragments!" % (len(all_vert_sets), ))

    # BONES AND WEIGHTS
    for fragnum in range(len(all_vert_sets)):
        # name
        newbone_name = "fragment%d" % fragnum
        # position: average of all vertices in the fragment? sure why not
        # TODO is there a "better" way of calculating the average/centroid/center of mass? idk
        newbone_pos = [0, 0, 0]
        for v_id in all_vert_sets[fragnum]:
            # accumulate the XYZ for each vertex in the fragment
            newbone_pos[0] += pmx.verts[v_id].pos[0]
            newbone_pos[1] += pmx.verts[v_id].pos[1]
            newbone_pos[2] += pmx.verts[v_id].pos[2]
        # divide by the number of verts in the fragment to get the average
        newbone_pos[0] /= len(all_vert_sets[fragnum])
        newbone_pos[1] /= len(all_vert_sets[fragnum])
        newbone_pos[2] /= len(all_vert_sets[fragnum])
        # create the new bone object
        newbone_obj = pmxstruct.PmxBone(
            name_jp=newbone_name,
            name_en=newbone_name,
            pos=newbone_pos,
            parent_idx=0,
            deform_layer=0,
            deform_after_phys=False,
            has_rotate=True,
            has_translate=True,
            has_visible=True,
            has_enabled=True,
            has_ik=False,
            tail_usebonelink=False,
            tail=[0, 0, 0],
            inherit_rot=False,
            inherit_trans=False,
            has_fixedaxis=False,
            has_localaxis=False,
            has_externalparent=False,
        )
        # note the index it will be inserted at
        thisboneindex = len(pmx.bones)
        all_bone_indices.append(thisboneindex)
        # append it onto the list of bones
        pmx.bones.append(newbone_obj)
        # for each vertex in this fragment, give it 100% weight on that bone
        for v_id in all_vert_sets[fragnum]:
            v = pmx.verts[v_id]
            v.weighttype = pmxstruct.WeightMode.BDEF1  # BDEF1
            v.weight = [[thisboneindex, 1]]
        pass

    # RIGID BODIES
    for fragnum in range(len(all_vert_sets)):
        newbody_name = "body%d-0" % fragnum
        newbody_pos = pmx.bones[all_bone_indices[fragnum]].pos
        # hmmm, what do do here? this is the really hard part!
        # let's just make a sphere with radius equal to the distance to the nearest vertex of this fragment?
        # TODO: the bodies created from this are intersecting eachother when at rest!
        #  the distance to the closest vertex is greater than the distance to the closest point on the closest face!
        #  therefore there is a small bit of overlap
        newbody_radius = dist_to_nearest_vertex(newbody_pos,
                                                all_vert_sets[fragnum], pmx)

        # TODO: to "fill a fragment with several rigidbody spheres", you need to a) select a center for each, b) select a size for each
        #  the sizes can come from algorithm roughed out in dist_to_nearest_point_on_mesh_surface()
        #  the centers... idk? how can you do this?
        #  https://doc.babylonjs.com/toolsAndResources/utilities/InnerMeshPoints might be able to reuse some of the ideas from this?

        # phys params: set mass equal to the VOLUME of this new rigid body! oh that seems clever, i like that, bigger ones are heavier
        # if i figure out how to create multiple bodies, each body's mass should be proportional to its volume like this
        volume = 3.14 * (4 / 3) * (newbody_radius**3)
        mass = volume * MASS_FACTOR
        # phys params: use the default damping/friction/etc parameters cuz idk why not
        phys_move_damp = 0.95
        phys_rot_damp = 0.95
        phys_friction = 0.95
        phys_repel = 0.3  # bounciness?

        # this gif is with these params: https://gyazo.com/3d143f33b79c1151c1ccbffcc578448b

        # groups: for now, since each fragment is only one body, i can just ignore groups stuff
        # groups: later, if each fragment is several bodies... assign the groups in round-robin? each fragment will clip thru 1/15 of the
        # other fragments but i think that's unavoidable. also need to reserve group16 for the floor! so set each fragment's cluster of
        # bodies to nocollide with the group# assigned to that cluster, but respect all others.

        # bone_idx: if there are more than 1 rigidbodies associated with each fragment, one "main" body is connected to the bone
        # all the others are set to bone -1 and connected to the mainbody via joints
        newbody_obj = pmxstruct.PmxRigidBody(
            name_jp=newbody_name,
            name_en=newbody_name,
            bone_idx=all_bone_indices[fragnum],
            pos=newbody_pos,
            rot=[0, 0, 0],
            size=[newbody_radius, 0, 0],
            shape=pmxstruct.RigidBodyShape.SPHERE,
            group=1,
            nocollide_set=set(),
            phys_mode=pmxstruct.RigidBodyPhysMode.PHYSICS,
            phys_mass=mass,
            phys_move_damp=phys_move_damp,
            phys_rot_damp=phys_rot_damp,
            phys_repel=phys_repel,
            phys_friction=phys_friction)

        # note the index that this will be inserted at
        bodyindex = len(pmx.rigidbodies)
        all_rigidbody_indices.append(bodyindex)
        pmx.rigidbodies.append(newbody_obj)
        pass

    # JOINTS
    # if there is only one body per fragment then this is okay without any joints
    # if there are several bodies then we need to create joints from the "center" rigidbody to the others
    # even if you try to limit the joint to 0 rotation and 0 slide it still has some wiggle in it :( not perfectly rigid
    # TODO: i'll deal with this if and only if an algorithm for filling fragments with rigidbodies is created
    for fragnum in range(len(all_vert_sets)):
        pass

    core.MY_PRINT_FUNC("")

    # write out
    output_filename_pmx = input_filename_pmx[0:-4] + "_fragfix.pmx"
    output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
    pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=moreinfo)
    core.MY_PRINT_FUNC("Done!")
    return None
Esempio n. 9
0
def main(moreinfo=False):
    """Scan every PMX under a root folder and list models missing textures.

    Walks the whole tree once, reads each PMX found (best-effort, failures
    are collected rather than fatal), matches its texture references against
    the images that actually exist near it, and writes two text-file reports:
    models that failed to read, and models missing at least one texture.

    NOTE(review): relies on module-level constants IMG_EXT, FAILED_LIST_FILE,
    MISSINGTEX_LIST_FILE and the file_sort_textures module, all defined
    elsewhere -- confirm.
    """
    # 1. user input
    core.MY_PRINT_FUNC("Current dir = '%s'" % os.getcwd())
    core.MY_PRINT_FUNC(
        "Enter the path to the root folder that contains ALL models:")
    while True:
        name = input("root folder = ")
        if not os.path.isdir(name):
            core.MY_PRINT_FUNC(os.path.abspath(name))
            core.MY_PRINT_FUNC(
                "Err: given folder does not exist, did you type it wrong?")
        else:
            break
    # it exists, so make it absolute
    rootdir = os.path.abspath(os.path.normpath(name))

    core.MY_PRINT_FUNC("root folder = '%s'" % rootdir)

    core.MY_PRINT_FUNC("")

    core.MY_PRINT_FUNC("... beginning to index file tree...")
    # 2. build list of ALL file on the system within this folder
    relative_all_exist_files = file_sort_textures.walk_filetree_from_root(
        rootdir)
    core.MY_PRINT_FUNC("... total # of files:", len(relative_all_exist_files))
    relative_all_pmx = [
        f for f in relative_all_exist_files if f.lower().endswith(".pmx")
    ]
    core.MY_PRINT_FUNC("... total # of PMX models:", len(relative_all_pmx))
    relative_exist_img_files = [
        f for f in relative_all_exist_files if f.lower().endswith(IMG_EXT)
    ]
    core.MY_PRINT_FUNC("... total # of image sources:",
                       len(relative_exist_img_files))

    core.MY_PRINT_FUNC("")

    # this will accumulate the list of PMXes
    list_of_pmx_with_missing_tex = []

    list_of_pmx_that_somehow_failed = []

    # 3. for each pmx,
    for d, pmx_name in enumerate(relative_all_pmx):
        # progress print
        core.MY_PRINT_FUNC("\n%d / %d" % (d + 1, len(relative_all_pmx)))
        # wrap the actual work with a try-catch just in case
        # this is a gigantic time investment and I dont want it to fail halfway thru and lose everything
        try:
            # 4. read the pmx, gotta store it in the dict like this cuz shut up thats why
            # dictionary where keys are filename and values are resulting pmx objects
            all_pmx_obj = {}
            this_pmx_obj = pmxlib.read_pmx(os.path.join(rootdir, pmx_name),
                                           moreinfo=False)
            all_pmx_obj[pmx_name] = this_pmx_obj

            # 5. filter images down to only images underneath the same folder as the pmx
            pmx_folder = os.path.dirname(pmx_name).lower()
            possible_img_sources = [
                f for f in relative_exist_img_files
                if f.lower().startswith(pmx_folder)
            ]
            # trim the leading "pmx_folder" portion from these names
            possible_img_sources = [
                os.path.relpath(f, pmx_folder) for f in possible_img_sources
            ]

            # 6. make filerecord_list
            # for each pmx, for each file on disk, match against files used in textures (case-insensitive) and replace with canonical name-on-disk
            # also fill out how much and how each file is used, and unify dupes between files, all that good stuff
            filerecord_list = file_sort_textures.build_filerecord_list(
                all_pmx_obj, possible_img_sources, False)

            # 7. if within filerecordlist, any filerecord is used but does not exist,
            if any(((fr.numused != 0) and (not fr.exists))
                   for fr in filerecord_list):
                # then save this pmx name
                list_of_pmx_with_missing_tex.append(pmx_name)
        except Exception as e:
            # deliberately broad: log the failure and keep scanning the rest
            core.MY_PRINT_FUNC(e.__class__.__name__, e)
            core.MY_PRINT_FUNC(
                "ERROR! some kind of exception interrupted reading pmx '%s'" %
                pmx_name)
            list_of_pmx_that_somehow_failed.append(pmx_name)

    core.MY_PRINT_FUNC("\n\n")

    # make the paths absolute
    list_of_pmx_that_somehow_failed = [
        os.path.join(rootdir, p) for p in list_of_pmx_that_somehow_failed
    ]
    list_of_pmx_with_missing_tex = [
        os.path.join(rootdir, p) for p in list_of_pmx_with_missing_tex
    ]

    # print & write-to-file
    if list_of_pmx_that_somehow_failed:
        core.MY_PRINT_FUNC("WARNING: failed in some way on %d PMX files" %
                           len(list_of_pmx_that_somehow_failed))
        core.MY_PRINT_FUNC("Writing the full list to text file:")
        output_filename_failures = core.get_unused_file_name(FAILED_LIST_FILE)
        core.write_list_to_txtfile(output_filename_failures,
                                   list_of_pmx_that_somehow_failed)
    core.MY_PRINT_FUNC(
        "Found %d / %d PMX files that are missing at least one texture source"
        % (len(list_of_pmx_with_missing_tex), len(relative_all_pmx)))
    core.MY_PRINT_FUNC("Writing the full list to text file:")
    output_filename_missingtex = core.get_unused_file_name(
        MISSINGTEX_LIST_FILE)
    core.write_list_to_txtfile(output_filename_missingtex,
                               list_of_pmx_with_missing_tex)

    # print(list_of_pmx_with_missing_tex)

    core.MY_PRINT_FUNC("Done!")
    return None
Esempio n. 10
0
try:
	sys.path.append("../")
	# pull in the shared helper modules from the sibling "python" package
	from python import nuthouse01_core as core
	from python import nuthouse01_pmx_parser as pmxlib
except ImportError as eee:
	print(eee)
	print("ERROR: failed to import some of the necessary files, all my scripts must be together in the same folder!")
	print("...press ENTER to exit...")
	input()
	exit()
	# unreachable after exit(); presumably kept to quiet IDE "undefined name"
	# warnings for core/pmxlib -- TODO confirm intent
	core = pmxlib = None




##################
# copy normals
###################

# read both models; they must line up vertex-for-vertex before normals
# can be transferred index-by-index from "from.pmx" onto "to.pmx"
pmx_from = pmxlib.read_pmx("from.pmx")
pmx_to = pmxlib.read_pmx("to.pmx")

assert len(pmx_from.verts) == len(pmx_to.verts)

for src_vert, dst_vert in zip(pmx_from.verts, pmx_to.verts):
	dst_vert.norm = src_vert.norm

pmxlib.write_pmx("FINAL.pmx", pmx_to)