def _real_flatten_uv(mesh, reference_edge):
    no_processed_count = 0

    if mesh.uv_layers.active is None:
        # no UV layer yet, create one
        mesh.uv_layers.new(do_init=False)

    bm = bmesh.from_edit_mesh(mesh)
    uv_lay = bm.loops.layers.uv.active
    for face in bm.faces:
        if not face.select:
            continue

        allPoint = len(face.loops)

        if allPoint <= reference_edge:
            no_processed_count += 1
            continue

        # build the new coordinate system from the reference edge
        p1Relative = reference_edge
        p2Relative = reference_edge + 1
        p3Relative = reference_edge + 2
        if p2Relative >= allPoint:
            p2Relative -= allPoint
        if p3Relative >= allPoint:
            p3Relative -= allPoint

        p1 = mathutils.Vector(
            tuple(face.loops[p1Relative].vert.co[x] for x in range(3)))
        p2 = mathutils.Vector(
            tuple(face.loops[p2Relative].vert.co[x] for x in range(3)))
        p3 = mathutils.Vector(
            tuple(face.loops[p3Relative].vert.co[x] for x in range(3)))

        new_y_axis = p2 - p1
        new_y_axis.normalize()
        vec1 = p3 - p2
        vec1.normalize()

        new_z_axis = new_y_axis.cross(vec1)
        new_z_axis.normalize()
        new_x_axis = new_y_axis.cross(new_z_axis)
        new_x_axis.normalize()

        # construct rebase matrix
        origin_base = mathutils.Matrix(((1.0, 0, 0), (0, 1.0, 0), (0, 0, 1.0)))
        origin_base.invert()
        new_base = mathutils.Matrix(
            ((new_x_axis.x, new_y_axis.x,
              new_z_axis.x), (new_x_axis.y, new_y_axis.y, new_z_axis.y),
             (new_x_axis.z, new_y_axis.z, new_z_axis.z)))
        transition_matrix = origin_base @ new_base
        transition_matrix.invert()

        # project each corner (loop) of the face into the new basis
        for loop_index in range(allPoint):
            pp = mathutils.Vector(
                tuple(face.loops[loop_index].vert.co[x] for x in range(3)))
            vec = pp - p1
            new_vec = transition_matrix @ vec

            face.loops[loop_index][uv_lay].uv = (
                (new_vec.x if new_vec.x >= 0 else -new_vec.x) / 5,
                (new_vec.y) / 5)

    # Show the updates in the viewport
    bmesh.update_edit_mesh(mesh)

    return no_processed_count
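
# A minimal usage sketch (not part of the original snippet; names here are
# illustrative): the helper above expects Edit Mode, since
# bmesh.from_edit_mesh() only works on a mesh that is currently being edited.
import bpy

edit_obj = bpy.context.edit_object
if edit_obj is not None and edit_obj.type == 'MESH':
    skipped = _real_flatten_uv(edit_obj.data, reference_edge=0)
    print("Skipped %d selected face(s) with too few corners" % skipped)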
Example #2
def write_file(
    filepath,
    objects,
    scene,
    EXPORT_TRI=False,
    EXPORT_EDGES=False,
    EXPORT_NORMALS=False,
    EXPORT_APPLY_MODIFIERS=True,
    EXPORT_GLOBAL_MATRIX=None,
):

    if EXPORT_GLOBAL_MATRIX is None:
        EXPORT_GLOBAL_MATRIX = mathutils.Matrix()

    print('CAO Export path: %r' % filepath)

    time1 = time.time()

    file = open(filepath, "w", encoding="utf8", newline="\n")
    fw = file.write

    # Write Header
    fw('# Blender v%s CAO File\n' % (bpy.app.version_string))

    # Initialize
    totverts = 1
    face_vert_index = 1
    copy_set = set()

    vertices = []
    faces = []
    lines = []
    facelines = []
    gcylinders = []
    gcircles = []

    # Get all meshes
    for ob_main in objects:

        # ignore dupli children
        if ob_main.parent and ob_main.parent.dupli_type in {'VERTS', 'FACES'}:
            continue

        obs = []
        if ob_main.dupli_type != 'NONE':
            # print('creating dupli_list on', ob_main.name)
            ob_main.dupli_list_create(scene)
            obs = [(dob.object, dob.matrix) for dob in ob_main.dupli_list]

        else:
            obs = [(ob_main, ob_main.matrix_world)]

        try:
            ob_main["vp_model_types"]
        except KeyError:
            # the object has no "vp_model_types" custom property, skip it
            continue

        for ob, ob_mat in obs:

            try:
                me = ob.to_mesh(scene,
                                EXPORT_APPLY_MODIFIERS,
                                'PREVIEW',
                                calc_tessface=False)
            except RuntimeError:
                me = None

            if me is None or ob_main["vp_model_types"] not in [
                    "3D Faces", "3D Lines"
            ]:
                continue

            me.transform(EXPORT_GLOBAL_MATRIX *
                         ob_mat)  # transform into the world coordinate system

            if EXPORT_TRI:
                mesh_triangulate(me)

            me_verts = me.vertices[:]

            face_index_pairs = [(face, index)
                                for index, face in enumerate(me.polygons)]

            if EXPORT_EDGES:
                edges = me.edges
            else:
                edges = []

            if not (len(face_index_pairs) + len(edges) + len(me.vertices)):
                bpy.data.meshes.remove(me)
                continue  # ignore this mesh.

            smooth_groups, smooth_groups_tot = (), 0

            # no materials
            if smooth_groups:
                sort_func = lambda a: smooth_groups[a[1]] if a[0].use_smooth else False
            else:
                sort_func = lambda a: a[0].use_smooth

            face_index_pairs.sort(key=sort_func)
            del sort_func

            # fw('# %s\n' % (ob_main.name))

            # Vertices
            for v in me_verts:
                vertices.append(v.co[:])

            # Faces
            for f, f_index in face_index_pairs:
                f_v = [(vi, me_verts[v_idx], l_idx) for vi, (
                    v_idx, l_idx) in enumerate(zip(f.vertices, f.loop_indices))
                       ]
                f_side = []

                for vi, v, li in f_v:
                    f_side.append(totverts + v.index)

                # Lines/Edges
                if ob_main["vp_model_types"] == "3D Lines":
                    initialen = len(lines)
                    for i in range(0, len(f_side) - 1):
                        lines.append([f_side[i] - 1, f_side[i + 1] - 1])
                    lines.append([f_side[len(f_side) - 1] - 1, f_side[0] - 1])

                    facelines.append([
                        len(lines) - initialen,
                        list(range(initialen, len(lines)))
                    ])

                else:
                    faces.append(f_side)

            # Make the indices global rather than per mesh
            totverts += len(me_verts)

            # clean up
            bpy.data.meshes.remove(me)

        if ob_main["vp_model_types"] == "3D Cylinders":
            gcylinders.append(
                [len(vertices),
                 len(vertices) + 1, ob_main["vp_radius"]])  # Get radius
            vertices.append(ob_main["vp_obj_Point1"])
            vertices.append(ob_main["vp_obj_Point2"])
            totverts += 2

        elif ob_main["vp_model_types"] == "3D Circles":
            gcircles.append([
                ob_main["vp_radius"],
                len(vertices),
                len(vertices) + 1,
                len(vertices) + 2
            ])
            vertices.append(ob_main["vp_obj_Point1"])
            vertices.append(ob_main["vp_obj_Point2"])
            vertices.append(ob_main["vp_obj_Point3"])
            totverts += 3

        if ob_main.dupli_type != 'NONE':
            ob_main.dupli_list_clear()

    npoints = len(vertices)
    nfacepoints = len(faces)
    nlines = len(lines)
    nfacelines = len(facelines)
    ncylinders = len(gcylinders)
    ncircles = len(gcircles)

    text = TEMPLATE_CAO_FILE % {
        "nPoints": npoints,
        "points": "\n".join(generate_vertices(v) for v in vertices),
        "nLines": nlines,
        "lines": "\n".join(generate_lines(l) for l in lines),
        "nFacelines": nfacelines,
        "facelines": "\n".join(generate_facelines(fl) for fl in facelines),
        "nFacepoints": nfacepoints,
        "facepoints": "\n".join(generate_faces(f) for f in faces),
        "nCylinder": ncylinders,
        "cylinders": "\n".join(generate_cylinders(c) for c in gcylinders),
        "nCircles": ncircles,
        "circles": "\n".join(generate_circles(c) for c in gcircles)
    }

    text = text.replace(',', '').replace('{', '').replace('}', '').replace(
        '[', '').replace(']', '')
    text = "".join([s for s in text.strip().splitlines(True) if s.strip()])

    fw(text)
    file.close()

    # copy all collected files.
    bpy_extras.io_utils.path_reference_copy(copy_set)

    print("Export time: %.2f" % (time.time() - time1))
Example #3
def export_lamp_instance(export_ctx, instance, name):
    lamp = instance.obj.data

    ntree = lamp.mitsuba_nodes.get_node_tree()
    params = {}

    if ntree:
        params = ntree.get_nodetree_dict(export_ctx, lamp)

    if not params:
        params = blender_lamp_to_dict(export_ctx, lamp)

        if params['type'] in {'rectangle', 'sphere'}:
            del params['emitter']['scale']

        else:
            del params['scale']

    if params and 'type' in params:
        try:
            hide_emitters = export_ctx.scene_data['integrator']['hideEmitters']

        except KeyError:
            hide_emitters = False

        if params['type'] in {'rectangle', 'disk'}:
            toworld = params.pop('toWorld')

            if 'value' in toworld:
                size_x = size_y = toworld['value']

            else:
                size_x = toworld['x']
                size_y = toworld['y']

            params.update({
                'id': '%s-arealight' % name,
                'toWorld': export_ctx.animated_transform(
                    [(t, m * mathutils.Matrix(((size_x, 0, 0, 0), (0, size_y, 0, 0), (0, 0, -1, 0), (0, 0, 0, 1)))) for (t, m) in instance.motion]
                ),
            })

            if hide_emitters:
                params.update({'bsdf': {'type': 'null'}})

        elif params['type'] in {'point', 'sphere'}:
            params.update({
                'id': '%s-pointlight' % name,
                'toWorld': export_ctx.animated_transform(instance.motion),
            })

            if hide_emitters:
                params.update({'bsdf': {'type': 'null'}})

        elif params['type'] in {'spot', 'directional', 'collimated'}:
            params.update({
                'id': '%s-%slight' % (name, params['type']),
                'toWorld': export_ctx.animated_transform(
                    [(t, m * mathutils.Matrix(((-1, 0, 0, 0), (0, 1, 0, 0), (0, 0, -1, 0), (0, 0, 0, 1)))) for (t, m) in instance.motion]
                ),
            })

        export_ctx.data_add(params)
Example #4
    def export_default(self):
        uvindex = 0
        mapping_type = "globalmapping3d"
        transformation = mathutils.Matrix()
        return mapping_type, uvindex, transformation
Example #5
#
######################################################

import mathutils
from morse.core import blenderapi

start_position = []
start_orientation = []
keyboard_ctrl_objects = []

robots = []
current_robot = 0
# Matrix.Translation((-1, 0, 2)) * Euler((rad(60), 0, rad(-90)), 'XYZ').to_matrix().to_4x4()
camera_to_robot_transform = mathutils.Matrix( (
    ( 0.0, 0.5,  -0.866, -1.0),
    (-1.0, 0.0,   0.0,    0.0),
    ( 0.0, 0.866, 0.5,    2.0),
    ( 0.0, 0.0,   0.0,    1.0) ) )
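
# A hedged sketch that rebuilds the hard-coded matrix above from its parts, as
# the comment suggests (use '@' instead of '*' on Blender 2.80 and later).
from math import radians

_translation = mathutils.Matrix.Translation((-1.0, 0.0, 2.0))
_rotation = mathutils.Euler((radians(60), 0.0, radians(-90)), 'XYZ').to_matrix().to_4x4()
camera_to_robot_reconstructed = _translation * _rotation  # matches the literal matrix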

def store_default(contr):
    """ Save the initial position and orientation of the camera """
    global start_position
    global start_orientation

    camera = contr.owner
    # Create a copy of the current positions
    start_position = mathutils.Vector(camera.worldPosition)
    start_orientation = mathutils.Matrix(camera.worldOrientation)

    # look for objects that define the move_cameraFP property to
    # disable keyboard control of the camera
Example #6
# a simple 3D vector (pointing along +Z)
vec = mathutils.Vector((0.0, 0.0, 1.0))

# unit length vector
vec_a = vec.normalized()

vec_b = mathutils.Vector((0.0, 1.0, 2.0))

vec2d = mathutils.Vector((1.0, 2.0))
vec3d = mathutils.Vector((1.0, 0.0, 0.0))
vec4d = vec_a.to_4d()

# other mathutils types
quat = mathutils.Quaternion()
matrix = mathutils.Matrix()

# Comparison operators can be done on Vector classes:

# greater-than and less-than compare vector length.
vec_a > vec_b
vec_a >= vec_b
vec_a < vec_b
vec_a <= vec_b

# == and != compare vector values, e.g. (1, 2, 3) != (3, 2, 1) even though both have the same length
vec_a == vec_b
vec_a != vec_b


# Math can be performed on Vector classes
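
# A short, hedged continuation (not part of the original snippet): common
# vector math with the values defined above.
vec_a + vec_b          # component-wise addition
vec_a - vec_b          # component-wise subtraction
vec_a * 10.0           # scale by a float
vec_a.dot(vec_b)       # dot product (float)
vec_a.cross(vec_b)     # cross product (Vector)
matrix @ vec_a         # transform by a matrix ('*' on Blender versions before 2.8)
quat @ vec_a           # rotate by a quaternion ('*' on Blender versions before 2.8)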
Example #7
def PerspectiveMatrix(fovx, aspect, near=0.1, far=1000.0):
    """Get internal camera matrix"""
    return mathutils.Matrix(
        [[2 / fovx, 0, 0, 0], [0, 2 * aspect / fovx, 0, 0],
         [0, 0, (far + near) / (far - near), (2 * far * near) / (near - far)],
         [0, 0, 1, 0]])
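
# A hedged usage sketch (the fovx/aspect values and the point are illustrative,
# not from the original code): project a camera-space point and divide by w.
proj = PerspectiveMatrix(fovx=1.2, aspect=1.5)
point = mathutils.Vector((0.3, -0.2, 4.0, 1.0))  # homogeneous camera-space point
clip = proj @ point                              # '*' on Blender versions before 2.8
ndc = clip.xyz / clip.w                          # normalized device coordinates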
Example #8
def create_model(self, context, scale):
    """Create the actual model."""
    # FIXME: rewrite - Rewrite entire function (#35)
    global objects
    global ldColors
    global ldMaterials
    global fileName

    fileName = self.filepath
    # Attempt to get the directory the file came from
    # and add it to the `paths` list
    paths[0] = os.path.dirname(fileName)
    Console.log("Attempting to import {0}".format(fileName))

    # The file format, as hinted at by the file extension, is not supported.
    # Recommended reading: http://ghost.kirk.by/file-extensions-are-only-hints
    if fileName[-4:].lower() not in (".ldr", ".dat"):

        Console.log('''ERROR: Reason: Invalid File Type
Must be a .ldr or .dat''')
        self.report({'ERROR'}, '''Error: Invalid File Type
Must be a .ldr or .dat''')
        return {'CANCELLED'}

    # It has the proper file extension, continue with the import
    try:
        # Rotate and scale the parts
        # The scale factor is divided by 25 so whole-number
        # scale factors can be used in the UI. For reference,
        # the default scale of 1 corresponds to 0.04 in Blender.
        trix = mathutils.Matrix((
            (1.0, 0.0, 0.0, 0.0),  # noqa
            (0.0, 0.0, 1.0, 0.0),  # noqa
            (0.0, -1.0, 0.0, 0.0),
            (0.0, 0.0, 0.0, 1.0)  # noqa
        )) * (scale / 25)

        # If LDrawDir does not exist, stop the import
        if not os.path.isdir(LDrawDir):  # noqa
            Console.log('''ERROR: Cannot find LDraw installation at
{0}'''.format(LDrawDir))  # noqa
            self.report({'ERROR'}, '''Cannot find LDraw installation at
{0}'''.format(LDrawDir))  # noqa
            return {'CANCELLED'}

        # Instance the colors module and
        # load the LDraw-defined color definitions
        ldColors = Colors(LDrawDir, AltColorsOpt)  # noqa
        ldColors.load()
        ldMaterials = Materials(ldColors, context.scene.render.engine)

        LDrawFile(context, fileName, 0, trix)

        for cur_obj in objects:
            # The CleanUp import option was selected
            if CleanUpOpt:  # noqa
                Extra_Cleanup.main(cur_obj, LinkParts)  # noqa

            if GapsOpt:  # noqa
                Extra_Part_Gaps.main(cur_obj, scale)

        # The link identical parts import option was selected
        if LinkParts:  # noqa
            Extra_Part_Linked.main(objects)

        # Select all the mesh now that import is complete
        for cur_obj in objects:
            cur_obj.select = True

        # Update the scene with the changes
        context.scene.update()
        objects = []

        # Always reset 3D cursor to <0,0,0> after import
        bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)

        # Display success message
        Console.log("{0} successfully imported!".format(fileName))
        return {'FINISHED'}

    except Exception as e:
        Console.log("ERROR: {0}\n{1}\n".format(
            type(e).__name__, traceback.format_exc()))

        Console.log("ERROR: Reason: {0}.".format(type(e).__name__))

        self.report({'ERROR'}, '''File not imported ("{0}").
Check the console logs for more information.'''.format(type(e).__name__))
        return {'CANCELLED'}
Example #9
def process_next_chunk(context, file, previous_chunk, importedObjects, IMAGE_SEARCH):
    from bpy_extras.image_utils import load_image

    #print previous_chunk.bytes_read, 'BYTES READ'
    contextObName = None
    contextLamp = [None, None]  # object, Data
    contextMaterial = None
    contextMaterialWrapper = None
    contextMatrix_rot = None  # Blender.mathutils.Matrix(); contextMatrix.identity()
    #contextMatrix_tx = None # Blender.mathutils.Matrix(); contextMatrix.identity()
    contextMesh_vertls = None  # flat array: (verts * 3)
    contextMesh_facels = None
    contextMeshMaterials = []  # (matname, [face_idxs])
    contextMeshUV = None  # flat array (verts * 2)

    TEXTURE_DICT = {}
    MATDICT = {}
# 	TEXMODE = Mesh.FaceModes['TEX']

    # Localspace variable names, faster.
    STRUCT_SIZE_FLOAT = struct.calcsize('f')
    STRUCT_SIZE_2FLOAT = struct.calcsize('2f')
    STRUCT_SIZE_3FLOAT = struct.calcsize('3f')
    STRUCT_SIZE_4FLOAT = struct.calcsize('4f')
    STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize('H')
    STRUCT_SIZE_4UNSIGNED_SHORT = struct.calcsize('4H')
    STRUCT_SIZE_4x3MAT = struct.calcsize('ffffffffffff')
    # STRUCT_SIZE_4x3MAT = calcsize('ffffffffffff')
    # print STRUCT_SIZE_4x3MAT, ' STRUCT_SIZE_4x3MAT'
    # only init once
    object_list = []  # for hierarchy
    object_parent = []  # index of parent in hierarchy, 0xFFFF = no parent
    pivot_list = []  # pivots with hierarchy handling

    def putContextMesh(context, myContextMesh_vertls, myContextMesh_facels, myContextMeshMaterials):
        bmesh = bpy.data.meshes.new(contextObName)

        if myContextMesh_facels is None:
            myContextMesh_facels = []

        if myContextMesh_vertls:

            bmesh.vertices.add(len(myContextMesh_vertls) // 3)
            bmesh.vertices.foreach_set("co", myContextMesh_vertls)

            nbr_faces = len(myContextMesh_facels)
            bmesh.polygons.add(nbr_faces)
            bmesh.loops.add(nbr_faces * 3)
            eekadoodle_faces = []
            for v1, v2, v3 in myContextMesh_facels:
                eekadoodle_faces.extend((v3, v1, v2) if v3 == 0 else (v1, v2, v3))
            bmesh.polygons.foreach_set("loop_start", range(0, nbr_faces * 3, 3))
            bmesh.polygons.foreach_set("loop_total", (3,) * nbr_faces)
            bmesh.loops.foreach_set("vertex_index", eekadoodle_faces)

            if bmesh.polygons and contextMeshUV:
                bmesh.uv_layers.new()
                uv_faces = bmesh.uv_layers.active.data[:]
            else:
                uv_faces = None

            for mat_idx, (matName, faces) in enumerate(myContextMeshMaterials):
                if matName is None:
                    bmat = None
                else:
                    bmat = MATDICT.get(matName)
                    # in rare cases no materials defined.
                    if bmat:
                        img = TEXTURE_DICT.get(bmat.name)
                    else:
                        print("    warning: material %r not defined!" % matName)
                        bmat = MATDICT[matName] = bpy.data.materials.new(matName)
                        img = None

                bmesh.materials.append(bmat)  # can be None

                if uv_faces and img:
                    for fidx in faces:
                        bmesh.polygons[fidx].material_index = mat_idx
                        # TODO: How to restore this?
                        # uv_faces[fidx].image = img
                else:
                    for fidx in faces:
                        bmesh.polygons[fidx].material_index = mat_idx

            if uv_faces:
                uvl = bmesh.uv_layers.active.data[:]
                for fidx, pl in enumerate(bmesh.polygons):
                    face = myContextMesh_facels[fidx]
                    v1, v2, v3 = face

                    # eekadoodle
                    if v3 == 0:
                        v1, v2, v3 = v3, v1, v2

                    uvl[pl.loop_start].uv = contextMeshUV[v1 * 2: (v1 * 2) + 2]
                    uvl[pl.loop_start + 1].uv = contextMeshUV[v2 * 2: (v2 * 2) + 2]
                    uvl[pl.loop_start + 2].uv = contextMeshUV[v3 * 2: (v3 * 2) + 2]
                    # always a tri

        bmesh.validate()
        bmesh.update()

        ob = bpy.data.objects.new(contextObName, bmesh)
        object_dictionary[contextObName] = ob
        context.view_layer.active_layer_collection.collection.objects.link(ob)
        importedObjects.append(ob)

        if contextMatrix_rot:
            ob.matrix_local = contextMatrix_rot
            object_matrix[ob] = contextMatrix_rot.copy()

    #a spare chunk
    new_chunk = Chunk()
    temp_chunk = Chunk()

    CreateBlenderObject = False

    def read_float_color(temp_chunk):
        temp_data = file.read(STRUCT_SIZE_3FLOAT)
        temp_chunk.bytes_read += STRUCT_SIZE_3FLOAT
        return [float(col) for col in struct.unpack('<3f', temp_data)]

    def read_float(temp_chunk):
        temp_data = file.read(STRUCT_SIZE_FLOAT)
        temp_chunk.bytes_read += STRUCT_SIZE_FLOAT
        return struct.unpack('<f', temp_data)[0]

    def read_short(temp_chunk):
        temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
        temp_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
        return struct.unpack('<H', temp_data)[0]

    def read_byte_color(temp_chunk):
        temp_data = file.read(struct.calcsize('3B'))
        temp_chunk.bytes_read += 3
        return [float(col) / 255 for col in struct.unpack('<3B', temp_data)]  # data [0,1,2] == rgb

    def read_texture(new_chunk, temp_chunk, name, mapto):
#        new_texture = bpy.data.textures.new(name, type='IMAGE')

        u_scale, v_scale, u_offset, v_offset = 1.0, 1.0, 0.0, 0.0
        mirror = False
        extension = 'wrap'
        img = None  # set when a MAT_MAP_FILEPATH sub-chunk is read
        while (new_chunk.bytes_read < new_chunk.length):
            #print 'MAT_TEXTURE_MAP..while', new_chunk.bytes_read, new_chunk.length
            read_chunk(file, temp_chunk)

            if temp_chunk.ID == MAT_MAP_FILEPATH:
                texture_name, read_str_len = read_string(file)

                img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname, recursive=IMAGE_SEARCH)
                temp_chunk.bytes_read += read_str_len  # plus one for the null character that gets removed

            elif temp_chunk.ID == MAT_MAP_USCALE:
                u_scale = read_float(temp_chunk)
            elif temp_chunk.ID == MAT_MAP_VSCALE:
                v_scale = read_float(temp_chunk)

            elif temp_chunk.ID == MAT_MAP_UOFFSET:
                u_offset = read_float(temp_chunk)
            elif temp_chunk.ID == MAT_MAP_VOFFSET:
                v_offset = read_float(temp_chunk)

            elif temp_chunk.ID == MAT_MAP_TILING:
                tiling = read_short(temp_chunk)
                if tiling & 0x2:
                    extension = 'mirror'
                elif tiling & 0x10:
                    extension = 'decal'

            elif temp_chunk.ID == MAT_MAP_ANG:
                print("\nwarning: ignoring UV rotation")

            skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        # add the map to the material in the right channel
        if img:
            add_texture_to_material(img, (u_scale, v_scale, 1),
                                    (u_offset, v_offset, 0), extension, contextMaterialWrapper, mapto)

    dirname = os.path.dirname(file.name)

    #loop through all the data for this chunk (previous chunk) and see what it is
    while (previous_chunk.bytes_read < previous_chunk.length):
        #print '\t', previous_chunk.bytes_read, 'keep going'
        #read the next chunk
        #print 'reading a chunk'
        read_chunk(file, new_chunk)

        #is it a Version chunk?
        if new_chunk.ID == VERSION:
            #print 'if new_chunk.ID == VERSION:'
            #print 'found a VERSION chunk'
            #read in the version of the file
            #it's an unsigned int (I), 4 bytes
            temp_data = file.read(struct.calcsize('I'))
            version = struct.unpack('<I', temp_data)[0]
            new_chunk.bytes_read += 4  # read the 4 bytes for the version number
            #this loader works with version 3 and below, but may not with 4 and above
            if version > 3:
                print('\tNon-Fatal Error:  Version greater than 3, may not load correctly: ', version)

        #is it an object info chunk?
        elif new_chunk.ID == OBJECTINFO:
            #print 'elif new_chunk.ID == OBJECTINFO:'
            # print 'found an OBJECTINFO chunk'
            process_next_chunk(context, file, new_chunk, importedObjects, IMAGE_SEARCH)

            #keep track of how much we read in the main chunk
            new_chunk.bytes_read += temp_chunk.bytes_read

        #is it an object chunk?
        elif new_chunk.ID == OBJECT:

            if CreateBlenderObject:
                putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
                contextMesh_vertls = []
                contextMesh_facels = []

                ## prepare to receive the next object
                contextMeshMaterials = []  # matname:[face_idxs]
                contextMeshUV = None
                # Reset matrix
                contextMatrix_rot = None
                #contextMatrix_tx = None

            CreateBlenderObject = True
            contextObName, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len

        #is it a material chunk?
        elif new_chunk.ID == MATERIAL:

# 			print("read material")

            #print 'elif new_chunk.ID == MATERIAL:'
            contextMaterial = bpy.data.materials.new('Material')
            contextMaterialWrapper = PrincipledBSDFWrapper(contextMaterial, is_readonly=False, use_nodes=True)

        elif new_chunk.ID == MAT_NAME:
            #print 'elif new_chunk.ID == MAT_NAME:'
            material_name, read_str_len = read_string(file)

# 			print("material name", material_name)

            #plus one for the null character that ended the string
            new_chunk.bytes_read += read_str_len

            contextMaterial.name = material_name.rstrip()  # remove trailing whitespace
            MATDICT[material_name] = contextMaterial

        elif new_chunk.ID == MAT_AMBIENT:
            #print 'elif new_chunk.ID == MAT_AMBIENT:'
            read_chunk(file, temp_chunk)
            # TODO: consider ambient term somehow. maybe add to color
#               if temp_chunk.ID == MAT_FLOAT_COLOR:
#               contextMaterial.mirror_color = read_float_color(temp_chunk)
# 				temp_data = file.read(struct.calcsize('3f'))
# 				temp_chunk.bytes_read += 12
# 				contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
#            elif temp_chunk.ID == MAT_24BIT_COLOR:
#                contextMaterial.mirror_color = read_byte_color(temp_chunk)
# 				temp_data = file.read(struct.calcsize('3B'))
# 				temp_chunk.bytes_read += 3
# 				contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
#            else:
            skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_DIFFUSE:
            #print 'elif new_chunk.ID == MAT_DIFFUSE:'
            read_chunk(file, temp_chunk)
            if temp_chunk.ID == MAT_FLOAT_COLOR:
                contextMaterialWrapper.base_color = read_float_color(temp_chunk)
# 				temp_data = file.read(struct.calcsize('3f'))
# 				temp_chunk.bytes_read += 12
# 				contextMaterial.rgbCol = [float(col) for col in struct.unpack('<3f', temp_data)]
            elif temp_chunk.ID == MAT_24BIT_COLOR:
                contextMaterialWrapper.base_color = read_byte_color(temp_chunk)
# 				temp_data = file.read(struct.calcsize('3B'))
# 				temp_chunk.bytes_read += 3
# 				contextMaterial.rgbCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
            else:
                skip_to_end(file, temp_chunk)

# 			print("read material diffuse color", contextMaterial.diffuse_color)

            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_SPECULAR:
            #print 'elif new_chunk.ID == MAT_SPECULAR:'
            read_chunk(file, temp_chunk)
            # TODO: consider using specular term somehow
#            if temp_chunk.ID == MAT_FLOAT_COLOR:
#                contextMaterial.specular_color = read_float_color(temp_chunk)
# 				temp_data = file.read(struct.calcsize('3f'))
# 				temp_chunk.bytes_read += 12
# 				contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
#            elif temp_chunk.ID == MAT_24BIT_COLOR:
#                contextMaterial.specular_color = read_byte_color(temp_chunk)
# 				temp_data = file.read(struct.calcsize('3B'))
# 				temp_chunk.bytes_read += 3
# 				contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
#            else:
            skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_TEXTURE_MAP:
            read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR")

        elif new_chunk.ID == MAT_SPECULAR_MAP:
            read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY")

        elif new_chunk.ID == MAT_OPACITY_MAP:
            read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA")

        elif new_chunk.ID == MAT_BUMP_MAP:
            read_texture(new_chunk, temp_chunk, "Bump", "NORMAL")

        elif new_chunk.ID == MAT_TRANSPARENCY:
            #print 'elif new_chunk.ID == MAT_TRANSPARENCY:'
            read_chunk(file, temp_chunk)

            if temp_chunk.ID == PERCENTAGE_SHORT:
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                temp_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                contextMaterialWrapper.alpha = 1 - (float(struct.unpack('<H', temp_data)[0]) / 100)
            elif temp_chunk.ID == PERCENTAGE_FLOAT:
                temp_data = file.read(STRUCT_SIZE_FLOAT)
                temp_chunk.bytes_read += STRUCT_SIZE_FLOAT
                contextMaterialWrapper.alpha = 1 - float(struct.unpack('f', temp_data)[0])
            else:
                print( "Cannot read material transparency")

            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == OBJECT_LIGHT:  # Basic lamp support.

            temp_data = file.read(STRUCT_SIZE_3FLOAT)

            x, y, z = struct.unpack('<3f', temp_data)
            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT

            # the lamp is not added to the object dictionary, that would be confusing
            contextLamp[1] = bpy.data.lights.new("Lamp", 'POINT')
            contextLamp[0] = ob = bpy.data.objects.new("Lamp", contextLamp[1])

            context.view_layer.active_layer_collection.collection.objects.link(ob)
            importedObjects.append(contextLamp[0])

            #print 'number of faces: ', num_faces
            #print x,y,z
            contextLamp[0].location = x, y, z

            # Reset matrix
            contextMatrix_rot = None
            #contextMatrix_tx = None
            #print contextLamp.name,

        elif new_chunk.ID == OBJECT_MESH:
            # print 'Found an OBJECT_MESH chunk'
            pass
        elif new_chunk.ID == OBJECT_VERTICES:
            """
            Worldspace vertex locations
            """
            # print 'elif new_chunk.ID == OBJECT_VERTICES:'
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_verts = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            # print 'number of verts: ', num_verts
            contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(STRUCT_SIZE_3FLOAT * num_verts))
            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT * num_verts
            # dummyvert is not used atm!

            #print 'object verts: bytes read: ', new_chunk.bytes_read

        elif new_chunk.ID == OBJECT_FACES:
            # print 'elif new_chunk.ID == OBJECT_FACES:'
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_faces = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2
            #print 'number of faces: ', num_faces

            # print '\ngetting a face'
            temp_data = file.read(STRUCT_SIZE_4UNSIGNED_SHORT * num_faces)
            new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT * num_faces  # 4 short ints x 2 bytes each
            contextMesh_facels = struct.unpack('<%dH' % (num_faces * 4), temp_data)
            contextMesh_facels = [contextMesh_facels[i - 3:i] for i in range(3, (num_faces * 4) + 3, 4)]

        elif new_chunk.ID == OBJECT_MATERIAL:
            # print 'elif new_chunk.ID == OBJECT_MATERIAL:'
            material_name, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len  # remove 1 null character.

            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_faces_using_mat = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT

            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat

            temp_data = struct.unpack("<%dH" % (num_faces_using_mat), temp_data)

            contextMeshMaterials.append((material_name, temp_data))

            #look up the material in all the materials

        elif new_chunk.ID == OBJECT_UV:
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_uv = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            temp_data = file.read(STRUCT_SIZE_2FLOAT * num_uv)
            new_chunk.bytes_read += STRUCT_SIZE_2FLOAT * num_uv
            contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data)

        elif new_chunk.ID == OBJECT_TRANS_MATRIX:
            # How do we know the matrix size? 64 bytes == 4x4, 48 bytes == 4x3
            temp_data = file.read(STRUCT_SIZE_4x3MAT)
            data = list(struct.unpack('<ffffffffffff', temp_data))
            new_chunk.bytes_read += STRUCT_SIZE_4x3MAT

            contextMatrix_rot = mathutils.Matrix((data[:3] + [0],
                                                  data[3:6] + [0],
                                                  data[6:9] + [0],
                                                  data[9:] + [1],
                                                  )).transposed()

        elif new_chunk.ID == MAT_MAP_FILEPATH:
            texture_name, read_str_len = read_string(file)
            if contextMaterial.name not in TEXTURE_DICT:
                TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname, place_holder=False, recursive=IMAGE_SEARCH)

            new_chunk.bytes_read += read_str_len  # plus one for the null character that gets removed
        elif new_chunk.ID == EDITKEYFRAME:
            pass

        # including these here means their EK_OB_NODE_HEADER chunks are scanned
        elif new_chunk.ID in {ED_KEY_AMBIENT_NODE,
                               ED_KEY_OBJECT_NODE,
                               ED_KEY_CAMERA_NODE,
                               ED_KEY_TARGET_NODE,
                               ED_KEY_LIGHT_NODE,
                               ED_KEY_L_TARGET_NODE,
                               ED_KEY_SPOTLIGHT_NODE}:  # another object is being processed
            child = None

        elif new_chunk.ID == EK_OB_NODE_HEADER:
            object_name, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
            new_chunk.bytes_read += 4
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            hierarchy = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            child = object_dictionary.get(object_name)

            if child is None:
                child = bpy.data.objects.new(object_name, None)  # create an empty object
                context.view_layer.active_layer_collection.collection.objects.link(child)
                importedObjects.append(child)

            object_list.append(child)
            object_parent.append(hierarchy)
            pivot_list.append(mathutils.Vector((0.0, 0.0, 0.0)))

        elif new_chunk.ID == EK_OB_INSTANCE_NAME:
            object_name, read_str_len = read_string(file)
            # child.name = object_name
            child.name += "." + object_name
            object_dictionary[object_name] = child
            new_chunk.bytes_read += read_str_len
            # print("new instance object:", object_name)

        elif new_chunk.ID == EK_OB_PIVOT:  # translation
            temp_data = file.read(STRUCT_SIZE_3FLOAT)
            pivot = struct.unpack('<3f', temp_data)
            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
            pivot_list[-1] = mathutils.Vector(pivot)

        elif new_chunk.ID == EK_OB_POSITION_TRACK:  # translation
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_3FLOAT)
                loc = struct.unpack('<3f', temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
                if nframe == 0:
                    child.location = loc

        elif new_chunk.ID == EK_OB_ROTATION_TRACK:  # rotation
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_4FLOAT)
                rad, axis_x, axis_y, axis_z = struct.unpack("<4f", temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_4FLOAT
                if nframe == 0:
                    child.rotation_euler = mathutils.Quaternion((axis_x, axis_y, axis_z), -rad).to_euler()   # why negative?

        elif new_chunk.ID == EK_OB_SCALE_TRACK:  # scale
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_3FLOAT)
                sca = struct.unpack('<3f', temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
                if nframe == 0:
                    child.scale = sca

        else:  # (new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL):
            # print 'skipping to end of this chunk'
            #print("unknown chunk: "+hex(new_chunk.ID))
            buffer_size = new_chunk.length - new_chunk.bytes_read
            binary_format = "%ic" % buffer_size
            temp_data = file.read(struct.calcsize(binary_format))
            new_chunk.bytes_read += buffer_size

        #update the previous chunk bytes read
        # print 'previous_chunk.bytes_read += new_chunk.bytes_read'
        # print previous_chunk.bytes_read, new_chunk.bytes_read
        previous_chunk.bytes_read += new_chunk.bytes_read
        ## print 'Bytes left in this chunk: ', previous_chunk.length - previous_chunk.bytes_read

    # FINISHED LOOP
    # There will be a number of objects still not added
    if CreateBlenderObject:
        putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMeshMaterials)

    # Assign parents to objects
    # check _if_ we need to assign first because doing so recalcs the depsgraph
    for ind, ob in enumerate(object_list):
        parent = object_parent[ind]
        if parent == ROOT_OBJECT:
            if ob.parent is not None:
                ob.parent = None
        else:
            if ob.parent != object_list[parent]:
                if ob == object_list[parent]:
                    print('   warning: Cannot assign self to parent ', ob)
                else:
                    ob.parent = object_list[parent]

            # pivot_list[ind] += pivot_list[parent]  # XXX, not sure this is correct, should parent space matrix be applied before combining?
    # fix pivots
    for ind, ob in enumerate(object_list):
        if ob.type == 'MESH':
            pivot = pivot_list[ind]
            pivot_matrix = object_matrix.get(ob, mathutils.Matrix())  # unlikely to fail
            pivot_matrix = mathutils.Matrix.Translation(pivot_matrix.to_3x3() @ -pivot)
            ob.data.transform(pivot_matrix)
Example #10
    def toBlender(self):
        mat = mathutils.Matrix(
            [self.rows[0], self.rows[1], self.rows[2], [0, 0, 0, 1]])
        return mat
Example #11
    def parse(self, filename):
        """Construct tri's in each brick."""
        # FIXME: rewrite - Rework function (#35)
        subfiles = []

        while True:
            # Get the path to the part
            filename = (filename
                        if os.path.exists(filename) else locatePart(filename))

            # The part does not exist
            # TODO Do not halt on this condition (#11)
            if filename is None:
                return False

            # Read the located part
            with open(filename, "rt", encoding="utf_8") as f:
                lines = f.readlines()

            # Some models may not have headers or enough lines
            # to support a header. Handle this case to avoid
            # hitting an IndexError trying to extract the header line.
            partTypeLine = ("" if len(lines) <= 3 else lines[3])

            # Check the part header for top-level part status
            is_top_part = is_top_level_part(partTypeLine)

            # Linked parts relies on the flawed is_top_part logic (#112)
            # TODO Correct linked parts to use proper logic
            # and remove this kludge
            if LinkParts:  # noqa
                is_top_part = filename == fileName  # noqa

            self.part_count += 1
            if self.part_count > 1 and self.level == 0:
                self.subparts.append([
                    filename, self.level + 1, self.mat, self.colour,
                    self.orientation
                ])
            else:
                for retval in lines:
                    tmpdate = retval.strip()
                    if tmpdate != "":
                        tmpdate = tmpdate.split()

                        # Part content
                        if tmpdate[0] == "1":
                            new_file = tmpdate[14]
                            (x, y, z, a, b, c, d, e, f, g, h,
                             i) = map(float, tmpdate[2:14])

                            # Reset orientation of top-level part,
                            # track original orientation
                            # TODO Use corrected isPart logic
                            if self.part_count == 1 and is_top_part and LinkParts:  # noqa
                                mat_new = self.mat * mathutils.Matrix(
                                    ((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0),
                                     (0, 0, 0, 1)))
                                orientation = self.mat * mathutils.Matrix((
                                    (a, b, c, x), (d, e, f, y), (g, h, i, z),
                                    (0, 0, 0, 1))) * mathutils.Matrix.Rotation(
                                        math.radians(90), 4, 'X')
                            else:
                                mat_new = self.mat * mathutils.Matrix(
                                    ((a, b, c, x), (d, e, f, y), (g, h, i, z),
                                     (0, 0, 0, 1)))
                                orientation = None
                            color = tmpdate[1]
                            if color == '16':
                                color = self.colour
                            subfiles.append([new_file, mat_new, color])

                            # When top-level part, save orientation separately
                            # TODO Use corrected is_top_part logic
                            if self.part_count == 1 and is_top_part:
                                subfiles.append(
                                    ['orientation', orientation, ''])

                        # Triangle (tri)
                        if tmpdate[0] == "3":
                            self.parse_line(tmpdate)

                        # Quadrilateral (quad)
                        if tmpdate[0] == "4":
                            self.parse_quad(tmpdate)

            if len(subfiles) > 0:
                subfile = subfiles.pop()
                filename = subfile[0]
                # When top-level brick orientation information found,
                # save it in self.orientation
                if filename == 'orientation':
                    self.orientation = subfile[1]
                    subfile = subfiles.pop()
                    filename = subfile[0]
                self.mat = subfile[1]
                self.colour = subfile[2]
            else:
                break
Example #12
    def __applyAdditionalTransform(self,
                                   obj,
                                   src,
                                   dest,
                                   influence,
                                   pose_bones,
                                   rotation=False,
                                   location=False):
        """ apply additional transform to the bone.
         @param obj the object of the target armature
         @param src the PoseBone that apply the transform to another bone.
         @param dest the PoseBone that another bone apply the transform to.
        """
        if not rotation and not location:
            return
        bone_name = None

        # If src already has an additional transform applied by another bone,
        # copy that bone's constraint to dest.
        src = self.__findNoneAdditionalBone(src, pose_bones)

        with bpyutils.edit_object(obj):
            src_bone = obj.data.edit_bones[src.name]
            s_bone = obj.data.edit_bones.new(name='shadow')
            s_bone.head = src_bone.head
            s_bone.tail = src_bone.tail
            s_bone.parent = src_bone.parent
            #s_bone.use_connect = src_bone.use_connect
            s_bone.layers = (False, False, False, False, False, False, False,
                             False, True, False, False, False, False, False,
                             False, False, False, False, False, False, False,
                             False, False, False, False, False, False, False,
                             False, False, False, False)
            s_bone.use_inherit_rotation = False
            s_bone.use_local_location = True
            s_bone.use_inherit_scale = False
            bone_name = s_bone.name

            dest_bone = obj.data.edit_bones[dest.name]
            dest_bone.use_inherit_rotation = not rotation
            dest_bone.use_local_location = not location

        p_bone = obj.pose.bones[bone_name]
        p_bone.is_mmd_shadow_bone = True

        if rotation:
            c = p_bone.constraints.new('COPY_ROTATION')
            c.target = obj
            c.subtarget = src.name
            c.target_space = 'LOCAL'
            c.owner_space = 'LOCAL'

            if influence > 0:
                c.influence = influence
            else:
                c.influence = -influence
                c.invert_x = True
                c.invert_y = True
                c.invert_z = True

        if location:
            c = p_bone.constraints.new('COPY_LOCATION')
            c.target = obj
            c.subtarget = src.name
            c.target_space = 'LOCAL'
            c.owner_space = 'LOCAL'

            if influence > 0:
                c.influence = influence
            else:
                c.influence = -influence
                c.invert_x = True
                c.invert_y = True
                c.invert_z = True

        c = dest.constraints.new('CHILD_OF')

        c.target = obj
        c.subtarget = p_bone.name
        c.use_location_x = location
        c.use_location_y = location
        c.use_location_z = location
        c.use_rotation_x = rotation
        c.use_rotation_y = rotation
        c.use_rotation_z = rotation
        c.use_scale_x = False
        c.use_scale_y = False
        c.use_scale_z = False
        c.inverse_matrix = mathutils.Matrix(src.matrix).inverted()

        if dest.parent is not None:
            parent = dest.parent
            c = dest.constraints.new('CHILD_OF')
            c.target = obj
            c.subtarget = parent.name
            c.use_location_x = False
            c.use_location_y = False
            c.use_location_z = False
            c.use_scale_x = False
            c.use_scale_y = False
            c.use_scale_z = False
            c.inverse_matrix = mathutils.Matrix(parent.matrix).inverted()
Example #13
class PMXImporter:
    TO_BLE_MATRIX = mathutils.Matrix([[1.0, 0.0, 0.0, 0.0],
                                      [0.0, 0.0, 1.0, 0.0],
                                      [0.0, 1.0, 0.0, 0.0],
                                      [0.0, 0.0, 0.0, 1.0]])

    def __init__(self):
        self.__model = None
        self.__targetScene = bpy.context.scene

        self.__scale = None

        self.__root = None
        self.__armObj = None
        self.__meshObj = None

        self.__vertexTable = None
        self.__vertexGroupTable = None
        self.__textureTable = None

        self.__boneTable = []
        self.__rigidTable = []
        self.__nonCollisionJointTable = None
        self.__jointTable = []

        self.__materialFaceCountTable = None
        self.__nonCollisionConstraints = []

        # object groups
        self.__allObjGroup = None  # a group which contains all objects created for the target model by mmd_tools.
        self.__mainObjGroup = None  # a group which contains armature and mesh objects.
        self.__rigidObjGroup = None  # a group which contains objects of rigid bodies imported from a pmx model.
        self.__jointObjGroup = None  # a group which contains objects of joints imported from a pmx model.
        self.__tempObjGroup = None  # a group which contains temporary objects.

    @staticmethod
    def flipUV_V(uv):
        u, v = uv
        return [u, 1.0 - v]

    def __getMaterialIndexFromFaceIndex(self, face_index):
        count = 0
        for i, c in enumerate(self.__materialFaceCountTable):
            if face_index < count + c:
                return i
            count += c
        raise Exception('invalid face index.')

    def __createObjects(self):
        """ Create main objects and link them to scene.
        """
        pmxModel = self.__model

        self.__root = bpy.data.objects.new(name=pmxModel.name,
                                           object_data=None)
        self.__targetScene.objects.link(self.__root)

        mesh = bpy.data.meshes.new(name=pmxModel.name)
        self.__meshObj = bpy.data.objects.new(name=pmxModel.name + '_mesh',
                                              object_data=mesh)

        arm = bpy.data.armatures.new(name=pmxModel.name)
        self.__armObj = bpy.data.objects.new(name=pmxModel.name + '_arm',
                                             object_data=arm)
        self.__meshObj.parent = self.__armObj

        self.__targetScene.objects.link(self.__meshObj)
        self.__targetScene.objects.link(self.__armObj)

        self.__armObj.parent = self.__root

        self.__allObjGroup.objects.link(self.__root)
        self.__allObjGroup.objects.link(self.__armObj)
        self.__allObjGroup.objects.link(self.__meshObj)
        self.__mainObjGroup.objects.link(self.__armObj)
        self.__mainObjGroup.objects.link(self.__meshObj)

    def __createGroups(self):
        pmxModel = self.__model
        self.__mainObjGroup = bpy.data.groups.new(name='mmd_tools.' +
                                                  pmxModel.name)
        logging.debug('Create main group: %s', self.__mainObjGroup.name)
        self.__allObjGroup = bpy.data.groups.new(name='mmd_tools.' +
                                                 pmxModel.name + '_all')
        logging.debug('Create all group: %s', self.__allObjGroup.name)
        self.__rigidObjGroup = bpy.data.groups.new(name='mmd_tools.' +
                                                   pmxModel.name + '_rigids')
        logging.debug('Create rigid group: %s', self.__rigidObjGroup.name)
        self.__jointObjGroup = bpy.data.groups.new(name='mmd_tools.' +
                                                   pmxModel.name + '_joints')
        logging.debug('Create joint group: %s', self.__jointObjGroup.name)
        self.__tempObjGroup = bpy.data.groups.new(name='mmd_tools.' +
                                                  pmxModel.name + '_temp')
        logging.debug('Create temporary group: %s', self.__tempObjGroup.name)

    def __importVertexGroup(self):
        self.__vertexGroupTable = []
        for i in self.__model.bones:
            self.__vertexGroupTable.append(
                self.__meshObj.vertex_groups.new(name=i.name))

    def __importVertices(self):
        self.__importVertexGroup()

        pmxModel = self.__model
        mesh = self.__meshObj.data

        mesh.vertices.add(count=len(self.__model.vertices))
        for i, pv in enumerate(pmxModel.vertices):
            bv = mesh.vertices[i]

            bv.co = mathutils.Vector(pv.co) * self.TO_BLE_MATRIX * self.__scale
            bv.normal = pv.normal

            if isinstance(pv.weight.weights, pmx.BoneWeightSDEF):
                self.__vertexGroupTable[pv.weight.bones[0]].add(
                    index=[i], weight=pv.weight.weights.weight, type='REPLACE')
                self.__vertexGroupTable[pv.weight.bones[1]].add(
                    index=[i],
                    weight=1.0 - pv.weight.weights.weight,
                    type='REPLACE')
            elif len(pv.weight.bones) == 1:
                self.__vertexGroupTable[pv.weight.bones[0]].add(index=[i],
                                                                weight=1.0,
                                                                type='REPLACE')
            elif len(pv.weight.bones) == 2:
                self.__vertexGroupTable[pv.weight.bones[0]].add(
                    index=[i], weight=pv.weight.weights[0], type='REPLACE')
                self.__vertexGroupTable[pv.weight.bones[1]].add(
                    index=[i],
                    weight=1.0 - pv.weight.weights[0],
                    type='REPLACE')
            elif len(pv.weight.bones) == 4:
                self.__vertexGroupTable[pv.weight.bones[0]].add(
                    index=[i], weight=pv.weight.weights[0], type='REPLACE')
                self.__vertexGroupTable[pv.weight.bones[1]].add(
                    index=[i], weight=pv.weight.weights[1], type='REPLACE')
                self.__vertexGroupTable[pv.weight.bones[2]].add(
                    index=[i], weight=pv.weight.weights[2], type='REPLACE')
                self.__vertexGroupTable[pv.weight.bones[3]].add(
                    index=[i], weight=pv.weight.weights[3], type='REPLACE')
            else:
                raise Exception('unknown bone weight type.')

    def __importTextures(self):
        pmxModel = self.__model

        self.__textureTable = []
        for i in pmxModel.textures:
            name = os.path.basename(i.path).split('.')[0]
            tex = bpy.data.textures.new(name=name, type='IMAGE')
            try:
                tex.image = bpy.data.images.load(filepath=i.path)
            except Exception:
                logging.warning('failed to load %s', str(i.path))
            self.__textureTable.append(tex)

    def __createEditBones(self, obj, pmx_bones):
        """ create EditBones from pmx file data.
        @return the list of bone names, indexed by the pmx bone index.
        """
        editBoneTable = []
        nameTable = []
        dependency_cycle_ik_bones = []
        for i, p_bone in enumerate(pmx_bones):
            if p_bone.isIK:
                if p_bone.target != -1:
                    t = pmx_bones[p_bone.target]
                    if p_bone.parent == t.parent:
                        dependency_cycle_ik_bones.append(i)

        with bpyutils.edit_object(obj):
            for i in pmx_bones:
                bone = obj.data.edit_bones.new(name=i.name)
                loc = mathutils.Vector(
                    i.location) * self.__scale * self.TO_BLE_MATRIX
                bone.head = loc
                editBoneTable.append(bone)
                nameTable.append(bone.name)

            for i, (b_bone, m_bone) in enumerate(zip(editBoneTable,
                                                     pmx_bones)):
                if m_bone.parent != -1:
                    if i not in dependency_cycle_ik_bones:
                        b_bone.parent = editBoneTable[m_bone.parent]
                    else:
                        b_bone.parent = editBoneTable[m_bone.parent].parent

            for b_bone, m_bone in zip(editBoneTable, pmx_bones):
                if isinstance(m_bone.displayConnection, int):
                    if m_bone.displayConnection != -1:
                        b_bone.tail = editBoneTable[
                            m_bone.displayConnection].head
                    else:
                        b_bone.tail = b_bone.head
                else:
                    loc = mathutils.Vector(
                        m_bone.displayConnection
                    ) * self.TO_BLE_MATRIX * self.__scale
                    b_bone.tail = b_bone.head + loc

            for b_bone in editBoneTable:
                # Set the length of too-short bones to 1 because Blender deletes zero-length bones.
                if b_bone.length < 0.001:
                    loc = mathutils.Vector([0, 0, 1]) * self.__scale
                    b_bone.tail = b_bone.head + loc

            for b_bone, m_bone in zip(editBoneTable, pmx_bones):
                if b_bone.parent is not None and b_bone.parent.tail == b_bone.head:
                    if not m_bone.isMovable:
                        b_bone.use_connect = True

        return nameTable

    def __sortPoseBonesByBoneIndex(self, pose_bones, bone_names):
        r = []
        for i in bone_names:
            r.append(pose_bones[i])
        return r

    def __applyIk(self, index, pmx_bone, pose_bones):
        """ Create an IK bone constraint.
         If the IK bone and the target bone are separated, a dummy IK target bone is created as a child of the IK bone.
         @param index the bone index
         @param pmx_bone pmx.Bone
         @param pose_bones the list of PoseBones sorted by the bone index
        """

        ik_bone = pose_bones[pmx_bone.target].parent
        target_bone = pose_bones[index]

        if (mathutils.Vector(ik_bone.tail) -
                mathutils.Vector(target_bone.head)).length > 0.001:
            logging.info('Found a separated IK constraint: IK: %s, Target: %s',
                         ik_bone.name, target_bone.name)
            with bpyutils.edit_object(self.__armObj):
                s_bone = self.__armObj.data.edit_bones.new(name='shadow')
                logging.info('  Create a proxy bone: %s', s_bone.name)
                s_bone.head = ik_bone.tail
                s_bone.tail = s_bone.head + mathutils.Vector([0, 0, 1])
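                # Park the proxy bone on armature layer 9 (index 8) so it stays
                # off the user-facing layers.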
                s_bone.layers = (False, False, False, False, False, False,
                                 False, False, True, False, False, False,
                                 False, False, False, False, False, False,
                                 False, False, False, False, False, False,
                                 False, False, False, False, False, False,
                                 False, False)
                s_bone.parent = self.__armObj.data.edit_bones[target_bone.name]
                logging.info('  Set parent: %s -> %s', target_bone.name,
                             s_bone.name)
                # EditBones must not be accessed outside of the 'with' block.
                s_bone_name = s_bone.name

            logging.info('  Use %s as IK target bone instead of %s',
                         s_bone_name, target_bone.name)
            target_bone = self.__armObj.pose.bones[s_bone_name]
            target_bone.is_mmd_shadow_bone = True

        ikConst = ik_bone.constraints.new('IK')
        ikConst.chain_count = len(pmx_bone.ik_links)
        ikConst.target = self.__armObj
        ikConst.subtarget = target_bone.name
        if pmx_bone.isRotatable and not pmx_bone.isMovable:
            ikConst.use_location = pmx_bone.isMovable
            ikConst.use_rotation = pmx_bone.isRotatable
        for i in pmx_bone.ik_links:
            if i.maximumAngle is not None:
                bone = pose_bones[i.target]
                bone.use_ik_limit_x = True
                bone.use_ik_limit_y = True
                bone.use_ik_limit_z = True
                bone.ik_max_x = -i.minimumAngle[0]
                bone.ik_max_y = i.maximumAngle[1]
                bone.ik_max_z = i.maximumAngle[2]
                bone.ik_min_x = -i.maximumAngle[0]
                bone.ik_min_y = i.minimumAngle[1]
                bone.ik_min_z = i.minimumAngle[2]

    @staticmethod
    def __findNoneAdditionalBone(target, pose_bones, visited_bones=None):
        if visited_bones is None:
            visited_bones = []
        if target in visited_bones:
            raise ValueError('Detected cyclic dependency.')
        visited_bones.append(target)
        for i in filter(lambda x: x.type == 'CHILD_OF', target.constraints):
            if i.subtarget != target.parent.name:
                return PMXImporter.__findNoneAdditionalBone(
                    pose_bones[i.subtarget], pose_bones, visited_bones)
        return target

    def __applyAdditionalTransform(self,
                                   obj,
                                   src,
                                   dest,
                                   influence,
                                   pose_bones,
                                   rotation=False,
                                   location=False):
        """ apply additional transform to the bone.
         @param obj the object of the target armature
         @param src the PoseBone that applies the transform to another bone.
         @param dest the PoseBone that the transform is applied to.
        """
        if not rotation and not location:
            return
        bone_name = None

        # If src is itself driven by an additional transform from another bone,
        # resolve it to the original source bone before constraining dest.
        src = self.__findNoneAdditionalBone(src, pose_bones)

        with bpyutils.edit_object(obj):
            src_bone = obj.data.edit_bones[src.name]
            s_bone = obj.data.edit_bones.new(name='shadow')
            s_bone.head = src_bone.head
            s_bone.tail = src_bone.tail
            s_bone.parent = src_bone.parent
            #s_bone.use_connect = src_bone.use_connect
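            # Keep the shadow bone on armature layer 9 (index 8), away from the
            # user-facing layers.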
            s_bone.layers = (False, False, False, False, False, False, False,
                             False, True, False, False, False, False, False,
                             False, False, False, False, False, False, False,
                             False, False, False, False, False, False, False,
                             False, False, False, False)
            s_bone.use_inherit_rotation = False
            s_bone.use_local_location = True
            s_bone.use_inherit_scale = False
            bone_name = s_bone.name

            dest_bone = obj.data.edit_bones[dest.name]
            dest_bone.use_inherit_rotation = not rotation
            dest_bone.use_local_location = not location

        p_bone = obj.pose.bones[bone_name]
        p_bone.is_mmd_shadow_bone = True

        if rotation:
            c = p_bone.constraints.new('COPY_ROTATION')
            c.target = obj
            c.subtarget = src.name
            c.target_space = 'LOCAL'
            c.owner_space = 'LOCAL'

            if influence > 0:
                c.influence = influence
            else:
                c.influence = -influence
                c.invert_x = True
                c.invert_y = True
                c.invert_z = True

        if location:
            c = p_bone.constraints.new('COPY_LOCATION')
            c.target = obj
            c.subtarget = src.name
            c.target_space = 'LOCAL'
            c.owner_space = 'LOCAL'

            if influence > 0:
                c.influence = influence
            else:
                c.influence = -influence
                c.invert_x = True
                c.invert_y = True
                c.invert_z = True

        c = dest.constraints.new('CHILD_OF')

        c.target = obj
        c.subtarget = p_bone.name
        c.use_location_x = location
        c.use_location_y = location
        c.use_location_z = location
        c.use_rotation_x = rotation
        c.use_rotation_y = rotation
        c.use_rotation_z = rotation
        c.use_scale_x = False
        c.use_scale_y = False
        c.use_scale_z = False
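        # Store the inverse of the source's current matrix so the CHILD_OF
        # constraint does not displace dest when it is first evaluated.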
        c.inverse_matrix = mathutils.Matrix(src.matrix).inverted()

        if dest.parent is not None:
            parent = dest.parent
            c = dest.constraints.new('CHILD_OF')
            c.target = obj
            c.subtarget = parent.name
            c.use_location_x = False
            c.use_location_y = False
            c.use_location_z = False
            c.use_scale_x = False
            c.use_scale_y = False
            c.use_scale_z = False
            c.inverse_matrix = mathutils.Matrix(parent.matrix).inverted()

    def __importBones(self):
        pmxModel = self.__model

        boneNameTable = self.__createEditBones(self.__armObj, pmxModel.bones)
        pose_bones = self.__sortPoseBonesByBoneIndex(self.__armObj.pose.bones,
                                                     boneNameTable)
        self.__boneTable = pose_bones
        for i, p_bone in sorted(enumerate(pmxModel.bones),
                                key=lambda x: x[1].transform_order):
            b_bone = pose_bones[i]
            b_bone.mmd_bone_name_e = p_bone.name_e

            if not p_bone.isRotatable:
                b_bone.lock_rotation = [True, True, True]

            if not p_bone.isMovable:
                b_bone.lock_location = [True, True, True]

            if p_bone.isIK:
                if p_bone.target != -1:
                    self.__applyIk(i, p_bone, pose_bones)

            if p_bone.hasAdditionalRotate or p_bone.hasAdditionalLocation:
                bone_index, influ = p_bone.additionalTransform
                src_bone = pmxModel.bones[bone_index]
                self.__applyAdditionalTransform(self.__armObj,
                                                pose_bones[bone_index], b_bone,
                                                influ,
                                                self.__armObj.pose.bones,
                                                p_bone.hasAdditionalRotate,
                                                p_bone.hasAdditionalLocation)

            if p_bone.localCoordinate is not None:
                b_bone.mmd_enabled_local_axis = True
                b_bone.mmd_local_axis_x = p_bone.localCoordinate.x_axis
                b_bone.mmd_local_axis_z = p_bone.localCoordinate.z_axis

            if len(b_bone.children) == 0:
                b_bone.is_mmd_tip_bone = True
                b_bone.lock_rotation = [True, True, True]
                b_bone.lock_location = [True, True, True]
                b_bone.lock_scale = [True, True, True]
                b_bone.bone.hide = True

    def __importRigids(self):
        self.__rigidTable = []
        self.__nonCollisionJointTable = {}
        start_time = time.time()
        collisionGroups = []
        for i in range(16):
            collisionGroups.append([])
        for rigid in self.__model.rigids:
            if self.__onlyCollisions and rigid.mode != pmx.Rigid.MODE_STATIC:
                continue

            loc = mathutils.Vector(
                rigid.location) * self.TO_BLE_MATRIX * self.__scale
            rot = mathutils.Vector(rigid.rotation) * self.TO_BLE_MATRIX * -1
            rigid_type = None
            if rigid.type == pmx.Rigid.TYPE_SPHERE:
                bpy.ops.mesh.primitive_uv_sphere_add(segments=16,
                                                     ring_count=8,
                                                     size=1,
                                                     view_align=False,
                                                     enter_editmode=False)
                size = mathutils.Vector([1, 1, 1]) * rigid.size[0]
                rigid_type = 'SPHERE'
                bpy.ops.object.shade_smooth()
            elif rigid.type == pmx.Rigid.TYPE_BOX:
                bpy.ops.mesh.primitive_cube_add(view_align=False,
                                                enter_editmode=False)
                size = mathutils.Vector(rigid.size) * self.TO_BLE_MATRIX
                rigid_type = 'BOX'
            elif rigid.type == pmx.Rigid.TYPE_CAPSULE:
                obj = utils.makeCapsule(radius=rigid.size[0],
                                        height=rigid.size[1])
                size = mathutils.Vector([1, 1, 1])
                rigid_type = 'CAPSULE'
                bpy.ops.object.shade_smooth()
            else:
                raise Exception('Invalid rigid type')

            if rigid.type != pmx.Rigid.TYPE_CAPSULE:
                obj = bpy.context.selected_objects[0]
            obj.name = rigid.name
            obj.scale = size * self.__scale
            obj.hide_render = True
            obj.draw_type = 'WIRE'
            obj.is_mmd_rigid = True
            self.__rigidObjGroup.objects.link(obj)
            utils.selectAObject(obj)
            bpy.ops.object.transform_apply(location=False,
                                           rotation=True,
                                           scale=True)
            obj.location = loc
            obj.rotation_euler = rot
            bpy.ops.rigidbody.object_add(type='ACTIVE')
            if rigid.mode == pmx.Rigid.MODE_STATIC and rigid.bone is not None:
                bpy.ops.object.modifier_add(type='COLLISION')
                utils.setParentToBone(obj, self.__armObj,
                                      self.__boneTable[rigid.bone].name)
            elif rigid.bone is not None:
                bpy.ops.object.select_all(action='DESELECT')
                obj.select = True
                bpy.context.scene.objects.active = self.__root
                bpy.ops.object.parent_set(type='OBJECT',
                                          xmirror=False,
                                          keep_transform=True)

                target_bone = self.__boneTable[rigid.bone]
                empty = bpy.data.objects.new('mmd_bonetrack', None)
                bpy.context.scene.objects.link(empty)
                empty.location = target_bone.tail
                empty.empty_draw_size = 0.5 * self.__scale
                empty.empty_draw_type = 'ARROWS'
                empty.is_mmd_rigid_track_target = True
                self.__tempObjGroup.objects.link(empty)

                utils.selectAObject(empty)
                bpy.context.scene.objects.active = obj
                bpy.ops.object.parent_set(type='OBJECT',
                                          xmirror=False,
                                          keep_transform=False)

                empty.hide = True

                for i in target_bone.constraints:
                    if i.type == 'IK':
                        i.influence = 0
                const = target_bone.constraints.new('DAMPED_TRACK')
                const.target = empty
            else:
                obj.parent = self.__armObj
                bpy.ops.object.select_all(action='DESELECT')
                obj.select = True

            obj.rigid_body.collision_shape = rigid_type
            group_flags = []
            rb = obj.rigid_body
            rb.friction = rigid.friction
            rb.mass = rigid.mass
            rb.angular_damping = rigid.rotation_attenuation
            rb.linear_damping = rigid.velocity_attenuation
            rb.restitution = rigid.bounce
            if rigid.mode == pmx.Rigid.MODE_STATIC:
                rb.kinematic = True

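            # A cleared bit i in collision_group_mask means this rigid body must
            # not collide with collision group i, so pair it with every body
            # already placed in that group using a non-collision constraint.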
            for i in range(16):
                if rigid.collision_group_mask & (1 << i) == 0:
                    for j in collisionGroups[i]:
                        s = time.time()
                        self.__makeNonCollisionConstraint(obj, j)

            collisionGroups[rigid.collision_group_number].append(obj)
            self.__rigidTable.append(obj)
        logging.debug('Finished importing rigid bodies in %f seconds.',
                      time.time() - start_time)

    def __getRigidRange(self, obj):
        return (mathutils.Vector(obj.bound_box[0]) -
                mathutils.Vector(obj.bound_box[6])).length

    def __makeNonCollisionConstraint(self, obj_a, obj_b):
        if (mathutils.Vector(obj_a.location) - mathutils.Vector(obj_b.location)
            ).length > self.__distance_of_ignore_collisions * (
                self.__getRigidRange(obj_a) + self.__getRigidRange(obj_b)):
            return
        t = bpy.data.objects.new(
            'ncc.%d' % len(self.__nonCollisionConstraints), None)
        bpy.context.scene.objects.link(t)
        t.location = [0, 0, 0]
        t.empty_draw_size = 0.5 * self.__scale
        t.empty_draw_type = 'ARROWS'
        t.is_mmd_non_collision_constraint = True
        t.hide_render = True
        t.parent = self.__root
        utils.selectAObject(t)
        bpy.ops.rigidbody.constraint_add(type='GENERIC')
        rb = t.rigid_body_constraint
        rb.disable_collisions = True
        rb.object1 = obj_a
        rb.object2 = obj_b
        self.__nonCollisionConstraints.append(t)
        self.__nonCollisionJointTable[frozenset((obj_a, obj_b))] = t
        self.__tempObjGroup.objects.link(t)

    def __makeSpring(self, target, base_obj, spring_stiffness):
        utils.selectAObject(target)
        bpy.ops.object.duplicate()
        spring_target = bpy.context.scene.objects.active
        spring_target.is_mmd_spring_goal = True
        spring_target.rigid_body.kinematic = True
        spring_target.rigid_body.collision_groups = (False, False, False,
                                                     False, False, False,
                                                     False, False, False,
                                                     False, False, False,
                                                     False, False, False,
                                                     False, False, False,
                                                     False, True)
        bpy.context.scene.objects.active = base_obj
        bpy.ops.object.parent_set(type='OBJECT',
                                  xmirror=False,
                                  keep_transform=True)
        self.__rigidObjGroup.objects.unlink(spring_target)
        self.__tempObjGroup.objects.link(spring_target)

        obj = bpy.data.objects.new('S.' + target.name, None)
        bpy.context.scene.objects.link(obj)
        obj.location = target.location
        obj.empty_draw_size = 0.5 * self.__scale
        obj.empty_draw_type = 'ARROWS'
        obj.hide_render = True
        obj.is_mmd_spring_joint = True
        obj.parent = self.__root
        self.__tempObjGroup.objects.link(obj)
        utils.selectAObject(obj)
        bpy.ops.rigidbody.constraint_add(type='GENERIC_SPRING')
        rbc = obj.rigid_body_constraint
        rbc.object1 = target
        rbc.object2 = spring_target

        rbc.use_spring_x = True
        rbc.use_spring_y = True
        rbc.use_spring_z = True

        rbc.spring_stiffness_x = spring_stiffness[0]
        rbc.spring_stiffness_y = spring_stiffness[1]
        rbc.spring_stiffness_z = spring_stiffness[2]

    def __importJoints(self):
        if self.__onlyCollisions:
            return
        self.__jointTable = []
        for joint in self.__model.joints:
            loc = mathutils.Vector(
                joint.location) * self.TO_BLE_MATRIX * self.__scale
            rot = mathutils.Vector(joint.rotation) * self.TO_BLE_MATRIX * -1
            obj = bpy.data.objects.new('J.' + joint.name, None)
            bpy.context.scene.objects.link(obj)
            obj.location = loc
            obj.rotation_euler = rot
            obj.empty_draw_size = 0.5 * self.__scale
            obj.empty_draw_type = 'ARROWS'
            obj.hide_render = True
            obj.is_mmd_joint = True
            obj.parent = self.__root
            self.__jointObjGroup.objects.link(obj)

            utils.selectAObject(obj)
            bpy.ops.rigidbody.constraint_add(type='GENERIC_SPRING')
            rbc = obj.rigid_body_constraint

            rigid1 = self.__rigidTable[joint.src_rigid]
            rigid2 = self.__rigidTable[joint.dest_rigid]
            rbc.object1 = rigid1
            rbc.object2 = rigid2

            if not self.__ignoreNonCollisionGroups:
                non_collision_joint = self.__nonCollisionJointTable.get(
                    frozenset((rigid1, rigid2)), None)
                if non_collision_joint is None:
                    rbc.disable_collisions = False
                else:
                    utils.selectAObject(non_collision_joint)
                    bpy.ops.object.delete(use_global=False)
                    rbc.disable_collisions = True
            elif rigid1.rigid_body.kinematic != rigid2.rigid_body.kinematic:
                rbc.disable_collisions = False

            rbc.use_limit_ang_x = True
            rbc.use_limit_ang_y = True
            rbc.use_limit_ang_z = True
            rbc.use_limit_lin_x = True
            rbc.use_limit_lin_y = True
            rbc.use_limit_lin_z = True
            rbc.use_spring_x = True
            rbc.use_spring_y = True
            rbc.use_spring_z = True

            max_loc = mathutils.Vector(
                joint.maximum_location) * self.TO_BLE_MATRIX * self.__scale
            min_loc = mathutils.Vector(
                joint.minimum_location) * self.TO_BLE_MATRIX * self.__scale
            rbc.limit_lin_x_upper = max_loc[0]
            rbc.limit_lin_y_upper = max_loc[1]
            rbc.limit_lin_z_upper = max_loc[2]

            rbc.limit_lin_x_lower = min_loc[0]
            rbc.limit_lin_y_lower = min_loc[1]
            rbc.limit_lin_z_lower = min_loc[2]

            max_rot = mathutils.Vector(
                joint.maximum_rotation) * self.TO_BLE_MATRIX
            min_rot = mathutils.Vector(
                joint.minimum_rotation) * self.TO_BLE_MATRIX
            rbc.limit_ang_x_upper = -min_rot[0]
            rbc.limit_ang_y_upper = -min_rot[1]
            rbc.limit_ang_z_upper = -min_rot[2]

            rbc.limit_ang_x_lower = -max_rot[0]
            rbc.limit_ang_y_lower = -max_rot[1]
            rbc.limit_ang_z_lower = -max_rot[2]

            # spring_damp = mathutils.Vector(joint.spring_constant) * self.TO_BLE_MATRIX
            # rbc.spring_damping_x = spring_damp[0]
            # rbc.spring_damping_y = spring_damp[1]
            # rbc.spring_damping_z = spring_damp[2]

            self.__jointTable.append(obj)
            bpy.ops.object.select_all(action='DESELECT')
            obj.select = True
            bpy.context.scene.objects.active = self.__armObj
            bpy.ops.object.parent_set(type='OBJECT',
                                      xmirror=False,
                                      keep_transform=True)

            # spring_stiff = mathutils.Vector()
            # rbc.spring_stiffness_x = spring_stiff[0]
            # rbc.spring_stiffness_y = spring_stiff[1]
            # rbc.spring_stiffness_z = spring_stiff[2]

            if rigid1.rigid_body.kinematic:
                self.__makeSpring(
                    rigid2, rigid1,
                    mathutils.Vector(joint.spring_rotation_constant) *
                    self.TO_BLE_MATRIX)
            if rigid2.rigid_body.kinematic:
                self.__makeSpring(
                    rigid1, rigid2,
                    mathutils.Vector(joint.spring_rotation_constant) *
                    self.TO_BLE_MATRIX)

    def __importMaterials(self):
        self.__importTextures()
        bpy.types.Material.ambient_color = bpy.props.FloatVectorProperty(
            name='ambient color')

        pmxModel = self.__model

        self.__materialTable = []
        self.__materialFaceCountTable = []
        for i in pmxModel.materials:
            mat = bpy.data.materials.new(name=i.name)
            mat.diffuse_color = i.diffuse[0:3]
            mat.alpha = i.diffuse[3]
            mat.ambient_color = i.ambient
            mat.specular_color = i.specular[0:3]
            mat.specular_alpha = i.specular[3]
            self.__materialFaceCountTable.append(int(i.vertex_count / 3))
            self.__meshObj.data.materials.append(mat)
            if i.texture != -1:
                texture_slot = mat.texture_slots.add()
                texture_slot.use_map_alpha = True
                texture_slot.texture = self.__textureTable[i.texture]
                texture_slot.texture_coords = 'UV'
                mat.use_transparency = True
                mat.transparency_method = 'Z_TRANSPARENCY'
                mat.alpha = 0

    def __importFaces(self):
        pmxModel = self.__model
        mesh = self.__meshObj.data

        mesh.tessfaces.add(len(pmxModel.faces))
        uvLayer = mesh.tessface_uv_textures.new()
        for i, f in enumerate(pmxModel.faces):
            bf = mesh.tessfaces[i]
            bf.vertices_raw = list(f) + [0]
            bf.use_smooth = True
            face_count = 0
            uv = uvLayer.data[i]
            uv.uv1 = self.flipUV_V(pmxModel.vertices[f[0]].uv)
            uv.uv2 = self.flipUV_V(pmxModel.vertices[f[1]].uv)
            uv.uv3 = self.flipUV_V(pmxModel.vertices[f[2]].uv)

            bf.material_index = self.__getMaterialIndexFromFaceIndex(i)

    def __importVertexMorphs(self):
        pmxModel = self.__model

        utils.selectAObject(self.__meshObj)
        bpy.ops.object.shape_key_add()

        for morph in filter(lambda x: isinstance(x, pmx.VertexMorph),
                            pmxModel.morphs):
            shapeKey = self.__meshObj.shape_key_add(morph.name)
            for md in morph.offsets:
                shapeKeyPoint = shapeKey.data[md.index]
                offset = mathutils.Vector(md.offset) * self.TO_BLE_MATRIX
                shapeKeyPoint.co = shapeKeyPoint.co + offset * self.__scale

    def __hideRigidsAndJoints(self, obj):
        if obj.is_mmd_rigid or obj.is_mmd_joint or obj.is_mmd_non_collision_constraint or obj.is_mmd_spring_joint or obj.is_mmd_spring_goal:
            obj.hide = True

        for i in obj.children:
            self.__hideRigidsAndJoints(i)

    def __addArmatureModifier(self, meshObj, armObj):
        armModifier = meshObj.modifiers.new(name='Armature', type='ARMATURE')
        armModifier.object = armObj
        armModifier.use_vertex_groups = True

    def __renameLRBones(self):
        pose_bones = self.__armObj.pose.bones
        for i in pose_bones:
            if i.is_mmd_shadow_bone:
                continue
            i.mmd_bone_name_j = i.name
            i.name = utils.convertNameToLR(i.name)
            self.__meshObj.vertex_groups[i.mmd_bone_name_j].name = i.name

    def execute(self, **args):
        if 'pmx' in args:
            self.__model = args['pmx']
        else:
            self.__model = pmx.load(args['filepath'])

        self.__scale = args.get('scale', 1.0)
        renameLRBones = args.get('rename_LR_bones', False)
        self.__onlyCollisions = args.get('only_collisions', False)
        self.__ignoreNonCollisionGroups = args.get(
            'ignore_non_collision_groups', True)
        self.__distance_of_ignore_collisions = args.get(
            'distance_of_ignore_collisions', 1)  # distance beyond which collisions are not considered (i.e. non-collision group settings are ignored)
        self.__distance_of_ignore_collisions /= 2

        logging.info('****************************************')
        logging.info(' mmd_tools.import_pmx module')
        logging.info('----------------------------------------')
        logging.info(' Start loading model data from a pmx file')
        logging.info('            by the mmd_tools.pmx module.')
        logging.info('')

        start_time = time.time()

        self.__createGroups()
        self.__createObjects()

        self.__importVertices()
        self.__importBones()
        self.__importMaterials()
        self.__importFaces()
        self.__importRigids()
        self.__importJoints()

        self.__importVertexMorphs()

        if renameLRBones:
            self.__renameLRBones()

        self.__addArmatureModifier(self.__meshObj, self.__armObj)
        self.__meshObj.data.update()

        bpy.types.Object.pmx_import_scale = bpy.props.FloatProperty(
            name='pmx_import_scale')
        if args.get('hide_rigids', False):
            self.__hideRigidsAndJoints(self.__root)
        self.__armObj.pmx_import_scale = self.__scale

        for i in [
                self.__rigidObjGroup.objects, self.__jointObjGroup.objects,
                self.__tempObjGroup.objects
        ]:
            for j in i:
                self.__allObjGroup.objects.link(j)

        bpy.context.scene.gravity[2] = -9.81 * 10 * self.__scale

        logging.info(' Finished importing the model in %f seconds.',
                     time.time() - start_time)
        logging.info('----------------------------------------')
        logging.info(' mmd_tools.import_pmx module')
        logging.info('****************************************')
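
# A minimal sketch, not taken from mmd_tools: the importer above converts every
# PMX coordinate with the pattern `Vector(co) * TO_BLE_MATRIX * scale`.
# Assuming TO_BLE_MATRIX simply swaps the Y and Z axes (mirroring the
# exporter's TO_PMX_MATRIX shown further below), the conversion boils down to:
import mathutils

_TO_BLE_MATRIX_ASSUMED = mathutils.Matrix(((1.0, 0.0, 0.0),
                                           (0.0, 0.0, 1.0),
                                           (0.0, 1.0, 0.0)))

def _pmx_to_blender_co(co, scale=0.2):
    # Row-vector convention (vector * matrix), matching the Blender 2.7x API
    # used by the importer above.
    return mathutils.Vector(co) * _TO_BLE_MATRIX_ASSUMED * scale

# e.g. a PMX vertex at (1, 2, 3) lands at (0.2, 0.6, 0.4) in Blender.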
Exemplo n.º 14
def import_ifc(filename, use_names, process_relations, blender_booleans):
    from . import ifcopenshell
    from .ifcopenshell import geom as ifcopenshell_geom
    from .ifcopenshell.file import file as ifcopenshell_file
    print("Reading %s..." % bpy.path.basename(filename))
    settings = ifcopenshell_geom.settings()
    settings.set(settings.DISABLE_OPENING_SUBTRACTIONS, blender_booleans)
    ifcfile = ifcopenshell.open(filename)
    iterator = ifcopenshell_geom.iterator(settings, ifcfile)
    valid_file = iterator.initialize()
    if not valid_file:
        return False
    print("Done reading file")
    id_to_object = defaultdict(list)
    id_to_parent = {}
    id_to_name = {}
    id_to_matrix = {}
    openings = []
    old_progress = -1
    print("Creating geometry...")

    root_collection = bpy.data.collections.new(os.path.basename(filename))
    bpy.context.scene.collection.children.link(root_collection)

    guid_collections = {}
    collections = {
        0: root_collection,
    }

    def get_collection(cid):
        collection = collections.get(cid)
        if collection is None:
            collection = bpy.data.collections.new(str(cid))
            collections[cid] = collection
            root_collection.children.link(collection)
        return collection

    while True:
        ob = iterator.get()

        f = ob.geometry.faces
        v = ob.geometry.verts
        mats = ob.geometry.materials
        matids = ob.geometry.material_ids
        m = ob.transformation.matrix.data
        t = ob.type[0:21]
        nm = ob.name if len(ob.name) and use_names else ob.guid
        id_to_name[ob.id] = os.path.commonprefix((id_to_name.get(ob.id,
                                                                 nm), nm))

        verts = [[v[i], v[i + 1], v[i + 2]] \
            for i in range(0, len(v), 3)]
        faces = [[f[i], f[i + 1], f[i + 2]] \
            for i in range(0, len(f), 3)]

        me = bpy.data.meshes.new('mesh{}'.format(ob.geometry.id))
        me.from_pydata(verts, [], faces)
        me.validate()

        def add_material(mname, props):
            if mname in bpy.data.materials:
                mat = bpy.data.materials[mname]
                mat.use_fake_user = True
            else:
                mat = bpy.data.materials.new(mname)
                for k, v in props.items():
                    if k == 'specular_hardness':
                        # FIXME: This seems to not be available in Blender 2.80
                        continue
                    try:
                        if hasattr(v, '__len__'):
                            getattr(mat, k)[:len(v)] = v
                        else:
                            setattr(mat, k, v)
                    except Exception as exc:
                        logging.exception(
                            'Failed setting %s with property %s = %s: %s', mat,
                            k, v, exc)

            me.materials.append(mat)

        needs_default = -1 in matids
        if needs_default: add_material(t, {})

        for mat in mats:
            props = {}
            if mat.has_diffuse: props['diffuse_color'] = mat.diffuse
            if mat.has_specular: props['specular_color'] = mat.specular
            if mat.has_transparency and mat.transparency > 0:
                props['alpha'] = 1.0 - mat.transparency
                props['use_transparency'] = True
            if mat.has_specularity:
                props['specular_hardness'] = mat.specularity
            add_material(mat.name, props)

        bob = bpy.data.objects.new(nm, me)
        mat = mathutils.Matrix(([m[0], m[1], m[2],
                                 0], [m[3], m[4], m[5],
                                      0], [m[6], m[7], m[8],
                                           0], [m[9], m[10], m[11], 1]))

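        # `transpose_matrices` is assumed to be a module-level flag defined
        # elsewhere in the original script; it is not set in this excerpt.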
        if transpose_matrices: mat.transpose()

        if process_relations:
            id_to_matrix[ob.id] = mat
        else:
            bob.matrix_world = mat

        get_collection(ob.parent_id).objects.link(bob)
        bpy.context.view_layer.objects.active = bob

        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.normals_make_consistent()
        bpy.ops.object.mode_set(mode='OBJECT')

        bob.ifc_id, bob.ifc_guid, bob.ifc_name, bob.ifc_type = \
            ob.id, ob.guid, ob.name, ob.type

        if ob.type == 'IfcSpace' or ob.type == 'IfcOpeningElement':
            if not (ob.type == 'IfcOpeningElement' and blender_booleans):
                bob.hide_set(True)
            bob.display_type = 'WIRE'

        id_to_object[ob.id].append(bob)

        if ob.parent_id > 0:
            id_to_parent[ob.id] = ob.parent_id

        if blender_booleans and ob.type == 'IfcOpeningElement':
            openings.append(ob.id)

        faces = me.polygons if hasattr(me, 'polygons') else me.faces
        if len(faces) == len(matids):
            for face, matid in zip(faces, matids):
                face.material_index = matid + (1 if needs_default else 0)

        progress = iterator.progress() // 2
        if progress > old_progress:
            print("\r[" + "#" * progress + " " * (50 - progress) + "]", end="")
            old_progress = progress
        if not iterator.next():
            break

    print("\rDone creating geometry" + " " * 30)

    id_to_parent_temp = dict(id_to_parent)

    if process_relations:
        print("Processing relations...")

    for cid, collection in collections.items():
        if cid == 0:
            continue

        name = id_to_name.get(cid)
        if name is None:
            ifc_obj = None
            try:
                ifc_obj = ifcfile.by_id(cid)
            except Exception:
                logging.exception("Failed to resolve id %s to object", cid)
            try:
                name = ifc_obj.Name
            except Exception:
                name = 'unresolved_{}'.format(cid)
                logging.exception("Failed to retrieve name from %s", ifc_obj)
        collection.name = name
        parent_cid = id_to_parent.get(cid)
        parent_collection = collections.get(parent_cid, root_collection)
        root_collection.children.unlink(collection)
        parent_collection.children.link(collection)

    while len(id_to_parent_temp) and process_relations:
        id, parent_id = id_to_parent_temp.popitem()

        if parent_id in id_to_object:
            bob = id_to_object[parent_id][0]
        else:
            parent_ob = iterator.getObject(parent_id)
            if parent_ob.id == -1:
                bob = None
            else:
                m = parent_ob.transformation.matrix.data
                nm = parent_ob.name if len(parent_ob.name) and use_names \
                    else parent_ob.guid
                bob = bpy.data.objects.new(nm, None)

                mat = mathutils.Matrix((
                    [m[0], m[1], m[2], 0],
                    [m[3], m[4], m[5], 0],
                    [m[6], m[7], m[8], 0],
                    [m[9], m[10], m[11], 1],
                ))

                if transpose_matrices: mat.transpose()
                id_to_matrix[parent_ob.id] = mat

                bpy.context.scene.collection.objects.link(bob)

                bob.ifc_id = parent_ob.id
                bob.ifc_name, bob.ifc_type, bob.ifc_guid = \
                    parent_ob.name, parent_ob.type, parent_ob.guid

                if parent_ob.parent_id > 0:
                    id_to_parent[parent_id] = parent_ob.parent_id
                    id_to_parent_temp[parent_id] = parent_ob.parent_id
                id_to_object[parent_id].append(bob)
        if bob:
            for ob in id_to_object[id]:
                ob.parent = bob

    id_to_matrix_temp = dict(id_to_matrix)

    while len(id_to_matrix_temp):
        id, matrix = id_to_matrix_temp.popitem()
        parent_id = id_to_parent.get(id, None)
        parent_matrix = id_to_matrix.get(parent_id, None)
        for ob in id_to_object[id]:
            if parent_matrix:
                ob.matrix_local = parent_matrix.inverted() @ matrix
            else:
                ob.matrix_world = matrix

    if process_relations:
        print("Done processing relations")

    for opening_id in openings:
        parent_id = id_to_parent[opening_id]
        if parent_id in id_to_object:
            parent_ob = id_to_object[parent_id][0]
            for opening_ob in id_to_object[opening_id]:
                mod = parent_ob.modifiers.new("opening", "BOOLEAN")
                mod.operation = "DIFFERENCE"
                mod.object = opening_ob

    #txt = bpy.data.texts.new("%s.log"%bpy.path.basename(filename))
    #txt.from_string(iterator.get_log())
    print(dir(iterator))

    return True
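
# A minimal sketch, not part of the IFC importer above: the iterator's flat
# 12-float transformation fills a 4x4 mathutils.Matrix row by row (three floats
# per row), with the final triple placed in the translation row; an optional
# transpose matches whatever convention the surrounding script's
# `transpose_matrices` flag selects.
import mathutils

def _matrix_from_flat12(m, transpose=False):
    mat = mathutils.Matrix(([m[0], m[1], m[2], 0.0],
                            [m[3], m[4], m[5], 0.0],
                            [m[6], m[7], m[8], 0.0],
                            [m[9], m[10], m[11], 1.0]))
    if transpose:
        mat.transpose()
    return mat

# Identity placement: axes along X/Y/Z and the origin at the world origin.
assert _matrix_from_flat12([1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0]) == mathutils.Matrix.Identity(4)
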
def create_dictionaries():
    """Creation of a list of all the robots and components in the scene.
       Uses the properties of the objects to determine what they are."""

    # Create a dictionary that stores initial positions of all objects
    # in the simulation, used to reset the simulation.
    persistantstorage.blender_objects = {}

    # Create a dictionary of the components in the scene
    persistantstorage.componentDict = {}

    # Create a dictionary of the robots in the scene
    persistantstorage.robotDict = {}

    # Create a dictionary of the external robots in the scene
    # Used for the multi-node simulation
    persistantstorage.externalRobotDict = {}

    # Create a dictionary with the passive, but interactive (i.e., with an
    # 'Object' property) objects in the scene.
    persistantstorage.passiveObjectsDict = {}

    # Create a dictionary with the modifiers
    persistantstorage.modifierDict = {}

    # Create a dictionary with the datastream interfaces used
    persistantstorage.datastreamDict = {}

    # This dictionary stores, for each component, the direction and the
    # configured datastream interfaces. Direction is 'IN' for streams
    # that are read by MORSE (typically, for actuators), and 'OUT'
    # for streams published by MORSE (typically, for sensors)
    persistantstorage.datastreams = {}

    # Create a dictionary with the overlays used
    persistantstorage.overlayDict = {}

    # Create the 'request managers' manager
    persistantstorage.morse_services = MorseServices()

    scene = morse.core.blenderapi.scene()

    # Store the position and orientation of all objects
    for obj in scene.objects:
        if obj.parent == None:
            import mathutils
            pos = mathutils.Vector(obj.worldPosition)
            ori = mathutils.Matrix(obj.worldOrientation)
            persistantstorage.blender_objects[obj] = [pos, ori]

    # Get the list of passive interactive objects.

    # These objects have a 'Object' property set to true
    # (plus several other optional properties).
    # See the documentation for the up-to-date list
    # (doc/morse/user/others/passive_objects.rst) -- or read the code below :-)
    for obj in scene.objects:
        # Check the object has an 'Object' property set to true
        if 'Object' in obj and obj['Object']:
            details = {
                'label': obj['Label'] if 'Label' in obj else str(obj),
                'description':
                obj['Description'] if 'Description' in obj else "",
                'type': obj['Type'] if 'Type' in obj else "Object",
                'graspable': obj['Graspable'] if 'Graspable' in obj else False
            }
            persistantstorage.passiveObjectsDict[obj] = details
            logger.info("Added {name} as a {graspable}passive object".format(
                name=details['label'],
                graspable="graspable " if details['graspable'] else ""))

    if not persistantstorage.passiveObjectsDict:
        logger.info("No passive objects in the scene.")

    # Get the robots
    for obj in scene.objects:
        if 'Robot_Tag' in obj or 'External_Robot_Tag' in obj:
            if not 'classpath' in obj:
                logger.error(
                    "No 'classpath' in %s\n  Please make sure you are "
                    "using the new builder classes" % str(obj.name))
                return False
            # Create an object instance and store it
            instance = create_instance_level(obj['classpath'],
                                             obj.get('abstraction_level'), obj)

            if not instance:
                logger.error("Could not create %s" % str(obj['classpath']))
                return False
            # store instance in persistant storage dictionary
            if 'Robot_Tag' in obj:
                persistantstorage.robotDict[obj] = instance
            else:
                persistantstorage.externalRobotDict[obj] = instance

    if not (persistantstorage.robotDict
            or persistantstorage.externalRobotDict):  # No robot!
        logger.error("""
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    INITIALIZATION ERROR: no robot in your simulation!
    
    Do not forget that components _must_ belong to a
    robot (you can not have free objects)
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
            """)
        return False

    # Get the robot and its instance
    for obj, robot_instance in persistantstorage.robotDict.items():
        if not _associate_child_to_robot(obj, robot_instance, False):
            return False

    # Get the external robot and its instance
    for obj, robot_instance in persistantstorage.externalRobotDict.items():
        if not _associate_child_to_robot(obj, robot_instance, True):
            return False

    # Check we have no 'free' component (they all must belong to a robot)
    for obj in scene.objects:
        try:
            obj['Component_Tag']
            if obj.name not in persistantstorage.componentDict.keys():
                logger.error("""
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    INITIALIZATION ERROR: the component '""" + obj.name + """' does not
    belong to any robot: you need to fix that by 
    parenting it to a robot.                    
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    """)
                return False
        except KeyError as detail:
            pass

    # Always returns True (for the moment)
    return True
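
# A more compact, equivalent way (a sketch, not taken from MORSE) to build the
# `details` dictionary for passive objects above, using dict-style get() with
# defaults instead of repeated `x if 'x' in obj else ...` tests (KX_GameObject
# supports get()):
def _passive_object_details(obj):
    return {
        'label': obj.get('Label', str(obj)),
        'description': obj.get('Description', ""),
        'type': obj.get('Type', "Object"),
        'graspable': obj.get('Graspable', False),
    }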
Exemplo n.º 16
    def updateRigid(self, rigid_obj):
        if rigid_obj.mmd_type != 'RIGID_BODY':
            raise TypeError('rigid_obj must be a mmd_rigid object')

        rigid = rigid_obj.mmd_rigid
        relation = rigid_obj.constraints['mmd_tools_rigid_parent']
        arm = relation.target
        bone_name = relation.subtarget
        target_bone = None
        if arm is not None and bone_name != '':
            target_bone = arm.pose.bones[bone_name]

        if target_bone is not None:
            for i in target_bone.constraints:
                if i.name == 'mmd_tools_rigid_track':
                    target_bone.constraints.remove(i)

        if int(rigid.type) == rigid_body.MODE_STATIC:
            rigid_obj.rigid_body.kinematic = True
        else:
            rigid_obj.rigid_body.kinematic = False

        if int(rigid.type) == rigid_body.MODE_STATIC:
            if arm is not None and bone_name != '':
                relation.mute = False
                relation.inverse_matrix = mathutils.Matrix(
                    target_bone.matrix).inverted()
            else:
                relation.mute = True
        else:
            relation.mute = True

        if int(rigid.type) in [
                rigid_body.MODE_DYNAMIC, rigid_body.MODE_DYNAMIC_BONE
        ] and arm is not None and target_bone is not None:
            empty = bpy.data.objects.new('mmd_bonetrack', None)
            bpy.context.scene.objects.link(empty)
            empty.location = target_bone.tail
            empty.empty_draw_size = 0.1
            empty.empty_draw_type = 'ARROWS'
            empty.mmd_type = 'TRACK_TARGET'
            empty.hide = True
            empty.parent = self.temporaryGroupObject()

            rigid_obj.mmd_rigid.bone = relation.subtarget
            rigid_obj.constraints.remove(relation)

            bpyutils.setParent(empty, rigid_obj)
            empty.select = False
            empty.hide = True

            for i in target_bone.constraints:
                if i.type == 'IK':
                    i.mute = True
            const = target_bone.constraints.new('DAMPED_TRACK')
            const.name = 'mmd_tools_rigid_track'
            const.target = empty

        t = rigid_obj.hide
        with bpyutils.select_object(rigid_obj):
            bpy.ops.object.transform_apply(location=False,
                                           rotation=False,
                                           scale=True)
        rigid_obj.hide = t

        rigid_obj.rigid_body.collision_shape = rigid.shape
Exemplo n.º 17
def _fvec16_to_matrix4(fvec):
    return mathutils.Matrix((fvec[0:4], fvec[4:8], fvec[8:12], fvec[12:16]))
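
# Usage sketch (not part of the original module): the 16 floats are consumed
# row by row, so a flat identity yields the 4x4 identity matrix.
assert _fvec16_to_matrix4([1, 0, 0, 0,
                           0, 1, 0, 0,
                           0, 0, 1, 0,
                           0, 0, 0, 1]) == mathutils.Matrix.Identity(4)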
Exemplo n.º 18
def make_mesh_chunk(mesh, materialDict, ob, name_to_id):
    """Make a chunk out of a Blender mesh."""

    # Extract the triangles from the mesh:
    tri_list = extract_triangles(mesh)
    if mesh.tessface_uv_textures:
        # Remove the face UVs and convert it to vertex UV:
        vert_array, uv_array, tri_list = remove_face_uv(
            mesh.vertices, tri_list)
    else:
        # Add the vertices to the vertex array:
        vert_array = _3ds_array()
        for vert in mesh.vertices:
            vert_array.add(_3ds_point_3d(vert.co))
        # no UV at all:
        uv_array = None

    # create the chunk:
    mesh_chunk = _3ds_chunk(OBJECT_MESH)

    # add vertex chunk:
    mesh_chunk.add_subchunk(make_vert_chunk(vert_array))
    # add faces chunk:

    mesh_chunk.add_subchunk(make_faces_chunk(tri_list, mesh, materialDict))

    mesh1 = _3ds_chunk(OBJECT_TRANS_MATRIX)

    scale_vector = ob.matrix_world.to_scale()
    offset_vector = ob.matrix_local.to_translation()

    ob_matrix = mathutils.Matrix().Identity(4)

    ob_matrix[0][0] = 1 / scale_vector[0]
    ob_matrix[1][1] = 1 / scale_vector[1]
    ob_matrix[2][2] = 1 / scale_vector[2]

    ob_matrix[3][0] = -offset_vector[0] / scale_vector[0]
    ob_matrix[3][1] = -offset_vector[1] / scale_vector[1]
    ob_matrix[3][2] = -offset_vector[2] / scale_vector[2]

    print("Matrix for " + ob.name + " that has " +
          ("no parent" if ob.parent == None else ob.parent.name +
           " as parent"))
    print(strMatrix(ob_matrix))

    mesh1.add_variable("w1", _3ds_float(ob_matrix[0][0]))
    mesh1.add_variable("w2", _3ds_float(ob_matrix[0][1]))
    mesh1.add_variable("w3", _3ds_float(ob_matrix[0][2]))
    mesh1.add_variable("x1", _3ds_float(ob_matrix[1][0]))
    mesh1.add_variable("x2", _3ds_float(ob_matrix[1][1]))
    mesh1.add_variable("x3", _3ds_float(ob_matrix[1][2]))
    mesh1.add_variable("y1", _3ds_float(ob_matrix[2][0]))
    mesh1.add_variable("y2", _3ds_float(ob_matrix[2][1]))
    mesh1.add_variable("y3", _3ds_float(ob_matrix[2][2]))
    mesh1.add_variable("z1", _3ds_float(ob_matrix[3][0]))
    mesh1.add_variable("z2", _3ds_float(ob_matrix[3][1]))
    mesh1.add_variable("z3", _3ds_float(ob_matrix[3][2]))

    mesh_chunk.add_subchunk(mesh1)

    # if available, add uv chunk:
    if uv_array:
        mesh_chunk.add_subchunk(make_uv_chunk(uv_array))

    return mesh_chunk
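
# A small verification sketch, not from the 3ds exporter above: with the
# translation stored in the last row, the matrix built above (1/scale on the
# diagonal, -offset/scale in the last row) is exactly the inverse of
# "scale by s, then translate by o". Uses the 2.7x-style '*' operator, like the
# exporter itself.
import mathutils

_s = mathutils.Vector((2.0, 4.0, 0.5))
_o = mathutils.Vector((1.0, -3.0, 10.0))

_fwd = mathutils.Matrix.Identity(4)
_fwd[0][0], _fwd[1][1], _fwd[2][2] = _s.x, _s.y, _s.z
_fwd[3][0], _fwd[3][1], _fwd[3][2] = _o.x, _o.y, _o.z

_inv = mathutils.Matrix.Identity(4)
_inv[0][0], _inv[1][1], _inv[2][2] = 1 / _s.x, 1 / _s.y, 1 / _s.z
_inv[3][0], _inv[3][1], _inv[3][2] = -_o.x / _s.x, -_o.y / _s.y, -_o.z / _s.z

assert _fwd * _inv == mathutils.Matrix.Identity(4)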
Exemplo n.º 19
class __PmxExporter:
    TO_PMX_MATRIX = mathutils.Matrix([[1.0, 0.0, 0.0,
                                       0.0], [0.0, 0.0, 1.0, 0.0],
                                      [0.0, 1.0, 0.0, 0.0],
                                      [0.0, 0.0, 0.0, 1.0]])

    def __init__(self):
        self.__model = None
        self.__bone_name_table = []
        self.__material_name_table = []

    @staticmethod
    def flipUV_V(uv):
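        # PMX stores UVs with a top-left origin while Blender uses bottom-left,
        # so only the V coordinate needs to be flipped.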
        u, v = uv
        return [u, 1.0 - v]

    def __exportMeshes(self, meshes, bone_map):
        mat_map = {}
        for mesh in meshes:
            for index, mat_faces in mesh.material_faces.items():
                name = mesh.materials[index].name
                if name not in mat_map:
                    mat_map[name] = []
                mat_map[name].append((mat_faces, mesh.vertex_group_names))

        # export vertices
        for mat_name, mat_meshes in mat_map.items():
            face_count = 0
            for mat_faces, vertex_group_names in mat_meshes:
                mesh_vertices = []
                for face in mat_faces:
                    mesh_vertices.extend(face.vertices)

                for v in mesh_vertices:
                    if v.index is not None:
                        continue

                    v.index = len(self.__model.vertices)
                    pv = pmx.Vertex()
                    pv.co = list(v.co)
                    pv.normal = v.normal * -1
                    pv.uv = self.flipUV_V(v.uv)

                    t = len(v.groups)
                    if t == 0:
                        weight = pmx.BoneWeight()
                        weight.type = pmx.BoneWeight.BDEF1
                        weight.bones = [-1]
                        pv.weight = weight
                    elif t == 1:
                        weight = pmx.BoneWeight()
                        weight.type = pmx.BoneWeight.BDEF1
                        weight.bones = [
                            bone_map[vertex_group_names[v.groups[0][0]]]
                        ]
                        pv.weight = weight
                    elif t == 2:
                        vg1, vg2 = v.groups
                        weight = pmx.BoneWeight()
                        weight.type = pmx.BoneWeight.BDEF2
                        weight.bones = [
                            bone_map[vertex_group_names[vg1[0]]],
                            bone_map[vertex_group_names[vg2[0]]]
                        ]
                        weight.weights = [vg1[1]]
                        pv.weight = weight
                    else:
                        weight = pmx.BoneWeight()
                        weight.type = pmx.BoneWeight.BDEF4
                        weight.bones = [-1, -1, -1, -1]
                        weight.weights = [0.0, 0.0, 0.0, 0.0]
                        for i in range(min(len(v.groups), 4)):
                            gn, w = v.groups[i]
                            weight.bones[i] = bone_map[vertex_group_names[gn]]
                            weight.weights[i] = w
                        pv.weight = weight
                    self.__model.vertices.append(pv)

                for face in mat_faces:
                    self.__model.faces.append([x.index for x in face.vertices])
                face_count += len(mat_faces)
            self.__exportMaterial(bpy.data.materials[mat_name], face_count)

    def __exportTexture(self, filepath):
        if filepath.strip() == '':
            return -1
        filepath = os.path.abspath(filepath)
        for i, tex in enumerate(self.__model.textures):
            if tex.path == filepath:
                return i
        t = pmx.Texture()
        t.path = filepath
        self.__model.textures.append(t)
        if not os.path.isfile(t.path):
            logging.warning('  The texture file does not exist: %s', t.path)
        return len(self.__model.textures) - 1

    def __copy_textures(self, tex_dir):
        if not os.path.isdir(tex_dir):
            os.mkdir(tex_dir)
            logging.info('Create a texture directory: %s', tex_dir)

        for texture in self.__model.textures:
            path = texture.path
            dest_path = os.path.join(tex_dir, os.path.basename(path))
            shutil.copyfile(path, dest_path)
            logging.info('Copy file %s --> %s', path, dest_path)
            texture.path = dest_path

    def __exportMaterial(self, material, num_faces):
        p_mat = pmx.Material()
        mmd_mat = material.mmd_material

        p_mat.name = mmd_mat.name_j or material.name
        p_mat.name_e = mmd_mat.name_e or material.name
        p_mat.diffuse = list(material.diffuse_color) + [material.alpha]
        p_mat.ambient = mmd_mat.ambient_color
        p_mat.specular = list(
            material.specular_color) + [material.specular_alpha]
        p_mat.is_double_sided = mmd_mat.is_double_sided
        p_mat.enabled_drop_shadow = mmd_mat.enabled_drop_shadow
        p_mat.enabled_self_shadow_map = mmd_mat.enabled_self_shadow_map
        p_mat.enabled_self_shadow = mmd_mat.enabled_self_shadow
        p_mat.enabled_toon_edge = mmd_mat.enabled_toon_edge
        p_mat.edge_color = mmd_mat.edge_color
        p_mat.edge_size = mmd_mat.edge_weight
        p_mat.sphere_texture_mode = int(mmd_mat.sphere_texture_type)
        p_mat.comment = mmd_mat.comment

        p_mat.vertex_count = num_faces * 3
        tex_slots = material.texture_slots.values()
        if tex_slots[0]:
            tex = tex_slots[0].texture
            index = self.__exportTexture(tex.image.filepath)
            p_mat.texture = index
        if tex_slots[1]:
            tex = tex_slots[1].texture
            index = self.__exportTexture(tex.image.filepath)
            p_mat.sphere_texture = index

        if mmd_mat.is_shared_toon_texture:
            p_mat.toon_texture = mmd_mat.shared_toon_texture
            p_mat.is_shared_toon_texture = True
        else:
            p_mat.toon_texture = self.__exportTexture(mmd_mat.toon_texture)
            p_mat.is_shared_toon_texture = False

        self.__material_name_table.append(material.name)
        self.__model.materials.append(p_mat)

    @classmethod
    def __countBoneDepth(cls, bone):
        if bone.parent is None:
            return 0
        else:
            return cls.__countBoneDepth(bone.parent) + 1

    def __exportBones(self):
        """ Export bones.
        Returns:
            A dictionary to map Blender bone names to bone indices of the pmx.model instance.
        """
        arm = self.__armature
        boneMap = {}
        pmx_bones = []
        pose_bones = arm.pose.bones
        world_mat = arm.matrix_world
        r = {}

        # sort bones by depth so that parents are processed before their children
        sorted_bones = sorted(pose_bones, key=self.__countBoneDepth)

        with bpyutils.edit_object(arm) as data:
            for p_bone in sorted_bones:
                bone = data.edit_bones[p_bone.name]
                if p_bone.is_mmd_shadow_bone:
                    continue
                pmx_bone = pmx.Bone()
                if p_bone.mmd_bone.name_j != '':
                    pmx_bone.name = p_bone.mmd_bone.name_j
                else:
                    pmx_bone.name = bone.name

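                # the additional transform target is kept as (pose_bone, influence) here
                # and resolved to a pmx bone index in the second pass below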
                mmd_bone = p_bone.mmd_bone
                if mmd_bone.additional_transform_bone_id != -1:
                    fnBone = FnBone.from_bone_id(
                        arm, mmd_bone.additional_transform_bone_id)
                    pmx_bone.additionalTransform = (
                        fnBone.pose_bone,
                        mmd_bone.additional_transform_influence)
                pmx_bone.hasAdditionalRotate = mmd_bone.has_additional_rotation
                pmx_bone.hasAdditionalLocation = mmd_bone.has_additional_location

                pmx_bone.name_e = p_bone.mmd_bone.name_e or ''
                pmx_bone.location = world_mat * mathutils.Vector(
                    bone.head) * self.__scale * self.TO_PMX_MATRIX
                pmx_bone.parent = bone.parent
                pmx_bone.visible = mmd_bone.is_visible
                pmx_bone.isMovable = not all(p_bone.lock_location)
                pmx_bone.isRotatable = not all(p_bone.lock_rotation)
                pmx_bones.append(pmx_bone)
                self.__bone_name_table.append(p_bone.name)
                boneMap[bone] = pmx_bone
                r[bone.name] = len(pmx_bones) - 1

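                # display connection: -1 for tip bones, a tail offset vector when
                # use_tail_location is set, otherwise a connected child bone
                # (converted to an index in the second pass)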
                if p_bone.mmd_bone.is_tip:
                    pmx_bone.displayConnection = -1
                elif p_bone.mmd_bone.use_tail_location:
                    tail_loc = world_mat * mathutils.Vector(
                        bone.tail) * self.__scale * self.TO_PMX_MATRIX
                    pmx_bone.displayConnection = tail_loc - pmx_bone.location
                else:
                    for child in bone.children:
                        if child.use_connect:
                            pmx_bone.displayConnection = child
                            break
                    #if not pmx_bone.displayConnection: #I think this wasn't working properly
                    #pmx_bone.displayConnection = bone.tail - bone.head

                #add fixed and local axes
                if mmd_bone.enabled_fixed_axis:
                    pmx_bone.axis = mmd_bone.fixed_axis

                if mmd_bone.enabled_local_axes:
                    pmx_bone.localCoordinate = pmx.Coordinate(
                        mmd_bone.local_axis_x, mmd_bone.local_axis_z)

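            # second pass: convert parents, display connections and additional
            # transform targets from bone objects to pmx bone indices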
            for idx, i in enumerate(pmx_bones):
                if i.parent is not None:
                    i.parent = pmx_bones.index(boneMap[i.parent])
                    logging.debug('the parent of %s:%s: %s', idx, i.name,
                                  i.parent)
                if isinstance(i.displayConnection, pmx.Bone):
                    i.displayConnection = pmx_bones.index(i.displayConnection)
                elif isinstance(i.displayConnection, bpy.types.EditBone):
                    i.displayConnection = pmx_bones.index(
                        boneMap[i.displayConnection])

                if i.additionalTransform is not None:
                    b, influ = i.additionalTransform
                    i.additionalTransform = (r[b.name], influ)

            self.__model.bones = pmx_bones
        return r

    def __exportIKLinks(self, pose_bone, pmx_bones, bone_map, ik_links, count):
        if count <= 0:
            return ik_links

        logging.debug('    Create IK Link for %s', pose_bone.name)
        ik_link = pmx.IKLink()
        ik_link.target = bone_map[pose_bone.name]
        if pose_bone.use_ik_limit_x or pose_bone.use_ik_limit_y or pose_bone.use_ik_limit_z:
            minimum = []
            maximum = []
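            # the X limits are swapped and negated, presumably to account for the
            # axis convention difference between Blender and PMX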
            if pose_bone.use_ik_limit_x:
                minimum.append(-pose_bone.ik_max_x)
                maximum.append(-pose_bone.ik_min_x)
            else:
                minimum.append(0.0)
                maximum.append(0.0)

            if pose_bone.use_ik_limit_y:
                minimum.append(pose_bone.ik_min_y)
                maximum.append(pose_bone.ik_max_y)
            else:
                minimum.append(0.0)
                maximum.append(0.0)

            if pose_bone.use_ik_limit_z:
                minimum.append(pose_bone.ik_min_z)
                maximum.append(pose_bone.ik_max_z)
            else:
                minimum.append(0.0)
                maximum.append(0.0)
            ik_link.minimumAngle = minimum
            ik_link.maximumAngle = maximum

        if pose_bone.parent is not None:
            return self.__exportIKLinks(pose_bone.parent, pmx_bones, bone_map,
                                        ik_links + [ik_link], count - 1)
        else:
            return ik_links + [ik_link]

    def __exportIK(self, bone_map):
        """ Export IK constraints
         @param bone_map the dictionary to map Blender bone names to bone indices of the pmx.model instance.
        """
        pmx_bones = self.__model.bones
        arm = self.__armature
        pose_bones = arm.pose.bones
        for bone in pose_bones:
            for c in bone.constraints:
                if c.type == 'IK':
                    logging.debug('  Found IK constraint.')
                    ik_pose_bone = pose_bones[c.subtarget]
                    if ik_pose_bone.mmd_shadow_bone_type == 'IK_TARGET':
                        ik_bone_index = bone_map[ik_pose_bone.parent.name]
                        logging.debug('  Found IK proxy bone: %s -> %s',
                                      ik_pose_bone.name,
                                      ik_pose_bone.parent.name)
                    else:
                        ik_bone_index = bone_map[c.subtarget]

                    ik_target_bone = self.__get_connected_child_bone(bone)
                    pmx_ik_bone = pmx_bones[ik_bone_index]
                    pmx_ik_bone.isIK = True
                    pmx_ik_bone.loopCount = c.iterations
                    pmx_ik_bone.transform_order += 1
                    pmx_ik_bone.target = bone_map[ik_target_bone.name]
                    pmx_ik_bone.ik_links = self.__exportIKLinks(
                        bone, pmx_bones, bone_map, [], c.chain_count)

    def __get_connected_child_bone(self, target_bone):
        """ Get a connected child bone.

         Args:
             target_bone: A blender PoseBone

         Returns:
             A bpy.types.PoseBone object which is the closest bone from the tail position of target_bone.
             Return None if target_bone has no child bones.
        """
        r = None
        min_length = None
        for c in target_bone.children:
            length = (c.head - target_bone.tail).length
            if min_length is None or length < min_length:
                min_length = length
                r = c
        return r

    def __exportVertexMorphs(self, meshes, root):
        shape_key_names = []
        for mesh in meshes:
            for i in mesh.shape_key_names:
                if i not in shape_key_names:
                    shape_key_names.append(i)

        morph_categories = {}
        if root:
            categories = {
                'SYSTEM': pmx.Morph.CATEGORY_SYSTEM,
                'EYEBROW': pmx.Morph.CATEGORY_EYEBROW,
                'EYE': pmx.Morph.CATEGORY_EYE,
                'MOUTH': pmx.Morph.CATEGORY_MOUTH,
            }
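            # u'表情' is the facial-expression display frame; its items define each morph's category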
            for item in root.mmd_root.display_item_frames[u'表情'].items:
                morph_categories[item.name] = categories.get(
                    item.morph_category, pmx.Morph.CATEGORY_OHTER)

        for i in shape_key_names:
            exported_vert = set()
            morph = pmx.VertexMorph(i, '', 4)
            morph.category = morph_categories.get(i, pmx.Morph.CATEGORY_OHTER)
            for mesh in meshes:
                vertices = []
                for mf in mesh.material_faces.values():
                    for f in mf:
                        vertices.extend(f.vertices)

                if i in mesh.shape_key_names:
                    for v in vertices:
                        if v.index in exported_vert:
                            continue
                        exported_vert.add(v.index)

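                        # skip near-zero offsets to keep the morph data compact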
                        offset = v.offsets[mesh.shape_key_names.index(i)]
                        if mathutils.Vector(offset).length < 0.001:
                            continue

                        mo = pmx.VertexMorphOffset()
                        mo.index = v.index
                        mo.offset = offset
                        morph.offsets.append(mo)
            self.__model.morphs.append(morph)

    def __export_material_morphs(self, root):
        mmd_root = root.mmd_root
        for morph in mmd_root.material_morphs:
            mat_morph = pmx.MaterialMorph(name=morph.name,
                                          name_e=morph.name_e,
                                          category=morph.category)
            for data in morph.data:
                morph_data = pmx.MaterialMorphOffset()
                try:
                    morph_data.index = self.__material_name_table.index(
                        data.material)
                except ValueError:
                    morph_data.index = -1
                morph_data.diffuse_offset = data.diffuse_color
                morph_data.specular_offset = data.specular_color
                morph_data.ambient_offset = data.ambient_color
                morph_data.edge_color_offset = data.edge_color
                morph_data.edge_size_offset = data.edge_weight
                morph_data.texture_factor = data.texture_factor
                morph_data.sphere_texture_factor = data.sphere_texture_factor
                morph_data.toon_texture_factor = data.toon_texture_factor
                mat_morph.offsets.append(morph_data)
            self.__model.morphs.append(mat_morph)

    def __sortMaterials(self):
        """ sort materials for alpha blending

         モデル内全頂点の平均座標をモデルの中心と考えて、
         モデル中心座標とマテリアルがアサインされている全ての面の構成頂点との平均距離を算出。
         この値が小さい順にソートしてみる。
         モデル中心座標から離れている位置で使用されているマテリアルほどリストの後ろ側にくるように。
         かなりいいかげんな実装
        """
        center = mathutils.Vector([0, 0, 0])
        vertices = self.__model.vertices
        vert_num = len(vertices)
        for v in self.__model.vertices:
            center += mathutils.Vector(v.co) / vert_num

        faces = self.__model.faces
        offset = 0
        distances = []
        for mat in self.__model.materials:
            d = 0
            face_num = int(mat.vertex_count / 3)
            for i in range(offset, offset + face_num):
                face = faces[i]
                d += (mathutils.Vector(vertices[face[0]].co) - center).length
                d += (mathutils.Vector(vertices[face[1]].co) - center).length
                d += (mathutils.Vector(vertices[face[2]].co) - center).length
            distances.append((d / mat.vertex_count, mat, offset, face_num))
            offset += face_num
        sorted_faces = []
        sorted_mat = []
        for mat, offset, face_num in [
            (x[1], x[2], x[3]) for x in sorted(distances, key=lambda x: x[0])
        ]:
            sorted_faces.extend(faces[offset:offset + face_num])
            sorted_mat.append(mat)
        self.__model.materials = sorted_mat
        self.__model.faces = sorted_faces

    def __export_bone_morphs(self, root):
        mmd_root = root.mmd_root
        for morph in mmd_root.bone_morphs:
            bone_morph = pmx.BoneMorph(name=morph.name,
                                       name_e=morph.name_e,
                                       category=morph.category)
            for data in morph.data:
                morph_data = pmx.BoneMorphOffset()
                try:
                    morph_data.index = self.__bone_name_table.index(data.bone)
                except ValueError:
                    morph_data.index = -1
                morph_data.location_offset = data.location
                morph_data.rotation_offset = data.rotation
                bone_morph.offsets.append(morph_data)
            self.__model.morphs.append(bone_morph)

    def __exportDisplayItems(self, root, bone_map):
        res = []
        morph_map = {}
        for i, m in enumerate(self.__model.morphs):
            morph_map[m.name] = i
        for i in root.mmd_root.display_item_frames:
            d = pmx.Display()
            d.name = i.name
            d.name_e = i.name_e
            d.isSpecial = i.is_special
            items = []
            for j in i.items:
                if j.type == 'BONE' and j.name in bone_map:
                    items.append((0, bone_map[j.name]))
                elif j.type == 'MORPH' and j.name in morph_map:
                    items.append((1, morph_map[j.name]))
                else:
                    logging.warning('Display item (%s, %s) was not found.',
                                    j.type, j.name)
            d.data = items
            res.append(d)
        self.__model.display = res

    def __exportRigidBodies(self, rigid_bodies, bone_map):
        rigid_map = {}
        rigid_cnt = 0
        for obj in rigid_bodies:
            p_rigid = pmx.Rigid()
            p_rigid.name = obj.mmd_rigid.name
            p_rigid.name_e = obj.mmd_rigid.name_e
            p_rigid.location = mathutils.Vector(
                obj.location) * self.__scale * self.TO_PMX_MATRIX
            p_rigid.rotation = mathutils.Vector(
                obj.rotation_euler) * self.TO_PMX_MATRIX * -1
            p_rigid.mode = int(obj.mmd_rigid.type)

            rigid_shape = obj.mmd_rigid.shape
            shape_size = mathutils.Vector(mmd_model.getRigidBodySize(obj))
            if rigid_shape == 'SPHERE':
                p_rigid.type = 0
                p_rigid.size = shape_size * self.__scale
            elif rigid_shape == 'BOX':
                p_rigid.type = 1
                p_rigid.size = shape_size * self.__scale * self.TO_PMX_MATRIX
            elif rigid_shape == 'CAPSULE':
                p_rigid.type = 2
                p_rigid.size = shape_size * self.__scale
            else:
                raise Exception('Invalid rigid body shape: %s %s' %
                                (obj.name, rigid_shape))

            p_rigid.collision_group_number = obj.mmd_rigid.collision_group_number
            mask = 0
            for i, v in enumerate(obj.mmd_rigid.collision_group_mask):
                if not v:
                    mask += (1 << i)
            p_rigid.collision_group_mask = mask

            rb = obj.rigid_body
            p_rigid.mass = rb.mass
            p_rigid.friction = rb.friction
            p_rigid.bounce = rb.restitution
            p_rigid.velocity_attenuation = rb.linear_damping
            p_rigid.rotation_attenuation = rb.angular_damping

            if 'mmd_tools_rigid_parent' in obj.constraints:
                constraint = obj.constraints['mmd_tools_rigid_parent']
                bone = constraint.subtarget
                p_rigid.bone = bone_map.get(bone, -1)
            self.__model.rigids.append(p_rigid)
            rigid_map[obj] = rigid_cnt
            rigid_cnt += 1
        return rigid_map

    def __exportJoints(self, joints, rigid_map):
        for joint in joints:
            rbc = joint.rigid_body_constraint
            p_joint = pmx.Joint()
            mmd_joint = joint.mmd_joint
            p_joint.name = mmd_joint.name_j
            p_joint.name_e = mmd_joint.name_e
            p_joint.location = (mathutils.Vector(joint.location) *
                                self.TO_PMX_MATRIX * self.__scale).xyz
            p_joint.rotation = (mathutils.Vector(joint.rotation_euler) *
                                self.TO_PMX_MATRIX * -1).xyz
            p_joint.src_rigid = rigid_map.get(rbc.object1, -1)
            p_joint.dest_rigid = rigid_map.get(rbc.object2, -1)
            p_joint.maximum_location = (mathutils.Vector([
                rbc.limit_lin_x_upper,
                rbc.limit_lin_y_upper,
                rbc.limit_lin_z_upper,
            ]) * self.TO_PMX_MATRIX * self.__scale).xyz
            p_joint.minimum_location = (mathutils.Vector([
                rbc.limit_lin_x_lower,
                rbc.limit_lin_y_lower,
                rbc.limit_lin_z_lower,
            ]) * self.TO_PMX_MATRIX * self.__scale).xyz
            p_joint.maximum_rotation = (mathutils.Vector([
                rbc.limit_ang_x_lower,
                rbc.limit_ang_y_lower,
                rbc.limit_ang_z_lower,
            ]) * self.TO_PMX_MATRIX * -1).xyz
            p_joint.minimum_rotation = (mathutils.Vector([
                rbc.limit_ang_x_upper,
                rbc.limit_ang_y_upper,
                rbc.limit_ang_z_upper,
            ]) * self.TO_PMX_MATRIX * -1).xyz

            p_joint.spring_constant = (
                mathutils.Vector(mmd_joint.spring_linear) *
                self.TO_PMX_MATRIX).xyz
            p_joint.spring_rotation_constant = (
                mathutils.Vector(mmd_joint.spring_angular) *
                self.TO_PMX_MATRIX).xyz
            self.__model.joints.append(p_joint)

    @staticmethod
    def __convertFaceUVToVertexUV(vert_index, uv, vertices_map):
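        # PMX keeps one UV per vertex, so a vertex that appears with a different
        # UV (e.g. on a UV seam) is duplicated here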
        vertices = vertices_map[vert_index]
        for i in vertices:
            if i.uv is None:
                i.uv = uv
                return i
            elif (i.uv[0] - uv[0])**2 + (i.uv[1] - uv[1])**2 < 0.0001:
                return i
        n = copy.deepcopy(i)
        n.uv = uv
        vertices.append(n)
        return n

    @staticmethod
    def __triangulate(mesh):
        bm = bmesh.new()
        bm.from_mesh(mesh)
        bmesh.ops.triangulate(bm, faces=bm.faces)
        bm.to_mesh(mesh)
        bm.free()

    def __loadMeshData(self, meshObj):
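        # remember the current shape key weights and reset them so the base
        # (rest) shape is evaluated first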
        shape_key_weights = []
        for i in meshObj.data.shape_keys.key_blocks:
            shape_key_weights.append(i.value)
            i.value = 0.0

        vertex_group_names = list(map(lambda x: x.name, meshObj.vertex_groups))

        base_mesh = meshObj.to_mesh(bpy.context.scene, True, 'PREVIEW', False)
        base_mesh.transform(meshObj.matrix_world)
        base_mesh.transform(self.TO_PMX_MATRIX * self.__scale)
        self.__triangulate(base_mesh)
        base_mesh.update(calc_tessface=True)

        base_vertices = {}
        for v in base_mesh.vertices:
            base_vertices[v.index] = [
                _Vertex(
                    v.co,
                    list([(x.group, x.weight) for x in v.groups
                          if x.weight > 0]), v.normal, [])
            ]

        # calculate offsets
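        # enable each shape key in turn, re-evaluate the mesh and record the
        # per-vertex delta from the base shape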
        shape_key_names = []
        for i in meshObj.data.shape_keys.key_blocks[1:]:
            shape_key_names.append(i.name)
            i.value = 1.0
            mesh = meshObj.to_mesh(bpy.context.scene, True, 'PREVIEW', False)
            mesh.transform(meshObj.matrix_world)
            mesh.transform(self.TO_PMX_MATRIX * self.__scale)
            mesh.update(calc_tessface=True)
            for key in base_vertices.keys():
                base = base_vertices[key][0]
                v = mesh.vertices[key]
                base.offsets.append(
                    mathutils.Vector(v.co) - mathutils.Vector(base.co))
            bpy.data.meshes.remove(mesh)
            i.value = 0.0

        # load face data
        materials = {}
        for face, uv in zip(base_mesh.tessfaces,
                            base_mesh.tessface_uv_textures.active.data):
            if len(face.vertices) != 3:
                raise Exception('Face is not a triangle after triangulation')
            v1 = self.__convertFaceUVToVertexUV(face.vertices[0], uv.uv1,
                                                base_vertices)
            v2 = self.__convertFaceUVToVertexUV(face.vertices[1], uv.uv2,
                                                base_vertices)
            v3 = self.__convertFaceUVToVertexUV(face.vertices[2], uv.uv3,
                                                base_vertices)

            t = _Face([v1, v2, v3], face.normal)
            if face.material_index not in materials:
                materials[face.material_index] = []
            materials[face.material_index].append(t)

        for i, sk in enumerate(meshObj.data.shape_keys.key_blocks):
            sk.value = shape_key_weights[i]

        return _Mesh(base_mesh, materials, shape_key_names, vertex_group_names,
                     base_mesh.materials)

    def execute(self, filepath, **args):
        root = args.get('root', None)
        self.__model = pmx.Model()
        self.__model.name = 'test'
        self.__model.name_e = 'test eng'
        if root is not None:
            self.__model.name = root.mmd_root.name
            self.__model.name_e = root.mmd_root.name_e

        self.__model.comment = 'exported by mmd_tools'

        meshes = args.get('meshes', [])
        self.__armature = args.get('armature', None)
        rigid_bodies = args.get('rigid_bodies', [])
        joints = args.get('joints', [])
        self.__copyTextures = args.get('copy_textures', False)
        self.__filepath = filepath

        self.__scale = 1.0 / float(args.get('scale', 0.2))

        nameMap = self.__exportBones()
        self.__exportIK(nameMap)

        mesh_data = []
        for i in meshes:
            mesh_data.append(self.__loadMeshData(i))

        self.__exportMeshes(mesh_data, nameMap)
        self.__exportVertexMorphs(mesh_data, root)
        self.__sortMaterials()
        rigid_map = self.__exportRigidBodies(rigid_bodies, nameMap)
        self.__exportJoints(joints, rigid_map)
        if root is not None:
            self.__exportDisplayItems(root, nameMap)
            self.__export_bone_morphs(root)
            self.__export_material_morphs(root)

        if self.__copyTextures:
            tex_dir = os.path.join(os.path.dirname(filepath), 'textures')
            self.__copy_textures(tex_dir)

        pmx.save(filepath, self.__model)
Exemplo n.º 20
    def GoZit(self, pathFile):
        scn = bpy.context.scene
        diff = False
        disp = False
        nmp = False
        utag = 0
        vertsData = []
        facesData = []
        polypaint = []
        try:
            fic = open(pathFile, 'rb')
        except:
            return
        fic.seek(36, 0)
        lenObjName = unpack('<I', fic.read(4))[0] - 16
        fic.seek(8, 1)
        objName = unpack('%ss' % lenObjName, fic.read(lenObjName))[0]
        print(objName.decode('utf-8'))
        objName = objName[8:].decode('utf-8')
        me = bpy.data.meshes.new(objName)
        tag = fic.read(4)

        objMode = remember_object_mode()

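        # a GoZ file is read as a sequence of chunks: a 4-byte tag, a 4-byte
        # chunk size, an 8-byte element count, then the payload
        # (layout inferred from the reads below)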
        while tag:
            if tag == b'\x89\x13\x00\x00':
                cnt = unpack('<L', fic.read(4))[0] - 8
                fic.seek(cnt, 1)
            elif tag == b'\x11\x27\x00\x00':  #Vertices
                fic.seek(4, 1)
                cnt = unpack('<Q', fic.read(8))[0]
                me.vertices.add(cnt)
                for i in range(cnt * 3):
                    vertsData.append(unpack('<f', fic.read(4))[0])
                me.vertices.foreach_set("co", vertsData)
                del vertsData
            elif tag == b'\x21\x4e\x00\x00':  #Faces
                fic.seek(4, 1)
                cnt = unpack('<Q', fic.read(8))[0]
                me.tessfaces.add(cnt)
                for i in range(cnt):
                    v1 = unpack('<L', fic.read(4))[0]
                    v2 = unpack('<L', fic.read(4))[0]
                    v3 = unpack('<L', fic.read(4))[0]
                    v4 = unpack('<L', fic.read(4))[0]
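                    # a fourth index of 0xffffffff marks a triangle; a quad whose
                    # fourth index is 0 is rotated, since Blender's tessface
                    # vertices_raw cannot end with vertex 0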
                    if v4 == 0xffffffff:
                        facesData.append(v1)
                        facesData.append(v2)
                        facesData.append(v3)
                        facesData.append(0)
                    elif v4 == 0:
                        facesData.append(v4)
                        facesData.append(v1)
                        facesData.append(v2)
                        facesData.append(v3)
                    else:
                        facesData.append(v1)
                        facesData.append(v2)
                        facesData.append(v3)
                        facesData.append(v4)
                me.tessfaces.foreach_set("vertices_raw", facesData)
                del facesData
            elif tag == b'\xa9\x61\x00\x00':  #UVs
                me.tessface_uv_textures.new()
                fic.seek(4, 1)
                cnt = unpack('<Q', fic.read(8))[0]
                for i in range(cnt):
                    uvFace = me.tessface_uv_textures[0].data[i]
                    x, y = unpack('<2f', fic.read(8))
                    uvFace.uv1 = (x, 1. - y)
                    x, y = unpack('<2f', fic.read(8))
                    uvFace.uv2 = (x, 1. - y)
                    x, y = unpack('<2f', fic.read(8))
                    uvFace.uv3 = (x, 1. - y)
                    x, y = unpack('<2f', fic.read(8))
                    uvFace.uv4 = (x, 1. - y)
            elif tag == b'\xb9\x88\x00\x00':  #Polypainting
                break
            elif tag == b'\x32\x75\x00\x00':  #Mask
                break
            elif tag == b'\x41\x9c\x00\x00':  #Polygroups
                break
            elif tag == b'\x00\x00\x00\x00':  #End
                break
            else:
                print("unknown tag:{0}\ntry to skip it...".format(tag))
                if utag >= 10:
                    print("...Too many mesh tags unknown...")
                    break
                utag += 1
                cnt = unpack('<I', fic.read(4))[0] - 8
                fic.seek(cnt, 1)
            tag = fic.read(4)
        me.transform(
            mathutils.Matrix([(1., 0., 0., 0.), (0., 0., 1., 0.),
                              (0., -1., 0., 0.), (0., 0., 0., 1.)]))
        if objName in scn.objects:
            ob = scn.objects[objName]
            backupMatrix = deepcopy(ob.matrix_world)

            oldMesh = ob.data.name

            instances = [
                obj for obj in bpy.data.objects if obj.data == ob.data
            ]
            for instance in instances:
                instance.data = me

            me.update(calc_tessface=True)
            utag = 0
            while tag:
                if tag == b'\xb9\x88\x00\x00':  #Polypainting
                    min = 255
                    fic.seek(4, 1)
                    cnt = unpack('<Q', fic.read(8))[0]
                    for i in range(cnt):
                        data = unpack('<3B', fic.read(3))
                        unpack('<B', fic.read(1))  # Alpha
                        if data[0] < min: min = data[0]
                        polypaint.append(data)
                    if min < 250:
                        vertexColor = me.vertex_colors.new()
                        iv = 0
                        for poly in me.polygons:
                            for loop_index in poly.loop_indices:
                                loop = me.loops[loop_index]
                                v = loop.vertex_index
                                color = polypaint[v]
                                vertexColor.data[iv].color = mathutils.Color([
                                    color[2] / 255, color[1] / 255,
                                    color[0] / 255
                                ])
                                iv += 1
                    del polypaint
                elif tag == b'\x32\x75\x00\x00':  #Mask
                    fic.seek(4, 1)
                    cnt = unpack('<Q', fic.read(8))[0]
                    if 'mask' in ob.vertex_groups:
                        ob.vertex_groups.remove(ob.vertex_groups['mask'])
                    groupMask = ob.vertex_groups.new('mask')
                    for i in range(cnt):
                        data = unpack('<H', fic.read(2))[0] / 65535.
                        groupMask.add([i], 1. - data, 'ADD')
                elif tag == b'\x41\x9c\x00\x00':  #Polygroups
                    groups = []
                    fic.seek(4, 1)
                    cnt = unpack('<Q', fic.read(8))[0]
                    for i in range(cnt):
                        gr = unpack('<H', fic.read(2))[0]
                        #continue # <- Skip polygroups
                        if gr not in groups:
                            if str(gr) in ob.vertex_groups:
                                ob.vertex_groups.remove(
                                    ob.vertex_groups[str(gr)])
                            polygroup = ob.vertex_groups.new(str(gr))
                            groups.append(gr)
                        else:
                            polygroup = ob.vertex_groups[str(gr)]
                        for j in range(len(me.tessfaces[i].vertices_raw)):
                            polygroup.add([me.tessfaces[i].vertices_raw[j]],
                                          1., 'ADD')
                    try:
                        ob.vertex_groups.remove(ob.vertex_groups.get('0'))
                    except:
                        pass
                elif tag == b'\x00\x00\x00\x00':
                    break  #End
                elif tag == b'\xc9\xaf\x00\x00':  #Diff map
                    cnt = unpack('<I', fic.read(4))[0] - 16
                    fic.seek(8, 1)
                    diffName = unpack('%ss' % cnt,
                                      fic.read(cnt))[0].decode('utf-8')
                    diff = True
                elif tag == b'\xd9\xd6\x00\x00':  #Disp map
                    cnt = unpack('<I', fic.read(4))[0] - 16
                    fic.seek(8, 1)
                    dispName = unpack('%ss' % cnt,
                                      fic.read(cnt))[0].decode('utf-8')
                    disp = True
                elif tag == b'\x51\xc3\x00\x00':  #Normal map
                    cnt = unpack('<I', fic.read(4))[0] - 16
                    fic.seek(8, 1)
                    nmpName = unpack('%ss' % cnt,
                                     fic.read(cnt))[0].decode('utf-8')
                    nmp = True
                else:
                    print("unknown tag:{0}\ntry to skip it...".format(tag))
                    if utag >= 10:
                        print("...Too many object tags unknown...")
                        break
                    utag += 1
                    cnt = unpack('<I', fic.read(4))[0] - 8
                    fic.seek(cnt, 1)
                tag = fic.read(4)
            fic.close()
            bpy.ops.object.select_all(action='DESELECT')
            ob.select = True
            scn.objects.active = ob
            GoBmat = False
            slotDiff = 0
            slotDisp = 0
            slotNm = 0

            ob.matrix_world = backupMatrix
            ob.data.transform(backupMatrix.inverted())
            for matslot in bpy.data.meshes[oldMesh].materials:
                ob.data.materials.append(matslot)
            if ob.material_slots.items():
                for matslot in ob.material_slots:
                    if matslot.material:
                        for texslot in matslot.material.texture_slots:
                            if texslot:
                                if texslot.texture:
                                    if texslot.texture.type == 'IMAGE' and texslot.texture_coords == 'UV' and texslot.texture.image:
                                        if texslot.use_map_color_diffuse:
                                            slotDiff = texslot
                                        if texslot.use_map_displacement:
                                            slotDisp = texslot
                                        if texslot.use_map_normal:
                                            slotNm = texslot
                        GoBmat = matslot.material
                        break
            if diff:
                if slotDiff:
                    if bpy.path.display_name_from_filepath(
                            slotDiff.texture.image.filepath
                    ) == bpy.path.display_name_from_filepath(diffName):
                        slotDiff.texture.image.reload()
                        print("Image reloaded")
                    else:
                        slotDiff.texture.image = bpy.data.images.load(diffName)
                else:
                    if not GoBmat:
                        GoBmat = bpy.data.materials.new(
                            'GoB_{0}'.format(objName))
                        me.materials.append(GoBmat)
                    mtex = GoBmat.texture_slots.add()
                    mtex.texture = bpy.data.textures.new(
                        "GoB_diffuse", 'IMAGE')
                    mtex.texture.image = bpy.data.images.load(diffName)
                    mtex.texture_coords = 'UV'
                    mtex.use_map_color_diffuse = True
            if disp:
                if slotDisp:
                    if bpy.path.display_name_from_filepath(
                            slotDisp.texture.image.filepath
                    ) == bpy.path.display_name_from_filepath(dispName):
                        slotDisp.texture.image.reload()
                    else:
                        slotDisp.texture.image = bpy.data.images.load(dispName)
                else:
                    if not GoBmat:
                        GoBmat = bpy.data.materials.new(
                            'GoB_{0}'.format(objName))
                        me.materials.append(GoBmat)
                    mtex = GoBmat.texture_slots.add()
                    mtex.texture = bpy.data.textures.new(
                        "GoB_displacement", 'IMAGE')
                    mtex.texture.image = bpy.data.images.load(dispName)
                    mtex.texture_coords = 'UV'
                    mtex.use_map_color_diffuse = False
                    mtex.use_map_displacement = True
            if nmp:
                if slotNm:
                    if bpy.path.display_name_from_filepath(
                            slotNm.texture.image.filepath
                    ) == bpy.path.display_name_from_filepath(nmpName):
                        slotNm.texture.image.reload()
                    else:
                        slotNm.texture.image = bpy.data.images.load(nmpName)
                else:
                    if not GoBmat:
                        GoBmat = bpy.data.materials.new(
                            'GoB_{0}'.format(objName))
                        me.materials.append(GoBmat)
                    mtex = GoBmat.texture_slots.add()
                    mtex.texture = bpy.data.textures.new("GoB_normal", 'IMAGE')
                    mtex.texture.image = bpy.data.images.load(nmpName)
                    mtex.texture_coords = 'UV'
                    mtex.use_map_normal = True
                    mtex.use_map_color_diffuse = False
                    mtex.normal_factor = 1.
                    mtex.normal_map_space = 'TANGENT'
        else:
            me.update(calc_tessface=True)
            ob = bpy.data.objects.new(objName, me)
            scn.objects.link(ob)
            utag = 0
            while tag:
                if tag == b'\xb9\x88\x00\x00':  #Polypainting
                    min = 255
                    fic.seek(4, 1)
                    cnt = unpack('<Q', fic.read(8))[0]
                    for i in range(cnt):
                        data = unpack('<3B', fic.read(3))
                        unpack('<B', fic.read(1))  # Alpha
                        if data[0] < min: min = data[0]
                        polypaint.append(data)
                    if min < 250:
                        vertexColor = me.vertex_colors.new()
                        iv = 0
                        for poly in me.polygons:
                            for loop_index in poly.loop_indices:
                                loop = me.loops[loop_index]
                                v = loop.vertex_index
                                color = polypaint[v]
                                vertexColor.data[iv].color = mathutils.Color([
                                    color[2] / 255, color[1] / 255,
                                    color[0] / 255
                                ])
                                iv += 1
                elif tag == b'\x32\x75\x00\x00':  #Mask
                    fic.seek(4, 1)
                    cnt = unpack('<Q', fic.read(8))[0]
                    groupMask = ob.vertex_groups.new('mask')
                    for i in range(cnt):
                        data = unpack('<H', fic.read(2))[0] / 65535.
                        groupMask.add([i], 1. - data, 'ADD')
                elif tag == b'\x41\x9c\x00\x00':  #Polygroups
                    groups = []
                    fic.seek(4, 1)
                    cnt = unpack('<Q', fic.read(8))[0]
                    for i in range(cnt):
                        gr = unpack('<H', fic.read(2))[0]
                        if gr not in groups:
                            polygroup = ob.vertex_groups.new(str(gr))
                            groups.append(gr)
                        else:
                            polygroup = ob.vertex_groups[str(gr)]
                        for j in range(len(me.tessfaces[i].vertices_raw)):
                            polygroup.add([me.tessfaces[i].vertices_raw[j]],
                                          1., 'ADD')
                    try:
                        ob.vertex_groups.remove(ob.vertex_groups.get('0'))
                    except:
                        pass
                elif tag == b'\x00\x00\x00\x00':
                    break  #End
                elif tag == b'\xc9\xaf\x00\x00':  #Diff map
                    cnt = unpack('<I', fic.read(4))[0] - 16
                    fic.seek(8, 1)
                    diffName = unpack('%ss' % cnt, fic.read(cnt))[0]
                    print(diffName.decode('utf-8'))
                    img = bpy.data.images.load(
                        diffName.strip().decode('utf-8'))
                    diff = True
                    txtDiff = bpy.data.textures.new("GoB_diffuse", 'IMAGE')
                    txtDiff.image = img
                    me.uv_textures[0].data[0].image = img
                elif tag == b'\xd9\xd6\x00\x00':  #Disp map
                    cnt = unpack('<I', fic.read(4))[0] - 16
                    fic.seek(8, 1)
                    dispName = unpack('%ss' % cnt, fic.read(cnt))[0]
                    print(dispName.decode('utf-8'))
                    img = bpy.data.images.load(
                        dispName.strip().decode('utf-8'))
                    disp = True
                    txtDisp = bpy.data.textures.new("GoB_displacement",
                                                    'IMAGE')
                    txtDisp.image = img
                elif tag == b'\x51\xc3\x00\x00':  #Normal map
                    cnt = unpack('<I', fic.read(4))[0] - 16
                    fic.seek(8, 1)
                    nmpName = unpack('%ss' % cnt, fic.read(cnt))[0]
                    print(nmpName.decode('utf-8'))
                    img = bpy.data.images.load(nmpName.strip().decode('utf-8'))
                    nmp = True
                    txtNmp = bpy.data.textures.new("GoB_normal", 'IMAGE')
                    txtNmp.image = img
                    txtNmp.use_normal_map = True
                else:
                    print("unknown tag:{0}\ntry to skip it...".format(tag))
                    if utag >= 10:
                        print("...Too many object tags unknown...")
                        break
                    utag += 1
                    cnt = unpack('<I', fic.read(4))[0] - 8
                    fic.seek(cnt, 1)
                tag = fic.read(4)
            fic.close()
            bpy.ops.object.select_all(action='DESELECT')
            ob.select = True
            scn.objects.active = ob
            objMat = bpy.data.materials.new('GoB_{0}'.format(objName))
            if diff:
                mtex = objMat.texture_slots.add()
                mtex.texture = txtDiff
                mtex.texture_coords = 'UV'
                mtex.use_map_color_diffuse = True
            if disp:
                mtex = objMat.texture_slots.add()
                mtex.texture = txtDisp
                mtex.texture_coords = 'UV'
                mtex.use_map_color_diffuse = False
                mtex.use_map_displacement = True
            if nmp:
                mtex = objMat.texture_slots.add()
                mtex.texture = txtNmp
                mtex.texture_coords = 'UV'
                mtex.use_map_normal = True
                mtex.use_map_color_diffuse = False
                mtex.normal_factor = 1.
                mtex.normal_map_space = 'TANGENT'
            me.materials.append(objMat)

        restore_object_mode(objMode)

        return
Exemplo n.º 21
def write_tracks(context, filepath, include_hidden):
    """Main function, writes all data to the given filepath"""
    f = open(filepath, 'w', encoding='utf-8')
    f.write("%YAML:1.0\n")

    # general info about clip
    clip = context.scene.active_clip
    tr = clip.tracking
    fov = tr.camera.sensor_width / tr.camera.focal_length
    f.write("clip:\n"
            " path: {path}\n"
            " width: {width}\n"
            " height: {height}\n"
            " fov: {fov}\n"
            " distortion: {distortion}\n"
            " center-x: {center_x}\n"
            " center-y: {center_y}\n".format(
                path=bpy.path.relpath(clip.filepath,
                                      start=dirname(filepath))[2:],
                width=clip.size[0],
                height=clip.size[1],
                fov=fov,
                distortion=[tr.camera.k1, tr.camera.k2, tr.camera.k3],
                center_x=tr.camera.principal[0],
                center_y=tr.camera.principal[1]))

    # info about each frame's camera
    f.write("camera:\n")
    # Blender uses a different convention than the config file to be written
    flip = mathutils.Matrix(
        ((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, -1, 0), (0, 0, 0, 1)))
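    # flipping Z converts Blender's camera convention (looking down -Z) to the
    # one expected by the YAML consumer (an assumption based on the comment above)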
    for camera in tr.reconstruction.cameras:
        cammat = camera.matrix * flip
        cam_inv = cammat.inverted()
        distances = [(cam_inv * track.bundle.to_4d()).zw for track in tr.tracks
                     if include_hidden or not track.hide]
        # guess near and far value based on distances to the tracked points
        near = 0.8 * min(z / w for z, w in distances if z / w > 0)
        far = 2 * max(z / w for z, w in distances)
        persp = PerspectiveMatrix(fovx=fov,
                                  aspect=clip.size[0] / clip.size[1],
                                  near=near,
                                  far=far)
        f.write(" - frame: {frame}\n"
                "   near: {near}\n"
                "   far: {far}\n"
                "   projection: !!opencv-matrix\n"
                "    rows: 4\n"
                "    cols: 4\n"
                "    dt: f\n"
                "    data: [ {projection}]\n"
                "   position: !!opencv-matrix\n"
                "    rows: 4\n"
                "    cols: 1\n"
                "    dt: f\n"
                "    data: [ {position}]\n".format(
                    frame=camera.frame,
                    near=near,
                    far=far,
                    projection=", ".join(
                        str(val) for val in chain(*(persp * cam_inv))),
                    position=", ".join(
                        str(val) for val in cammat.translation.to_4d())))

    # info about each track
    f.write("tracks:\n")
    for track in tr.tracks:
        if include_hidden or not track.hide:
            f.write(" - bundle: !!opencv-matrix\n"
                    "    rows: 4\n"
                    "    cols: 1\n"
                    "    dt: f\n"
                    "    data: [ {data}]\n"
                    "   frames-enabled: [{frames}]\n".format(
                        data=", ".join(str(s) for s in track.bundle.to_4d()),
                        frames=", ".join(
                            str(marker.frame) for marker in track.markers
                            if not marker.mute)))

    f.close()

    return {'FINISHED'}
Exemplo n.º 22
    def exportGoZ(self, path, scn, ob, pathImport):
        import random
        if bpy.context.object.type == 'MESH':
            # create mesh if object is linked
            if bpy.context.object.library:
                new_ob = ob.copy()
                new_ob.data = ob.data.copy()
                scn.objects.link(new_ob)
                new_ob.select = True
                ob.select = False
                scn.objects.active = new_ob

        me = ob.to_mesh(scn, True, 'RENDER')
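        # swap Y and Z (with a sign flip) to convert Blender's Z-up axes to the
        # Y-up orientation GoZ/ZBrush appears to expect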
        mat_transform = mathutils.Matrix([(1., 0., 0., 0.), (0., 0., -1., 0.),
                                          (0., 1., 0., 0.), (0., 0., 0., 1.)])
        fic = open(pathImport + '/{0}.GoZ'.format(ob.name), 'wb')
        fic.write(b"GoZb 1.0 ZBrush GoZ Binary")
        fic.write(pack('<6B', 0x2E, 0x2E, 0x2E, 0x2E, 0x2E, 0x2E))
        fic.write(pack('<I', 1))  #obj tag
        fic.write(pack('<I', len(ob.name) + 24))
        fic.write(pack('<Q', 1))
        fic.write(b'GoZMesh_' + ob.name.encode('U8'))
        fic.write(pack('<4B', 0x89, 0x13, 0x00, 0x00))
        fic.write(pack('<I', 20))
        fic.write(pack('<Q', 1))
        fic.write(pack('<I', 0))
        nbFaces = len(me.tessfaces)
        nbVertices = len(me.vertices)
        fic.write(pack('<4B', 0x11, 0x27, 0x00, 0x00))
        fic.write(pack('<I', nbVertices * 3 * 4 + 16))
        fic.write(pack('<Q', nbVertices))
        for vert in me.vertices:
            modif_coo = ob.matrix_world * vert.co
            modif_coo = mat_transform * modif_coo
            fic.write(pack('<3f', modif_coo[0], modif_coo[1], modif_coo[2]))
        fic.write(pack('<4B', 0x21, 0x4E, 0x00, 0x00))
        fic.write(pack('<I', nbFaces * 4 * 4 + 16))
        fic.write(pack('<Q', nbFaces))
        for face in me.tessfaces:
            if len(face.vertices) == 4:
                fic.write(
                    pack('<4I', face.vertices[0], face.vertices[1],
                         face.vertices[2], face.vertices[3]))
            elif len(face.vertices) == 3:
                fic.write(
                    pack('<3I4B', face.vertices[0], face.vertices[1],
                         face.vertices[2], 0xFF, 0xFF, 0xFF, 0xFF))
        # --UVs--
        if me.tessface_uv_textures.active:
            uvdata = me.tessface_uv_textures.active.data
            fic.write(pack('<4B', 0xA9, 0x61, 0x00, 0x00))
            fic.write(pack('<I', len(uvdata) * 4 * 2 * 4 + 16))
            fic.write(pack('<Q', len(uvdata)))
            for uvface in uvdata:
                fic.write(
                    pack('<8f', uvface.uv_raw[0], 1. - uvface.uv_raw[1],
                         uvface.uv_raw[2], 1. - uvface.uv_raw[3],
                         uvface.uv_raw[4], 1. - uvface.uv_raw[5],
                         uvface.uv_raw[6], 1. - uvface.uv_raw[7]))
        # --Polypainting--
        if me.tessface_vertex_colors.active:
            vcoldata = me.tessface_vertex_colors.active.data
            vcolArray = bytearray([0] * nbVertices * 3)
            for i in range(len((vcoldata))):
                vcolArray[me.tessfaces[i].vertices[0] * 3] = int(
                    255 * vcoldata[i].color1[0])
                vcolArray[me.tessfaces[i].vertices[0] * 3 + 1] = int(
                    255 * vcoldata[i].color1[1])
                vcolArray[me.tessfaces[i].vertices[0] * 3 + 2] = int(
                    255 * vcoldata[i].color1[2])
                vcolArray[me.tessfaces[i].vertices[1] * 3] = int(
                    255 * vcoldata[i].color2[0])
                vcolArray[me.tessfaces[i].vertices[1] * 3 + 1] = int(
                    255 * vcoldata[i].color2[1])
                vcolArray[me.tessfaces[i].vertices[1] * 3 + 2] = int(
                    255 * vcoldata[i].color2[2])
                vcolArray[me.tessfaces[i].vertices[2] * 3] = int(
                    255 * vcoldata[i].color3[0])
                vcolArray[me.tessfaces[i].vertices[2] * 3 + 1] = int(
                    255 * vcoldata[i].color3[1])
                vcolArray[me.tessfaces[i].vertices[2] * 3 + 2] = int(
                    255 * vcoldata[i].color3[2])
                if len(me.tessfaces[i].vertices) == 4:
                    vcolArray[me.tessfaces[i].vertices[3] * 3] = int(
                        255 * vcoldata[i].color4[0])
                    vcolArray[me.tessfaces[i].vertices[3] * 3 + 1] = int(
                        255 * vcoldata[i].color4[1])
                    vcolArray[me.tessfaces[i].vertices[3] * 3 + 2] = int(
                        255 * vcoldata[i].color4[2])
            fic.write(pack('<4B', 0xb9, 0x88, 0x00, 0x00))
            fic.write(pack('<I', nbVertices * 4 + 16))
            fic.write(pack('<Q', nbVertices))
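            # polypaint colors are written in BGR order followed by a padding byte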
            for i in range(0, len(vcolArray), 3):
                fic.write(pack('<B', vcolArray[i + 2]))
                fic.write(pack('<B', vcolArray[i + 1]))
                fic.write(pack('<B', vcolArray[i]))
                fic.write(pack('<B', 0))
            del vcolArray
        # --Mask--
        for vertexGroup in ob.vertex_groups:
            if vertexGroup.name.lower() == 'mask':
                fic.write(pack('<4B', 0x32, 0x75, 0x00, 0x00))
                fic.write(pack('<I', nbVertices * 2 + 16))
                fic.write(pack('<Q', nbVertices))
                for i in range(nbVertices):
                    try:
                        fic.write(
                            pack('<H', int(
                                (1. - vertexGroup.weight(i)) * 65535)))
                    except:
                        fic.write(pack('<H', 255))
                break
        # --Polygroups--
        vertWeight = []
        for i in range(len(me.vertices)):
            vertWeight.append([])
            for group in me.vertices[i].groups:
                try:
                    if group.weight == 1. and ob.vertex_groups[
                            group.group].name.lower() != 'mask':
                        vertWeight[i].append(group.group)
                except:
                    print('error reading vertex group data')
        fic.write(pack('<4B', 0x41, 0x9C, 0x00, 0x00))
        fic.write(pack('<I', nbFaces * 2 + 16))
        fic.write(pack('<Q', nbFaces))
        numrand = random.randint(1, 40)
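        # a face's polygroup is the vertex group shared (with weight 1.0) by all
        # of its vertices; the random factor apparently just spreads the
        # exported group ids apart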
        for face in me.tessfaces:
            gr = []
            for vert in face.vertices:
                gr.extend(vertWeight[vert])
            gr.sort()
            gr.reverse()
            tmp = {}
            groupVal = 0
            for val in gr:
                if val not in tmp:
                    tmp[val] = 1
                else:
                    tmp[val] += 1
                    if tmp[val] == len(face.vertices):
                        groupVal = val
                        break
            if ob.vertex_groups.items() != []:
                grName = ob.vertex_groups[groupVal].name
                if grName.lower() == 'mask':
                    fic.write(pack('<H', 0))
                else:
                    grName = ob.vertex_groups[groupVal].index * numrand
                    fic.write(pack('<H', grName))
            else:
                fic.write(pack('<H', 0))
        # Diff, disp and nm maps
        diff = 0
        disp = 0
        nm = 0
        GoBmat = False
        for matslot in ob.material_slots:
            if matslot.material:
                GoBmat = matslot
                break

        # remember the current render format so it can be restored even if
        # the texture export below fails
        formatRender = scn.render.image_settings.file_format
        try:
            if GoBmat:
                for texslot in GoBmat.material.texture_slots:
                    if texslot:
                        if texslot.texture:
                            if texslot.texture.type == 'IMAGE' and texslot.texture_coords == 'UV' and texslot.texture.image:
                                if texslot.use_map_color_diffuse:
                                    diff = texslot
                                if texslot.use_map_displacement: disp = texslot
                                if texslot.use_map_normal: nm = texslot
            scn.render.image_settings.file_format = 'BMP'
            if diff:
                name = diff.texture.image.filepath.replace('\\', '/')
                name = name.rsplit('/')[-1]
                name = name.rsplit('.')[0]
                if len(name) > 5:
                    if name[-5:] == "_TXTR":
                        name = path + '/GoZProjects/Default/' + name + '.bmp'
                    else:
                        name = path + '/GoZProjects/Default/' + name + '_TXTR.bmp'
                diff.texture.image.save_render(name)
                print(name)
                name = name.encode('utf8')
                fic.write(pack('<4B', 0xc9, 0xaf, 0x00, 0x00))
                fic.write(pack('<I', len(name) + 16))
                fic.write(pack('<Q', 1))
                fic.write(pack('%ss' % len(name), name))
            if disp:
                name = disp.texture.image.filepath.replace('\\', '/')
                name = name.rsplit('/')[-1]
                name = name.rsplit('.')[0]
                if len(name) > 3:
                    if name[-3:] == "_DM":
                        name = path + '/GoZProjects/Default/' + name + '.bmp'
                    else:
                        name = path + '/GoZProjects/Default/' + name + '_DM.bmp'
                disp.texture.image.save_render(name)
                print(name)
                name = name.encode('utf8')
                fic.write(pack('<4B', 0xd9, 0xd6, 0x00, 0x00))
                fic.write(pack('<I', len(name) + 16))
                fic.write(pack('<Q', 1))
                fic.write(pack('%ss' % len(name), name))
            if nm:
                name = nm.texture.image.filepath.replace('\\', '/')
                name = name.rsplit('/')[-1]
                name = name.rsplit('.')[0]
                if len(name) > 3:
                    if name[-3:] == "_NM":
                        name = path + '/GoZProjects/Default/' + name + '.bmp'
                    else:
                        name = path + '/GoZProjects/Default/' + name + '_NM.bmp'
                nm.texture.image.save_render(name)
                print(name)
                name = name.encode('utf8')
                fic.write(pack('<4B', 0x51, 0xc3, 0x00, 0x00))
                fic.write(pack('<I', len(name) + 16))
                fic.write(pack('<Q', 1))
                fic.write(pack('%ss' % len(name), name))
            # fin
        except:
            # continue even when no textures are found
            pass

        scn.render.image_settings.file_format = formatRender
        fic.write(pack('16x'))
        fic.close()
        bpy.data.meshes.remove(me)
        return
Exemplo n.º 23
    def execute(self, context):
        selected_objects = context.selected_objects
        uv_obj = context.scene.objects.active
        wrap_name = uv_obj.name.replace('_WRAP', '')

        if len(selected_objects) < 2:
            self.report({'WARNING'}, "Select more objects")
            return {'CANCELLED'}

        if uv_obj not in selected_objects or '_WRAP' not in uv_obj.name:
            self.report({'WARNING'}, "Select WRAP object at the end of selection")
            return {'CANCELLED'}

        if not wrap_name in context.scene.objects:
            self.report({'WARNING'}, "No object " + wrap_name)
            return {'CANCELLED'}

        wrap_obj = context.scene.objects[wrap_name]

        if len(wrap_obj.data.polygons) != len(uv_obj.data.polygons):
            self.report({'WARNING'}, "Object " + wrap_name + " and object " + uv_obj.name + " have different faces count")
            return {'CANCELLED'}

        bvh = mathu.bvhtree.BVHTree.FromObject(uv_obj, context.scene)

        uv_matrix = uv_obj.matrix_world
        uv_matrix_inv = uv_matrix.inverted()
        wrap_matrix = wrap_obj.matrix_world
        wrap_matrix_inv = wrap_matrix.inverted()

        for the_obj in selected_objects:
            if the_obj != uv_obj:

                if self.copy_objects:
                    # create new object
                    new_mesh = the_obj.to_mesh(scene=context.scene, apply_modifiers=True, settings='PREVIEW')
                    new_obj = bpy.data.objects.new(wrap_obj.name + '_WRAP', new_mesh)
                    new_obj.select = True
                    context.scene.objects.link(new_obj)
                    new_obj.matrix_world = the_obj.matrix_world
                    new_obj.data.update()

                    final_obj = new_obj
                else:
                    final_obj = the_obj

                # all verts
                if self.transform_objects:
                    all_verts = [final_obj.location]
                else:
                    all_verts = []

                    if final_obj.type == 'MESH':
                        if final_obj.data.shape_keys:
                            all_verts = final_obj.data.shape_keys.key_blocks[final_obj.active_shape_key_index].data
                        else:
                            all_verts = final_obj.data.vertices

                    elif final_obj.type == 'CURVE':
                        if final_obj.data.shape_keys:
                            all_verts = final_obj.data.shape_keys.key_blocks[final_obj.active_shape_key_index].data
                        else:
                            for spline in final_obj.data.splines:
                                if spline.type == 'BEZIER':
                                    for point in spline.bezier_points:
                                        all_verts.append(point)
                                else:
                                    for point in spline.points:
                                        all_verts.append(point)

                # wrap main code
                for vert in all_verts:
                    if self.transform_objects:
                        vert_pos = vert  # here vert is just object's location
                    else:
                        if final_obj.type == 'CURVE':
                            vert_pos = Vector((vert.co[0], vert.co[1], vert.co[2]))
                            vert_pos = final_obj.matrix_world * vert_pos
                        else:
                            vert_pos = final_obj.matrix_world * vert.co.copy()

                    # near
                    vert_pos_zero = vert_pos.copy()
                    vert_pos_zero[1] = uv_obj.location[1]
                    vert_pos_zero = uv_obj.matrix_world.inverted() * vert_pos_zero
                    nearest = bvh.find_nearest(vert_pos_zero)

                    if nearest and nearest[2] is not None:
                        near_face = uv_obj.data.polygons[nearest[2]]
                        near_center = uv_obj.matrix_world * near_face.center

                        near_axis1 = ut_base.get_normal_world(near_face.normal, uv_matrix, uv_matrix_inv)

                        near_v1 = uv_obj.matrix_world * uv_obj.data.vertices[near_face.vertices[0]].co
                        near_v2 = uv_obj.matrix_world * uv_obj.data.vertices[near_face.vertices[1]].co
                        near_axis2 = (near_v1 - near_v2).normalized()

                        near_axis3 = near_axis1.cross(near_axis2).normalized()

                        dist_1 = mathu.geometry.distance_point_to_plane(vert_pos, near_center, near_axis1)
                        dist_2 = mathu.geometry.distance_point_to_plane(vert_pos, near_center, near_axis2)
                        dist_3 = mathu.geometry.distance_point_to_plane(vert_pos, near_center, near_axis3)

                        # wrap
                        wrap_face = wrap_obj.data.polygons[nearest[2]]
                        wrap_center = wrap_obj.matrix_world * wrap_face.center

                        wrap_axis1 = ut_base.get_normal_world(wrap_face.normal, wrap_matrix, wrap_matrix_inv)

                        wrap_v1 = wrap_obj.matrix_world * wrap_obj.data.vertices[wrap_face.vertices[0]].co
                        wrap_v2 = wrap_obj.matrix_world * wrap_obj.data.vertices[wrap_face.vertices[1]].co
                        wrap_axis2 = (wrap_v1 - wrap_v2).normalized()

                        wrap_axis3 = wrap_axis1.cross(wrap_axis2).normalized()

                        # move to face
                        relative_scale = (wrap_v1 - wrap_center).length / (near_v1 - near_center).length
                        new_vert_pos = wrap_center + (wrap_axis2 * dist_2 * relative_scale) + (wrap_axis3 * dist_3 * relative_scale)

                        # interpolate between Face Normal and Point Normal
                        if self.deform_normal == 'FaceAndVert':
                            vert2_min = None
                            vert2_min_dist = None
                            vert2_pos_world = None
                            for vert2_id in wrap_face.vertices:
                                vert2 = wrap_obj.data.vertices[vert2_id]
                                vert2_pos_world = wrap_obj.matrix_world * vert2.co
                                v2_dist = (vert2_pos_world - new_vert_pos).length

                                if not vert2_min:
                                    vert2_min = vert2
                                    vert2_min_dist = v2_dist
                                elif vert2_min_dist > v2_dist:
                                    vert2_min = vert2
                                    vert2_min_dist = v2_dist

                            vert2_min_nor = ut_base.get_normal_world(vert2_min.normal, wrap_matrix, wrap_matrix_inv)

                            mix_val = 0.0
                            mix_v1 = (new_vert_pos - wrap_center).length
                            mix_v2 = (vert2_pos_world - wrap_center).length
                            if mix_v2 != 0:
                                mix_val = min(mix_v1 / mix_v2, 1.0)

                            wrap_normal = wrap_axis1.lerp(vert2_min_nor, mix_val).normalized()

                        # Take just Face Normal
                        else:
                            wrap_normal = wrap_axis1

                        if self.normal_offset == 0:
                            normal_dist = dist_1 * relative_scale
                        else:
                            normal_dist = dist_1 * self.normal_offset

                        # Add normal direction to position
                        new_vert_pos += (wrap_normal * normal_dist)

                        # Mesh Vertex Transform!
                        if not self.transform_objects:
                            if final_obj.type == 'CURVE':
                                new_vert_pos_local = final_obj.matrix_world.inverted() * new_vert_pos
                                vert.co[0] = new_vert_pos_local[0]
                                vert.co[1] = new_vert_pos_local[1]
                                vert.co[2] = new_vert_pos_local[2]
                            else:
                                vert.co = final_obj.matrix_world.inverted() * new_vert_pos

                        # Object Transform
                        else:
                            if self.normal_offset == 0:
                                final_obj_scale = final_obj.scale * relative_scale
                            else:
                                final_obj_scale = final_obj.scale * self.normal_offset

                            final_matrix = final_obj.matrix_world
                            final_obj_axis1 = vert_pos + Vector((final_matrix[0][0], final_matrix[1][0], final_matrix[2][0])).normalized()
                            # we subtract here because the Y axis is negative
                            final_obj_axis2 = vert_pos - Vector((final_matrix[0][1], final_matrix[1][1], final_matrix[2][1])).normalized()

                            ax1_dist_1 = mathu.geometry.distance_point_to_plane(final_obj_axis1, near_center, near_axis1)
                            ax1_dist_2 = mathu.geometry.distance_point_to_plane(final_obj_axis1, near_center, near_axis2)
                            ax1_dist_3 = mathu.geometry.distance_point_to_plane(final_obj_axis1, near_center, near_axis3)
                            
                            ax2_dist_1 = mathu.geometry.distance_point_to_plane(final_obj_axis2, near_center, near_axis1)
                            ax2_dist_2 = mathu.geometry.distance_point_to_plane(final_obj_axis2, near_center, near_axis2)
                            ax2_dist_3 = mathu.geometry.distance_point_to_plane(final_obj_axis2, near_center, near_axis3)

                            ax1_normal_dist = ax1_dist_1 * relative_scale
                            ax2_normal_dist = ax2_dist_1 * relative_scale

                            ax1_vert_pos = wrap_center + (wrap_axis2 * ax1_dist_2 * relative_scale) + (wrap_axis3 * ax1_dist_3 * relative_scale)
                            ax1_vert_pos += (wrap_normal * ax1_normal_dist)
                            ax2_vert_pos = wrap_center + (wrap_axis2 * ax2_dist_2 * relative_scale) + (wrap_axis3 * ax2_dist_3 * relative_scale)
                            ax2_vert_pos += (wrap_normal * ax2_normal_dist)

                            final_obj_vec1 = (ax1_vert_pos - new_vert_pos).normalized()
                            final_obj_vec2 = (ax2_vert_pos - new_vert_pos).normalized()
                            final_obj_vec3 = final_obj_vec1.cross(final_obj_vec2).normalized()
                            final_obj_vec1 = final_obj_vec3.cross(final_obj_vec2).normalized()

                            final_mat = mathu.Matrix().to_3x3()
                            final_mat[0][0], final_mat[1][0], final_mat[2][0] = final_obj_vec1[0], final_obj_vec1[1], final_obj_vec1[2]
                            final_mat[0][1], final_mat[1][1], final_mat[2][1] = final_obj_vec2[0], final_obj_vec2[1], final_obj_vec2[2]
                            final_mat[0][2], final_mat[1][2], final_mat[2][2] = final_obj_vec3[0], final_obj_vec3[1], final_obj_vec3[2]
                            #final_mat = final_mat.normalized()

                            final_obj.matrix_world = final_mat.to_4x4()
                            #final_obj.rotation_euler = final_mat.to_euler()

                            # position and scale
                            final_obj.scale = final_obj_scale
                            final_obj.location = new_vert_pos


                if final_obj.type == 'MESH' and not self.transform_objects:
                    final_obj.data.update()

        return {'FINISHED'}
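# Minimal sketch of the per-face frame transfer the operator above performs:
# a point is expressed as signed distances to three planes through the source
# face center (its normal, an edge direction and their cross product), and the
# same distances are re-applied around the matching target face. Names below
# are illustrative; the separate normal_offset handling above is ignored here.
from mathutils import Vector
from mathutils.geometry import distance_point_to_plane


def transfer_point(point, src_center, src_axes, dst_center, dst_axes, scale=1.0):
    # src_axes / dst_axes: (normal, edge axis, cross product), all normalized
    dists = [distance_point_to_plane(point, src_center, axis) for axis in src_axes]
    return (dst_center
            + dst_axes[0] * dists[0] * scale
            + dst_axes[1] * dists[1] * scale
            + dst_axes[2] * dists[2] * scale)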
Exemplo n.º 24
0
matSynapse.diffuse_color = (1.0, 1.0, 0.9)
#matSynapse.use_transparency = True
matSynapse.use_nodes = True
#import pdb
#pdb.set_trace()

# Make synapses glow
emission = matSynapse.node_tree.nodes.new('ShaderNodeEmission')
emission.inputs['Strength'].default_value = 5.0

material_output = matSynapse.node_tree.nodes.get('Material Output')
matSynapse.node_tree.links.new(material_output.inputs[0], emission.outputs[0])

for ps, rt, mo, nm, nID in zip(pos, rot, morph, name, neuronID):

    eRot = mathutils.Matrix(rt.reshape(3, 3)).to_euler()
    bpy.ops.import_mesh.swc(filepath=snudda_parse_path(mo))
    obj = bpy.context.selected_objects[0]

    obj.rotation_euler = eRot
    print("Setting position: " + str(ps * 1e3))
    obj.location = ps * 1e3

    nType = nm.decode().split("_")[0]
    if (nType == "dSPN" or nType == "MSD1"):
        print(str(nID) + " dSPN")
        mat = matMSD1
    elif (nType == "iSPN" or nType == "MSD2"):
        print(str(nID) + " iSPN")
        mat = matMSD2
    elif (nType == "FSN"):
Exemplo n.º 25
0
from .supplement_xml.supplement_xml import MorphOffsets as XMLMorphOffsets
from .supplement_xml.supplement_xml import GroupMorphOffset as XMLGroupMorphOffset
from .supplement_xml.supplement_xml import MaterialMorphOffset as XMLMaterialMorphOffset
from .supplement_xml.supplement_xml import BoneMorphOffset as XMLBoneMorphOffset
from .supplement_xml.supplement_xml import Rotate as XMLRotate

# global_variable
GV = global_variable.Init()

# DEBUG = True

# def Echo(data):
#     if DEBUG:
#         print(data)

GlobalMatrix = Math.Matrix(([1, 0, 0, 0],
                            [0, 0, 1, 0],
                            [0, 1, 0, 0],
                            [0, 0, 0, 0.2]))


def GT(vec, mat):  # GlobalTransformation
    v = vec.copy()
    v.resize_4d()

    w = GlobalMatrix @ mat @ v
    w = w / w.w
    w.resize_3d()
    return w
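# Quick standalone check of what the homogeneous divide in GT() does when
# `mat` is the identity: with the bottom row of GlobalMatrix set to
# (0, 0, 0, 0.2), dividing by w scales the result by 1 / 0.2 = 5, while the
# second and third rows swap the Y and Z axes. Sketch only; it assumes
# mathutils is importable on its own and redefines the matrix locally.
import mathutils

_global_matrix = mathutils.Matrix(([1, 0, 0, 0], [0, 0, 1, 0],
                                   [0, 1, 0, 0], [0, 0, 0, 0.2]))
_v = _global_matrix @ mathutils.Vector((1.0, 2.0, 3.0, 1.0))
_v /= _v.w
print(_v.xyz)  # -> roughly (5.0, 15.0, 10.0)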


def GT_normal(vec, mat):  # GlobalTransformation
    v = vec.copy()
    v.resize_4d()
Exemplo n.º 26
0
def save(context, filepath="", frame_start=1, frame_end=300, fps=25.0, use_rest_frame=False):
    """
    Blender.Window.WaitCursor(1)

    mesh_orig = Mesh.New()
    mesh_orig.getFromObject(obj.name)
    """

    scene = context.scene
    obj = context.object

    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    orig_frame = scene.frame_current
    scene.frame_set(frame_start)
    depsgraph = context.evaluated_depsgraph_get()
    obj_eval = obj.evaluated_get(depsgraph)
    me = obj_eval.to_mesh()

    # Flip Y and Z
    '''
    mat_flip = mathutils.Matrix(((1.0, 0.0, 0.0, 0.0),
                                 (0.0, 0.0, 1.0, 0.0),
                                 (0.0, 1.0, 0.0, 0.0),
                                 (0.0, 0.0, 0.0, 1.0),
                                 ))
    '''
    mat_flip = mathutils.Matrix()

    numverts = len(me.vertices)

    numframes = frame_end - frame_start + 1
    if use_rest_frame:
        numframes += 1

    f = open(filepath, 'wb')  # no errors yet: safe to create the file

    # Write the header
    f.write(pack(">2i", numframes, numverts))

    # Write the frame times (should we use the time IPO??)
    f.write(pack(">%df" % (numframes), *[frame / fps for frame in range(numframes)]))  # seconds

    if use_rest_frame:
        check_vertcount(me, numverts)
        me.transform(mat_flip @ obj.matrix_world)
        f.write(pack(">%df" % (numverts * 3), *[axis for v in me.vertices for axis in v.co]))

    obj_eval.to_mesh_clear()

    for frame in range(frame_start, frame_end + 1):  # in order to start at desired frame
        scene.frame_set(frame)
        depsgraph = context.evaluated_depsgraph_get()
        obj_eval = obj.evaluated_get(depsgraph)
        me = obj_eval.to_mesh()
        check_vertcount(me, numverts)
        me.transform(mat_flip @ obj.matrix_world)

        # Write the vertex data
        f.write(pack(">%df" % (numverts * 3), *[axis for v in me.vertices for axis in v.co]))

        obj_eval.to_mesh_clear()

    f.close()

    print('MDD Exported: %r frames:%d\n' % (filepath, numframes - 1))
    scene.frame_set(orig_frame)

    return {'FINISHED'}
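# A minimal reader sketch for the MDD layout written above: big-endian frame
# and vertex counts, then the frame times, then numverts * 3 floats per frame.
# The function name and path handling are illustrative only.
from struct import unpack, calcsize


def read_mdd_header(filepath):
    with open(filepath, 'rb') as f:
        numframes, numverts = unpack(">2i", f.read(calcsize(">2i")))
        times = unpack(">%df" % numframes, f.read(4 * numframes))
    return numframes, numverts, times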
Exemplo n.º 27
0
    def set_parent_inverse(self, parent_inverse):
        """Blender interpolates matrix_basis; to get matrix_local (the
        parent-space transform) it must be left-multiplied by the parent
        object's matrix_parent_inverse."""
        self.parent_trans_inverse = mathutils.Matrix(parent_inverse)
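# Sketch of the relation the docstring above refers to (standard Blender
# object-parenting math, stated here as an assumption rather than taken from
# this class): matrix_local = matrix_parent_inverse @ matrix_basis, and
# matrix_world = parent.matrix_world @ matrix_local. So an interpolated
# matrix_basis must be left-multiplied by the stored parent inverse before it
# can be used as a parent-space transform.
import mathutils


def local_from_basis(parent_inverse, basis):
    return mathutils.Matrix(parent_inverse) @ mathutils.Matrix(basis)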
Exemplo n.º 28
0
    def __init__(self, uri, report, default_surface=(0.0, 0.0, 0.0)):

        self._rwx_clump_stack = []
        self._rwx_proto_dict = {}
        self._current_scope = None
        self._transform_stack = []
        self._transform_saves = []
        self._current_transform = mu.Matrix.Identity(4)

        transform_before_proto = None

        # Ready root object group
        self._rwx_clump_stack.append(RwxObject(os.path.basename(uri)))
        self._current_scope = self._rwx_clump_stack[-1]
        self._current_scope.state.surface = default_surface
        self._push_current_transform()

        try:
            with open(uri, mode = 'r') as rwx_file:
                lines = rwx_file.readlines()
        except UnicodeDecodeError:
            report({'WARNING'}, "Failed to open file using local encoding, trying cp437 (Windows/DOS) instead")

            with open(uri, mode = 'r', encoding = 'cp437') as rwx_file:
                lines = rwx_file.readlines()

        res = None
        for line in lines:
            if line[0] == '#':
                # The whole line is a comment: we can safely ditch it
                continue

            # Strip comment away
            res = self._non_comment_regex.match(line)
            if res:
                line = res.group(1)

            # Replace tabs with spaces
            line = line.replace('\t', ' ').strip()

            res = self._clumpbegin_regex.match(line)
            if res:
                self._push_current_transform()
                rwx_clump = RwxClump(state = self._current_scope.state)
                self._rwx_clump_stack[-1].clumps.append(rwx_clump)
                self._rwx_clump_stack.append(rwx_clump)
                self._current_scope = rwx_clump
                continue

            res = self._clumpend_regex.match(line)
            if res:
                self._pop_current_transform()
                self._rwx_clump_stack.pop()
                self._current_scope = self._rwx_clump_stack[-1]
                continue

            res = self._transformbegin_regex.match(line)
            if res:
                self._save_current_transform()

            res = self._transformend_regex.match(line)
            if res:
                self._load_current_transform()

            res = self._protobegin_regex.match(line)
            if res:
                name = res.group(2)
                self._rwx_proto_dict[name] = RwxScope(state = self._current_scope.state)
                self._current_scope = self._rwx_proto_dict[name]
                transform_before_proto = self._current_transform.copy()
                self._current_transform = mu.Matrix.Identity(4)
                continue

            res = self._protoend_regex.match(line)
            if res:
                self._current_scope = self._rwx_clump_stack[0]
                self._current_transform = transform_before_proto
                continue

            res = self._protoinstance_regex.match(line)
            if res:
                name = res.group(2)
                self._current_scope.apply_proto(self._rwx_proto_dict[name], self._final_transform)
                continue

            res = self._texture_regex.match(line)
            if res:
                self._current_scope.state.texture = None if res.group(2).lower() == "null" else res.group(2)
                self._current_scope.state.mask = res.group(4)
                continue

            res = self._triangle_regex.match(line)
            if res:
                v_id = [ int(x) for x in self._integer_regex.findall(res.group(2)) ]
                self._current_scope.shapes.append(RwxTriangle(v_id[0], v_id[1], v_id[2],\
                                                                  state=self._current_scope.state))
                continue

            res = self._quad_regex.match(line)
            if res:
                v_id = [ int(x) for x in self._integer_regex.findall(res.group(2)) ]
                self._current_scope.shapes.append(RwxQuad(v_id[0], v_id[1], v_id[2], v_id[3],\
                                                              state=self._current_scope.state))
                continue

            res = self._polygon_regex.match(line)
            if res:
                v_len = int(self._integer_regex.findall(res.group(2))[0])
                v_id = [ int(x) for x in self._integer_regex.findall(res.group(3)) ]
                self._current_scope.shapes.append(RwxPolygon(v_id[0:v_len],\
                                                                 state=self._current_scope.state))
                continue

            res = self._vertex_regex.match(line)
            if res:
                vprops = [ float(x[0]) for x in self._float_regex.findall(res.group(2)) ]
                if res.group(7):
                    vprops.extend([ float(x[0]) for x in self._float_regex.findall(res.group(7)) ])
                    self._current_scope.vertices.append(RwxVertex(vprops[0], vprops[1], vprops[2],
                                                                  self._final_transform,
                                                                  u = vprops[3],
                                                                  v = vprops[4]))
                else:
                    self._current_scope.vertices.append(RwxVertex(vprops[0], vprops[1], vprops[2],
                                                                  self._final_transform))
                continue

            res = self._color_regex.match(line)
            if res:
                cprops = [ float(x[0]) for x in self._float_regex.findall(res.group(2)) ]
                if len(cprops) == 3:
                    self._current_scope.state.color = tuple(cprops)
                continue

            res = self._opacity_regex.match(line)
            if res:
                self._current_scope.state.opacity = float(res.group(2))
                continue

            res = self._transform_regex.match(line)
            if res:
                tprops = [ float(x[0]) for x in self._float_regex.findall(res.group(2)) ]
                if len(tprops) == 16:
                    # Important note: the AW client seems to always act as if this
                    # element (related to the projection plane) were equal to 1
                    # when it is set to 0, which is why we always override it here.
                    if tprops[15] == 0.0:
                        tprops[15] = 1

                    self._current_transform = mu.Matrix(list(zip(*[iter(tprops)]*4))).transposed()
                continue

            res = self._translate_regex.match(line)
            if res:
                tprops = [ float(x[0]) for x in self._float_regex.findall(res.group(2)) ]
                self._current_transform = self._current_transform @ mu.Matrix.Translation(mu.Vector(tprops))
                continue

            res = self._rotate_regex.match(line)
            if res:
                rprops = [ float(x[0]) for x in self._float_regex.findall(res.group(2)) ]
                if len(rprops) == 4:
                    if rprops[0]:
                        self._current_transform =\
                            self._current_transform @ mu.Matrix.Rotation(radians(rprops[3]*rprops[0]), 4, 'X')
                    if rprops[1]:
                        self._current_transform =\
                            self._current_transform @ mu.Matrix.Rotation(radians(rprops[3]*rprops[1]), 4, 'Y')
                    if rprops[2]:
                        self._current_transform =\
                            self._current_transform @ mu.Matrix.Rotation(radians(rprops[3]*rprops[2]), 4, 'Z')
                continue

            res = self._scale_regex.match(line)
            if res:
                sprops = [ float(x[0]) for x in self._float_regex.findall(res.group(2)) ]
                if len(sprops) == 3:
                    self._current_transform = self._current_transform @\
                        mu.Matrix.Scale(sprops[0], 4, (1.0, 0.0, 0.0)) @\
                        mu.Matrix.Scale(sprops[1], 4, (0.0, 1.0, 0.0)) @\
                        mu.Matrix.Scale(sprops[2], 4, (0.0, 0.0, 1.0))
                continue

            res = self._surface_regex.match(line)
            if res:
                sprops = [ float(x[0]) for x in self._float_regex.findall(res.group(2)) ]
                if len(sprops) == 3:
                    self._current_scope.state.surface = tuple(sprops)
                continue

            res = self._ambient_regex.match(line)
            if res:
                surf = self._current_scope.state.surface
                self._current_scope.state.surface = (float(res.group(2)), surf[1], surf[2])
                continue

            res = self._diffuse_regex.match(line)
            if res:
                surf = self._current_scope.state.surface
                self._current_scope.state.surface = (surf[0], float(res.group(2)), surf[2])
                continue

            res = self._specular_regex.match(line)
            if res:
                surf = self._current_scope.state.surface
                self._current_scope.state.surface = (surf[0], surf[1], float(res.group(2)))
                continue

            res = self._materialmode_regex.match(line)
            if res:
                mat_mode = res.group(4).lower()
                if mat_mode == "none":
                    self._current_scope.state.materialmode = MaterialMode.NONE
                elif mat_mode == "null":
                    self._current_scope.state.materialmode = MaterialMode.NULL
                elif mat_mode == "double":
                    self._current_scope.state.materialmode = MaterialMode.DOUBLE
                continue

            res = self._identity_regex.match(line)
            if res:
                self._current_transform = mu.Matrix.Identity(4)
                continue
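# Small standalone check of how the Translate / Rotate / Scale commands above
# compose: each one is right-multiplied onto the current transform, so a later
# command acts in the local frame set up by the earlier ones (the scale is
# applied to a vertex first, then the rotation, then the translation).
# Sketch only; `mu` is mathutils, as in the importer.
from math import radians
import mathutils as mu

_m = mu.Matrix.Identity(4)
_m = _m @ mu.Matrix.Translation(mu.Vector((1.0, 0.0, 0.0)))
_m = _m @ mu.Matrix.Rotation(radians(90.0), 4, 'Z')
_m = _m @ mu.Matrix.Scale(2.0, 4, (1.0, 0.0, 0.0))
print((_m @ mu.Vector((1.0, 0.0, 0.0, 1.0))).xyz)  # -> roughly (1.0, 2.0, 0.0)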
Exemplo n.º 29
0
# blender modules
import bpy
import mathutils

# addon modules
from . import fmt
from .. import xray_io
from .. import utils

MATRIX_BONE = mathutils.Matrix(
    ((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, -1.0, 0.0), (0.0, 1.0, 0.0, 0.0),
     (0.0, 0.0, 0.0, 1.0))).freeze()
MATRIX_BONE_INVERTED = MATRIX_BONE.inverted().freeze()
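# Quick check of what MATRIX_BONE does (sketch only, reusing the constants and
# the mathutils import above): it maps a point (x, y, z) to (x, -z, y), i.e. a
# Y/Z swap with a sign flip, and MATRIX_BONE_INVERTED undoes it.
if __name__ == '__main__':
    _p = mathutils.Vector((1.0, 2.0, 3.0))
    print(MATRIX_BONE @ _p)                           # -> (1.0, -3.0, 2.0)
    print(MATRIX_BONE_INVERTED @ (MATRIX_BONE @ _p))  # -> (1.0, 2.0, 3.0)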


def motion_mark(packed_reader):
    name = packed_reader.gets_a()
    count = packed_reader.getf('I')[0]
    for index in range(count):
        interval_first = packed_reader.getf('f')[0]
        interval_second = packed_reader.getf('f')[0]


def examine_motions(data):
    motion_names = []
    chunked_reader = xray_io.ChunkedReader(data)
    for chunk_id, chunk_data in chunked_reader:
        if chunk_id == fmt.Chunks.S_SMPARAMS:
            packed_reader = xray_io.PackedReader(chunk_data)
            params_version = packed_reader.getf('H')[0]
            partition_count = packed_reader.getf('H')[0]
def psaimport(filename, context):
    global logf
    print("--------------------------------------------------")
    print("---------SCRIPT EXECUTING PYTHON IMPORTER---------")
    print("--------------------------------------------------")
    print("Importing file: ", filename)
    psafile = open(filename, 'rb')
    debug = True
    if (debug):
        logpath = filename.replace(".psa", ".txt")
        print("logpath:", logpath)
        logf = open(logpath, 'w')

    def printlog(strdata):
        if (debug):
            logf.write(strdata)

    def printlogplus(name, data):
        if (debug):
            logf.write(str(name) + '\n')
            if isinstance(data, bytes):
                logf.write(str(
                    bytes.decode(data).strip(bytes.decode(b'\x00'))))
            else:
                logf.write(str(data))
            logf.write('\n')

    printlog('-----------Log File------------\n')
    # General Header
    indata = struct.unpack('20s3i', psafile.read(32))
    printlogplus('ChunkID', indata[0])
    printlogplus('TypeFlag', indata[1])
    printlogplus('DataSize', indata[2])
    printlogplus('DataCount', indata[3])
    # Bones Header
    indata = struct.unpack('20s3i', psafile.read(32))
    printlogplus('ChunkID', indata[0])
    printlogplus('TypeFlag', indata[1])
    printlogplus('DataSize', indata[2])
    printlogplus('DataCount', indata[3])
    # Bones Data
    BoneIndex2NamePairMap = {}
    BoneNotFoundList = []
    printlog(
        "Name|Flgs|NumChld|PrntIdx|Qx|Qy|Qz|Qw|LocX|LocY|LocZ|Length|XSize|YSize|ZSize\n"
    )
    recCount = indata[3]
    counter = 0
    nobonematch = True
    while counter < recCount:
        indata = struct.unpack('64s3i11f', psafile.read(120))
        # printlogplus('bone', indata[0])
        bonename = str(bytes.decode(indata[0]).strip(bytes.decode(b'\x00')))
        if bonename in bpy.data.armatures['armaturedata'].bones.keys():
            BoneIndex2NamePairMap[counter] = bonename
            print('find bone', bonename)
            nobonematch = False
        else:
            print('can not find the bone:', bonename)
            BoneNotFoundList.append(counter)
        counter += 1

    if nobonematch:
        print('no bone was matched, so skipping import!')
        return

    # Animations Header
    indata = struct.unpack('20s3i', psafile.read(32))
    printlogplus('ChunkID', indata[0])
    printlogplus('TypeFlag', indata[1])
    printlogplus('DataSize', indata[2])
    printlogplus('DataCount', indata[3])
    # Animations Data
    recCount = indata[3]
    counter = 0
    Raw_Key_Nums = 0
    Action_List = []
    while counter < recCount:
        indata = struct.unpack('64s64s4i3f3i',
                               psafile.read(64 + 64 + 4 * 4 + 3 * 4 + 3 * 4))
        printlogplus('Name', indata[0])
        printlogplus('Group', indata[1])
        printlogplus('totalbones', indata[2])
        printlogplus('NumRawFrames', indata[-1])
        Name = str(bytes.decode(indata[0]).strip(bytes.decode(b'\x00')))
        Group = str(bytes.decode(indata[1]).strip(bytes.decode(b'\x00')))
        totalbones = indata[2]
        NumRawFrames = indata[-1]

        Raw_Key_Nums += indata[2] * indata[-1]
        Action_List.append((Name, Group, totalbones, NumRawFrames))

        counter += 1

    # Raw keys Header
    Raw_Key_List = []
    indata = struct.unpack('20s3i', psafile.read(32))
    printlogplus('ChunkID', indata[0])
    printlogplus('TypeFlag', indata[1])
    printlogplus('DataSize', indata[2])
    printlogplus('DataCount', indata[3])
    if (Raw_Key_Nums != indata[3]):
        print('error! Raw_Key_Nums Inconsistent')
        return
    # Raw keys Data
    recCount = Raw_Key_Nums
    counter = 0
    while counter < recCount:
        indata = struct.unpack('3f4f1f', psafile.read(3 * 4 + 4 * 4 + 4))
        pos = mathutils.Vector((indata[0], indata[1], indata[2]))
        quat = mathutils.Quaternion(
            (indata[6], indata[3], indata[4], indata[5]))
        time = indata[7]
        Raw_Key_List.append((pos, quat, time))
        counter += 1
    # Scale keys Header, Scale keys Data, Curve keys Header, Curve keys Data
    current_file_position = psafile.tell()
    psafile.seek(0, 2)
    end_file_position = psafile.tell()
    if current_file_position == end_file_position:
        print('no Scale keys,Curve keys')

    # build the animation line
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT', toggle=False)

    needed_bone_matrix = {}
    armature_obj = 'ArmObject'
    armature_data = 'armaturedata'
    if bpy.context.scene.udk_importarmatureselect:
        if len(bpy.context.scene.udkas_list) > 0:
            print("CHECKING ARMATURE...")
            # for bone in bpy.data.objects[armature_obj].pose.bones:
            # for objd in bpy.data.objects:
            # print("NAME:", objd.name, " TYPE:", objd.type)
            # if objd.type == 'ARMATURE':
            # print(dir(objd))
            armature_list = bpy.context.scene.udkas_list  # armature list array
            armature_idx = bpy.context.scene.udkimportarmature_list_idx  # armature index selected
            armature_obj = bpy.data.objects[
                armature_list[armature_idx]].name  # object armature
            armature_data = bpy.data.objects[
                armature_list[armature_idx]].data.name  # object data

    for bone in bpy.data.armatures[armature_data].bones:
        name = bone.name
        ori_matrix = bone.matrix
        matrix = bone.matrix_local.to_3x3()
        bone_rest_matrix = mathutils.Matrix(matrix)
        # bone_rest_matrix = bone.matrix_local.to_3x3()
        # bone_rest_matrix = bone.matrix_local.to_quaternion().conjugated().to_matrix()
        bone_rest_matrix_inv = mathutils.Matrix(bone_rest_matrix)
        bone_rest_matrix_inv.invert()
        bone_rest_matrix_inv.resize_4x4()
        bone_rest_matrix.resize_4x4()
        needed_bone_matrix[name] = (bone_rest_matrix, bone_rest_matrix_inv,
                                    ori_matrix)

    # build tmp pose bone tree
    psa_bones = {}
    for bone in bpy.data.objects[armature_obj].pose.bones:
        _psa_bone = psa_bone()
        _psa_bone.name = bone.name
        _psa_bone.Transform = bone.matrix
        if bone.parent is not None:
            _psa_bone.parent = psa_bones[bone.parent.name]
        else:
            _psa_bone.parent = None
        psa_bones[bone.name] = _psa_bone

    raw_key_index = 0

    for raw_action in Action_List:
        Name = raw_action[0]
        Group = raw_action[1]
        Totalbones = raw_action[2]
        NumRawFrames = raw_action[3]
        context.scene.update()
        object = bpy.data.objects['ArmObject']
        object.animation_data_create()
        action = bpy.data.actions.new(name=Name)
        object.animation_data.action = action
        for i in range(NumRawFrames):
            context.scene.frame_set(i + 1)
            pose_bones = object.pose.bones
            for j in range(Totalbones):
                if j not in BoneNotFoundList:
                    bone_name = BoneIndex2NamePairMap[j]
                    pbone = psa_bones[bone_name]
                    pos = Raw_Key_List[raw_key_index][0]
                    quat = Raw_Key_List[raw_key_index][1]

                    mat = Matrix()
                    if pbone.parent is not None:
                        quat = quat.conjugated()
                        mat = mathutils.Matrix.Translation(
                            pos) * quat.to_matrix().to_4x4()
                        mat = pose_bones[bone_name].parent.matrix * mat
                        # mat = pbone.parent.Transform * mat
                    else:
                        mat = pbone.Transform * Matrix.Translation(
                            pos) * quat.to_matrix().to_4x4()

                    pose_bones[bone_name].matrix = mat
                    pbone.Transform = mat

                raw_key_index += 1

            # bpy.data.meshes[1]
            for bone in pose_bones:
                bone.matrix = psa_bones[bone.name].Transform
                bone.keyframe_insert("rotation_quaternion")
                bone.keyframe_insert("location")

            def whirlSingleBone(pose_bone, quat):
                bpy.context.scene.update()
                # record each child's matrix and original rotation
                hymat = Quaternion(
                    (0.707, -0.707, 0, 0)).inverted().to_matrix().to_4x4()
                children_infos = {}
                childrens = pose_bone.children
                for child in childrens:
                    armmat = bpy.data.armatures['armaturedata'].bones[
                        child.name].matrix.copy().to_4x4()
                    cmat = child.matrix.copy() * armmat.inverted(
                    ) * hymat.inverted()
                    pos = cmat.to_translation()
                    rotmat = cmat.to_3x3()
                    children_infos[child] = (armmat, pos, rotmat)

                # whirl this bone by quat
                pose_bone.matrix *= quat.to_matrix().to_4x4()
                pose_bone.keyframe_insert("location")
                pose_bone.keyframe_insert("rotation_quaternion")
                bpy.context.scene.update()
                # set the children bones back to their original positions by
                # reverse-whirling each child bone with quat.inverse()

                for child in childrens:
                    armmat = children_infos[child][0]
                    pos = children_infos[child][1]
                    rotmat = children_infos[child][2]

                    child.matrix = Matrix.Translation(
                        pos) * rotmat.to_4x4() * hymat * armmat
                    child.keyframe_insert("location")
                    child.keyframe_insert("rotation_quaternion")

            for bone in pose_bones:
                if bone.parent is not None:
                    whirlSingleBone(bone, Quaternion((0.707, 0, 0, -0.707)))
                else:
                    bone.rotation_quaternion *= Quaternion(
                        (0.707, -0.707, 0, 0)) * Quaternion(
                            (0.707, 0, 0, -0.707))
                    bone.keyframe_insert("rotation_quaternion")

        break

    context.scene.frame_set(0)
    if debug:
        logf.close()
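# The importer above unpacks the same 32-byte '20s3i' chunk header four times
# (ChunkID, TypeFlag, DataSize, DataCount). A small helper sketch for that
# pattern; the namedtuple and function names are illustrative, not part of the
# original script.
import struct
from collections import namedtuple

PsaChunkHeader = namedtuple('PsaChunkHeader', 'chunk_id type_flag data_size data_count')


def read_chunk_header(fh):
    raw = struct.unpack('20s3i', fh.read(32))
    chunk_id = raw[0].rstrip(b'\x00').decode('ascii', errors='replace')
    return PsaChunkHeader(chunk_id, raw[1], raw[2], raw[3])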