def create_mesh(gltf, mesh_idx, skin_idx):
    pymesh = gltf.data.meshes[mesh_idx]

    import_user_extensions('gather_import_mesh_before_hook', gltf, pymesh)

    name = pymesh.name or 'Mesh_%d' % mesh_idx
    mesh = bpy.data.meshes.new(name)

    # Temporarily parent the mesh to an object.
    # This is used to set skin weights and shapekeys.
    tmp_ob = None
    try:
        tmp_ob = bpy.data.objects.new('##gltf-import:tmp-object##', mesh)
        do_primitives(gltf, mesh_idx, skin_idx, mesh, tmp_ob)
        set_extras(mesh,
                   gltf.data.meshes[mesh_idx].extras,
                   exclude=['targetNames'])

    finally:
        if tmp_ob:
            bpy.data.objects.remove(tmp_ob)

    import_user_extensions('gather_import_mesh_after_hook', gltf, pymesh, mesh)

    return mesh
Example #2
    def create_vnode(gltf, vnode_id):
        """Create VNode and all its descendants."""
        vnode = gltf.vnodes[vnode_id]

        gltf.display_current_node += 1
        if bpy.app.debug_value == 101:
            gltf.log.critical("Node %d of %d (id %s)", gltf.display_current_node, len(gltf.vnodes), vnode_id)

        if vnode.type == VNode.Object:
            gltf_node = gltf.data.nodes[vnode_id] if isinstance(vnode_id, int) else None
            import_user_extensions('gather_import_node_before_hook', gltf, vnode, gltf_node)
            obj = BlenderNode.create_object(gltf, vnode_id)
            import_user_extensions('gather_import_node_after_hook', gltf, vnode, gltf_node, obj)
            if vnode.is_arma:
                BlenderNode.create_bones(gltf, vnode_id)

        elif vnode.type == VNode.Bone:
            # These are created with their armature
            pass

        elif vnode.type == VNode.DummyRoot:
            # Don't actually create this
            vnode.blender_object = None

        for child in vnode.children:
            BlenderNode.create_vnode(gltf, child)
Example #3
    def create(gltf, material_idx, vertex_color):
        """Material creation."""
        pymaterial = gltf.data.materials[material_idx]

        import_user_extensions('gather_import_material_before_hook', gltf,
                               pymaterial, vertex_color)

        name = pymaterial.name
        if name is None:
            name = "Material_" + str(material_idx)

        mat = bpy.data.materials.new(name)
        pymaterial.blender_material[vertex_color] = mat.name

        set_extras(mat, pymaterial.extras)
        BlenderMaterial.set_double_sided(pymaterial, mat)
        BlenderMaterial.set_alpha_mode(pymaterial, mat)
        BlenderMaterial.set_viewport_color(pymaterial, mat, vertex_color)

        mat.use_nodes = True
        while mat.node_tree.nodes:  # clear all nodes
            mat.node_tree.nodes.remove(mat.node_tree.nodes[0])

        mh = MaterialHelper(gltf, pymaterial, mat, vertex_color)

        exts = pymaterial.extensions or {}
        if 'KHR_materials_unlit' in exts:
            unlit(mh)
        elif 'KHR_materials_pbrSpecularGlossiness' in exts:
            pbr_specular_glossiness(mh)
        else:
            pbr_metallic_roughness(mh)

        import_user_extensions('gather_import_material_after_hook', gltf,
                               pymaterial, vertex_color, mat)
Example #4
    def create(gltf, vnode, light_id):
        """Light creation."""
        pylight = gltf.data.extensions['KHR_lights_punctual']['lights'][
            light_id]

        import_user_extensions('gather_import_light_before_hook', gltf, vnode,
                               pylight)

        if pylight['type'] == "directional":
            light = BlenderLight.create_directional(gltf, light_id)
        elif pylight['type'] == "point":
            light = BlenderLight.create_point(gltf, light_id)
        elif pylight['type'] == "spot":
            light = BlenderLight.create_spot(gltf, light_id)

        if 'color' in pylight.keys():
            light.color = pylight['color']

        if 'intensity' in pylight.keys():
            light.energy = pylight['intensity']

        # TODO range

        set_extras(light, pylight.get('extras'))

        return light
Example #5
    def create(gltf, img_idx):
        """Image creation."""
        img = gltf.data.images[img_idx]

        if img.blender_image_name is not None:
            # Image is already used somewhere
            return

        import_user_extensions('gather_import_image_before_hook', gltf, img)

        if img.uri is not None and not img.uri.startswith('data:'):
            blender_image = create_from_file(gltf, img_idx)
        else:
            blender_image = create_from_data(gltf, img_idx)

        if blender_image:
            img.blender_image_name = blender_image.name

        import_user_extensions('gather_import_image_after_hook', gltf, img, blender_image)
Example #6
    def create(gltf):
        """Scene creation."""
        scene = bpy.context.scene
        gltf.blender_scene = scene.name
        # The master/scene collection is not in bpy.data.collections, so this skips it
        if bpy.context.collection.name in bpy.data.collections:
            gltf.blender_active_collection = bpy.context.collection.name
        if scene.render.engine not in ['CYCLES', 'BLENDER_EEVEE']:
            scene.render.engine = "BLENDER_EEVEE"

        if gltf.data.scene is not None:
            import_user_extensions('gather_import_scene_before_hook', gltf,
                                   gltf.data.scenes[gltf.data.scene], scene)
            pyscene = gltf.data.scenes[gltf.data.scene]
            set_extras(scene, pyscene.extras)

        compute_vnodes(gltf)

        gltf.display_current_node = 0  # for debugging
        BlenderNode.create_vnode(gltf, 'root')

        # User extensions after node creation
        import_user_extensions('gather_import_scene_after_nodes_hook', gltf,
                               gltf.data.scenes[gltf.data.scene], scene)

        BlenderScene.create_animations(gltf)

        # User extensions after animation creation
        import_user_extensions('gather_import_scene_after_animation_hook',
                               gltf, gltf.data.scenes[gltf.data.scene], scene)

        if bpy.context.mode != 'OBJECT':
            bpy.ops.object.mode_set(mode='OBJECT')
        BlenderScene.select_imported_objects(gltf)
        BlenderScene.set_active_object(gltf)
Example #7
    def anim(gltf, anim_idx):
        """Create actions/tracks for one animation."""
        # Caches the action for each object (keyed by object name)
        gltf.action_cache = {}
        # Things we need to stash when we're done.
        gltf.needs_stash = []

        import_user_extensions('gather_import_animation_before_hook', gltf,
                               anim_idx)

        for vnode_id in gltf.vnodes:
            if isinstance(vnode_id, int):
                BlenderNodeAnim.anim(gltf, anim_idx, vnode_id)
            BlenderWeightAnim.anim(gltf, anim_idx, vnode_id)

        # Push all actions onto NLA tracks with this animation's name
        track_name = gltf.data.animations[anim_idx].track_name
        for (obj, action) in gltf.needs_stash:
            simulate_stash(obj, track_name, action)

        import_user_extensions('gather_import_animation_after_hook', gltf,
                               anim_idx, track_name)
Example #8
    def create_animations(gltf):
        """Create animations."""

        # Use a class so the hook receives these options by reference and can modify them
        class IMPORT_animation_options:
            def __init__(self, restore_first_anim: bool = True):
                self.restore_first_anim = restore_first_anim

        animation_options = IMPORT_animation_options()
        import_user_extensions('gather_import_animations', gltf,
                               gltf.data.animations, animation_options)

        if gltf.data.animations:
            # NLA tracks are added bottom to top, so create animations in
            # reverse so the first winds up on top
            for anim_idx in reversed(range(len(gltf.data.animations))):
                BlenderAnimation.anim(gltf, anim_idx)

            # Restore first animation
            if animation_options.restore_first_anim:
                anim_name = gltf.data.animations[0].track_name
                BlenderAnimation.restore_animation(gltf, anim_name)
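
The IMPORT_animation_options wrapper above exists because a plain bool handed to a hook could not be changed by that hook; wrapping it in a class instance passes it by reference. A minimal standalone sketch of the pattern (the hook function here is illustrative, not part of the importer's API):

class ImportOptions:
    """Mutable options object handed to hooks (illustrative stand-in)."""
    def __init__(self, restore_first_anim: bool = True):
        self.restore_first_anim = restore_first_anim


def example_hook(options: ImportOptions) -> None:
    # Because the hook receives the instance itself, this change is
    # visible to the caller afterwards.
    options.restore_first_anim = False


opts = ImportOptions()
example_hook(opts)
assert opts.restore_first_anim is False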
Example #9
    def create(gltf, vnode, camera_id):
        """Camera creation."""
        pycamera = gltf.data.cameras[camera_id]

        import_user_extensions('gather_import_camera_before_hook', gltf, vnode,
                               pycamera)

        if not pycamera.name:
            pycamera.name = "Camera"

        cam = bpy.data.cameras.new(pycamera.name)
        set_extras(cam, pycamera.extras)

        # Blender creates a perspective camera by default
        if pycamera.type == "orthographic":
            cam.type = "ORTHO"

            # TODO: xmag/ymag

            cam.clip_start = pycamera.orthographic.znear
            cam.clip_end = pycamera.orthographic.zfar

        else:
            cam.angle_y = pycamera.perspective.yfov
            cam.lens_unit = "FOV"
            cam.sensor_fit = "VERTICAL"

            # TODO: fov/aspect ratio

            cam.clip_start = pycamera.perspective.znear
            if pycamera.perspective.zfar is not None:
                cam.clip_end = pycamera.perspective.zfar
            else:
                # Infinite projection
                cam.clip_end = 1e12  # some big number

        return cam
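
In the perspective branch above, glTF's vertical yfov maps directly onto Blender's vertical FOV (sensor_fit = 'VERTICAL'). The TODO about fov/aspect ratio concerns glTF's optional aspectRatio property; for reference, a horizontal FOV could be derived from the vertical one with standard projection math (a sketch, independent of the importer):

import math

def horizontal_fov(yfov: float, aspect_ratio: float) -> float:
    """Horizontal FOV (radians) from a vertical FOV and a width/height aspect ratio."""
    return 2.0 * math.atan(math.tan(yfov / 2.0) * aspect_ratio)

# 60 degrees vertical at 16:9 is roughly 91.5 degrees horizontal
assert abs(math.degrees(horizontal_fov(math.radians(60.0), 16 / 9)) - 91.5) < 1.0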
Example #10
    def anim(gltf, anim_idx, vnode_id):
        """Manage animation."""
        vnode = gltf.vnodes[vnode_id]

        node_idx = vnode.mesh_node_idx

        import_user_extensions('gather_import_animation_weight_before_hook',
                               gltf, vnode, gltf.data.animations[anim_idx])

        if node_idx is None:
            return

        node = gltf.data.nodes[node_idx]
        obj = vnode.blender_object
        fps = bpy.context.scene.render.fps

        animation = gltf.data.animations[anim_idx]

        if anim_idx not in node.animations.keys():
            return

        for channel_idx in node.animations[anim_idx]:
            channel = animation.channels[channel_idx]
            if channel.target.path == "weights":
                break
        else:
            return

        name = animation.track_name + "_" + obj.name
        action = bpy.data.actions.new(name)
        action.id_root = "KEY"
        gltf.needs_stash.append((obj.data.shape_keys, action))

        keys = BinaryData.get_data_from_accessor(
            gltf, animation.samplers[channel.sampler].input)
        values = BinaryData.get_data_from_accessor(
            gltf, animation.samplers[channel.sampler].output)

        # retrieve number of targets
        pymesh = gltf.data.meshes[gltf.data.nodes[node_idx].mesh]
        nb_targets = len(pymesh.shapekey_names)

        if animation.samplers[channel.sampler].interpolation == "CUBICSPLINE":
            offset = nb_targets
            stride = 3 * nb_targets
        else:
            offset = 0
            stride = nb_targets

        coords = [0] * (2 * len(keys))
        coords[::2] = (key[0] * fps for key in keys)

        for sk in range(nb_targets):
            if pymesh.shapekey_names[sk] is not None:  # do not animate shapekeys that were not created
                coords[1::2] = (values[offset + stride * i + sk][0]
                                for i in range(len(keys)))
                kb_name = pymesh.shapekey_names[sk]
                data_path = 'key_blocks["%s"].value' % bpy.utils.escape_identifier(
                    kb_name)

                make_fcurve(
                    action,
                    coords,
                    data_path=data_path,
                    group_name="ShapeKeys",
                    interpolation=animation.samplers[
                        channel.sampler].interpolation,
                )

                # Expand weight range if needed
                kb = obj.data.shape_keys.key_blocks[kb_name]
                min_weight = min(coords[1::2])
                max_weight = max(coords[1::2])
                if min_weight < kb.slider_min: kb.slider_min = min_weight
                if max_weight > kb.slider_max: kb.slider_max = max_weight

        import_user_extensions('gather_import_animation_weight_after_hook',
                               gltf, vnode, animation)
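
The offset/stride logic above comes from the glTF CUBICSPLINE output layout: for every keyframe the sampler stores in-tangents, values, and out-tangents, each block holding one entry per morph target. A small plain-Python sketch of the indexing (the function and data are illustrative):

def cubicspline_value(output, nb_targets, key_i, target_i):
    """Pick one morph-target value at one keyframe from a flat CUBICSPLINE
    sampler output laid out as [in-tangents, values, out-tangents] per key."""
    offset = nb_targets        # skip the in-tangent block of the keyframe
    stride = 3 * nb_targets    # one keyframe = tangents + values + tangents
    return output[offset + stride * key_i + target_i]

# Two keyframes, two targets: a0 a1 v0 v1 b0 b1 | a0 a1 v0 v1 b0 b1
flat = [0, 0, 0.25, 0.5, 0, 0,
        0, 0, 0.75, 1.0, 0, 0]
assert cubicspline_value(flat, nb_targets=2, key_i=1, target_i=0) == 0.75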
Example #11
def texture(
    mh,
    tex_info,
    location,  # Upper-right corner of the TexImage node
    label,  # Label for the TexImage node
    color_socket,
    alpha_socket=None,
    is_data=False,
):
    """Creates nodes for a TextureInfo and hooks up the color/alpha outputs."""
    x, y = location
    pytexture = mh.gltf.data.textures[tex_info.index]

    import_user_extensions('gather_import_texture_before_hook', mh.gltf,
                           pytexture, mh, tex_info, location, label,
                           color_socket, alpha_socket, is_data)

    if pytexture.sampler is not None:
        pysampler = mh.gltf.data.samplers[pytexture.sampler]
    else:
        pysampler = Sampler.from_dict({})

    needs_uv_map = False  # whether to create UVMap node

    # Image Texture
    tex_img = mh.node_tree.nodes.new('ShaderNodeTexImage')
    tex_img.location = x - 240, y
    tex_img.label = label
    # Get image
    if pytexture.source is not None:
        BlenderImage.create(mh.gltf, pytexture.source)
        pyimg = mh.gltf.data.images[pytexture.source]
        blender_image_name = pyimg.blender_image_name
        if blender_image_name:
            tex_img.image = bpy.data.images[blender_image_name]
    # Set colorspace for data images
    if is_data:
        if tex_img.image:
            tex_img.image.colorspace_settings.is_data = True
    # Set filtering
    set_filtering(tex_img, pysampler)
    # Outputs
    mh.node_tree.links.new(color_socket, tex_img.outputs['Color'])
    if alpha_socket is not None:
        mh.node_tree.links.new(alpha_socket, tex_img.outputs['Alpha'])
    # Inputs
    uv_socket = tex_img.inputs[0]

    x -= 340

    # Do wrapping
    wrap_s = pysampler.wrap_s
    wrap_t = pysampler.wrap_t
    if wrap_s is None:
        wrap_s = TextureWrap.Repeat
    if wrap_t is None:
        wrap_t = TextureWrap.Repeat
    # If wrapping is REPEATxREPEAT or CLAMPxCLAMP, just set tex_img.extension
    if (wrap_s, wrap_t) == (TextureWrap.Repeat, TextureWrap.Repeat):
        tex_img.extension = 'REPEAT'
    elif (wrap_s, wrap_t) == (TextureWrap.ClampToEdge,
                              TextureWrap.ClampToEdge):
        tex_img.extension = 'EXTEND'
    else:
        # Otherwise separate the UV components and use math nodes to compute
        # the wrapped UV coordinates
        # => [Separate XYZ] => [Wrap for S] => [Combine XYZ] =>
        #                   => [Wrap for T] =>

        tex_img.extension = 'EXTEND'  # slightly better errors near the edge than REPEAT

        # Combine XYZ
        com_uv = mh.node_tree.nodes.new('ShaderNodeCombineXYZ')
        com_uv.location = x - 140, y - 100
        mh.node_tree.links.new(uv_socket, com_uv.outputs[0])
        u_socket = com_uv.inputs[0]
        v_socket = com_uv.inputs[1]
        x -= 200

        for i in [0, 1]:
            wrap = [wrap_s, wrap_t][i]
            socket = [u_socket, v_socket][i]
            if wrap == TextureWrap.Repeat:
                # WRAP node for REPEAT
                math = mh.node_tree.nodes.new('ShaderNodeMath')
                math.location = x - 140, y + 30 - i * 200
                math.operation = 'WRAP'
                math.inputs[1].default_value = 0
                math.inputs[2].default_value = 1
                mh.node_tree.links.new(socket, math.outputs[0])
                socket = math.inputs[0]
            elif wrap == TextureWrap.MirroredRepeat:
                # PINGPONG node for MIRRORED_REPEAT
                math = mh.node_tree.nodes.new('ShaderNodeMath')
                math.location = x - 140, y + 30 - i * 200
                math.operation = 'PINGPONG'
                math.inputs[1].default_value = 1
                mh.node_tree.links.new(socket, math.outputs[0])
                socket = math.inputs[0]
            else:
                # Pass-through CLAMP since the tex_img node is set to EXTEND
                pass
            if i == 0:
                u_socket = socket
            else:
                v_socket = socket
        x -= 200

        # Separate XYZ
        sep_uv = mh.node_tree.nodes.new('ShaderNodeSeparateXYZ')
        sep_uv.location = x - 140, y - 100
        mh.node_tree.links.new(u_socket, sep_uv.outputs[0])
        mh.node_tree.links.new(v_socket, sep_uv.outputs[1])
        uv_socket = sep_uv.inputs[0]
        x -= 200

        needs_uv_map = True

    # UV Transform (for KHR_texture_transform)
    needs_tex_transform = 'KHR_texture_transform' in (tex_info.extensions
                                                      or {})
    if needs_tex_transform:
        mapping = mh.node_tree.nodes.new('ShaderNodeMapping')
        mapping.location = x - 160, y + 30
        mapping.vector_type = 'POINT'
        # Outputs
        mh.node_tree.links.new(uv_socket, mapping.outputs[0])
        # Inputs
        uv_socket = mapping.inputs[0]

        transform = tex_info.extensions['KHR_texture_transform']
        transform = texture_transform_gltf_to_blender(transform)
        mapping.inputs['Location'].default_value[0] = transform['offset'][0]
        mapping.inputs['Location'].default_value[1] = transform['offset'][1]
        mapping.inputs['Rotation'].default_value[2] = transform['rotation']
        mapping.inputs['Scale'].default_value[0] = transform['scale'][0]
        mapping.inputs['Scale'].default_value[1] = transform['scale'][1]

        x -= 260
        needs_uv_map = True

    # UV Map
    uv_idx = tex_info.tex_coord or 0
    try:
        uv_idx = tex_info.extensions['KHR_texture_transform']['texCoord']
    except Exception:
        pass
    if uv_idx != 0 or needs_uv_map:
        uv_map = mh.node_tree.nodes.new('ShaderNodeUVMap')
        uv_map.location = x - 160, y - 70
        uv_map.uv_map = 'UVMap' if uv_idx == 0 else 'UVMap.%03d' % uv_idx
        # Outputs
        mh.node_tree.links.new(uv_socket, uv_map.outputs[0])

    import_user_extensions('gather_import_texture_after_hook', mh.gltf,
                           pytexture, mh.node_tree, mh, tex_info, location,
                           label, color_socket, alpha_socket, is_data)
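
The wrapping branch above implements REPEAT and MIRRORED_REPEAT with WRAP and PINGPONG math nodes. The per-component math corresponds roughly to the following plain-Python functions (a sketch of the math only, not of the node graph or its exact socket semantics):

import math

def wrap(u: float, lo: float = 0.0, hi: float = 1.0) -> float:
    """REPEAT: wrap u into the [lo, hi) interval."""
    return lo + (u - lo) % (hi - lo)

def pingpong(u: float, scale: float = 1.0) -> float:
    """MIRRORED_REPEAT: reflect u back and forth between 0 and scale."""
    return scale - abs(math.fmod(abs(u), 2.0 * scale) - scale)

assert wrap(1.25) == 0.25 and wrap(-0.25) == 0.75
assert pingpong(1.25) == 0.75 and pingpong(1.75) == 0.25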
Example #12
    def create_object(gltf, vnode_id):
        vnode = gltf.vnodes[vnode_id]

        if vnode.mesh_node_idx is not None:
            obj = BlenderNode.create_mesh_object(gltf, vnode)

        elif vnode.camera_node_idx is not None:
            pynode = gltf.data.nodes[vnode.camera_node_idx]
            cam = BlenderCamera.create(gltf, vnode, pynode.camera)
            name = vnode.name or cam.name
            obj = bpy.data.objects.new(name, cam)

            # Since we create the actual Blender object after the create call, we call the hook here
            import_user_extensions('gather_import_camera_after_hook', gltf, vnode, obj, cam)

        elif vnode.light_node_idx is not None:
            pynode = gltf.data.nodes[vnode.light_node_idx]
            light = BlenderLight.create(gltf, vnode, pynode.extensions['KHR_lights_punctual']['light'])
            name = vnode.name or light.name
            obj = bpy.data.objects.new(name, light)

            # Since we create the actual Blender object after the create call, we call the hook here
            import_user_extensions('gather_import_light_after_hook', gltf, vnode, obj, light)

        elif vnode.is_arma:
            armature = bpy.data.armatures.new(vnode.arma_name)
            name = vnode.name or armature.name
            obj = bpy.data.objects.new(name, armature)

        else:
            # Empty
            name = vnode.name or vnode.default_name
            obj = bpy.data.objects.new(name, None)
            obj.empty_display_size = BlenderNode.calc_empty_display_size(gltf, vnode_id)

        vnode.blender_object = obj

        # Set extras (if this object came from a glTF node)
        if isinstance(vnode_id, int):
            pynode = gltf.data.nodes[vnode_id]
            set_extras(obj, pynode.extras)

        # Set transform
        trans, rot, scale = vnode.trs()
        obj.location = trans
        obj.rotation_mode = 'QUATERNION'
        obj.rotation_quaternion = rot
        obj.scale = scale

        # Set parent
        if vnode.parent is not None:
            parent_vnode = gltf.vnodes[vnode.parent]
            if parent_vnode.type == VNode.Object:
                obj.parent = parent_vnode.blender_object
            elif parent_vnode.type == VNode.Bone:
                arma_vnode = gltf.vnodes[parent_vnode.bone_arma]
                obj.parent = arma_vnode.blender_object
                obj.parent_type = 'BONE'
                obj.parent_bone = parent_vnode.blender_bone_name

                # Nodes with a bone parent need to be translated
                # backwards from the tip to the root
                obj.location += Vector((0, -parent_vnode.bone_length, 0))

        bpy.data.scenes[gltf.blender_scene].collection.objects.link(obj)

        return obj
Example #13
    def do_channel(gltf, anim_idx, node_idx, channel):
        animation = gltf.data.animations[anim_idx]
        vnode = gltf.vnodes[node_idx]
        path = channel.target.path

        import_user_extensions('gather_import_animation_channel_before_hook',
                               gltf, animation, vnode, path, channel)

        action = BlenderNodeAnim.get_or_create_action(gltf, node_idx,
                                                      animation.track_name)

        keys = BinaryData.get_data_from_accessor(
            gltf, animation.samplers[channel.sampler].input)
        values = BinaryData.get_data_from_accessor(
            gltf, animation.samplers[channel.sampler].output)

        if animation.samplers[channel.sampler].interpolation == "CUBICSPLINE":
            # TODO manage tangent?
            values = values[1::3]

        # Convert the curve from glTF to Blender.

        if path == "translation":
            blender_path = "location"
            group_name = "Location"
            num_components = 3
            values = [gltf.loc_gltf_to_blender(vals) for vals in values]
            values = vnode.base_locs_to_final_locs(values)

        elif path == "rotation":
            blender_path = "rotation_quaternion"
            group_name = "Rotation"
            num_components = 4
            values = [gltf.quaternion_gltf_to_blender(vals) for vals in values]
            values = vnode.base_rots_to_final_rots(values)

        elif path == "scale":
            blender_path = "scale"
            group_name = "Scale"
            num_components = 3
            values = [gltf.scale_gltf_to_blender(vals) for vals in values]
            values = vnode.base_scales_to_final_scales(values)

        # Objects parented to a bone are translated to the bone tip by default.
        # Correct for this by translating backwards from the tip to the root.
        if vnode.type == VNode.Object and path == "translation":
            if vnode.parent is not None and gltf.vnodes[
                    vnode.parent].type == VNode.Bone:
                bone_length = gltf.vnodes[vnode.parent].bone_length
                off = Vector((0, -bone_length, 0))
                values = [vals + off for vals in values]

        if vnode.type == VNode.Bone:
            # Need to animate the pose bone when the node is a bone.
            group_name = vnode.blender_bone_name
            blender_path = 'pose.bones["%s"].%s' % (
                bpy.utils.escape_identifier(
                    vnode.blender_bone_name), blender_path)

            # We have the final TRS of the bone in values. We need to give
            # the TRS of the pose bone though, which is relative to the edit
            # bone.
            #
            #     Final = EditBone * PoseBone
            #   where
            #     Final =    Trans[ft] Rot[fr] Scale[fs]
            #     EditBone = Trans[et] Rot[er]
            #     PoseBone = Trans[pt] Rot[pr] Scale[ps]
            #
            # Solving for PoseBone gives
            #
            #     pt = Rot[er^{-1}] (ft - et)
            #     pr = er^{-1} fr
            #     ps = fs

            if path == 'translation':
                edit_trans, edit_rot = vnode.editbone_trans, vnode.editbone_rot
                edit_rot_inv = edit_rot.conjugated()
                values = [
                    edit_rot_inv @ (trans - edit_trans) for trans in values
                ]

            elif path == 'rotation':
                edit_rot = vnode.editbone_rot
                edit_rot_inv = edit_rot.conjugated()
                values = [edit_rot_inv @ rot for rot in values]

            elif path == 'scale':
                pass  # no change needed

        # To ensure rotations always take the shortest path, we flip
        # adjacent antipodal quaternions.
        if path == 'rotation':
            for i in range(1, len(values)):
                if values[i].dot(values[i - 1]) < 0:
                    values[i] = -values[i]

        fps = bpy.context.scene.render.fps

        coords = [0] * (2 * len(keys))
        coords[::2] = (key[0] * fps for key in keys)

        for i in range(0, num_components):
            coords[1::2] = (vals[i] for vals in values)
            make_fcurve(
                action,
                coords,
                data_path=blender_path,
                index=i,
                group_name=group_name,
                interpolation=animation.samplers[
                    channel.sampler].interpolation,
            )

        import_user_extensions('gather_import_animation_channel_after_hook',
                               gltf, animation, vnode, path, channel, action)
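
The shortest-path fix above relies on the fact that q and -q represent the same rotation, so negating any quaternion whose dot product with its predecessor is negative keeps interpolation from swinging the long way around. A standalone sketch with plain (w, x, y, z) tuples (the helper is illustrative; the importer works on mathutils Quaternions in place):

def flip_antipodal(quats):
    """Negate quaternions as needed so consecutive ones share a hemisphere."""
    out = [quats[0]]
    for q in quats[1:]:
        dot = sum(a * b for a, b in zip(out[-1], q))
        out.append(tuple(-c for c in q) if dot < 0 else q)
    return out

# The second quaternion is (nearly) the antipode of the first and gets flipped.
qs = [(1.0, 0.0, 0.0, 0.0), (-0.999, 0.0, 0.0, -0.01)]
assert flip_antipodal(qs)[1] == (0.999, 0.0, 0.0, 0.01)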
Example #14
def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob):
    """Put all primitive data into the mesh."""
    pymesh = gltf.data.meshes[mesh_idx]

    # Use a class so the hook receives these options by reference and can modify them
    class IMPORT_mesh_options:
        def __init__(self,
                     skinning: bool = True,
                     skin_into_bind_pose: bool = True,
                     use_auto_smooth: bool = True):
            self.skinning = skinning
            self.skin_into_bind_pose = skin_into_bind_pose
            self.use_auto_smooth = use_auto_smooth

    mesh_options = IMPORT_mesh_options()
    import_user_extensions('gather_import_mesh_options', gltf, mesh_options,
                           pymesh, skin_idx)

    # Scan the primitives to find out what we need to create

    has_normals = False
    num_uvs = 0
    num_cols = 0
    num_joint_sets = 0
    for prim in pymesh.primitives:
        if 'POSITION' not in prim.attributes:
            continue

        if gltf.import_settings['import_shading'] == "NORMALS":
            if 'NORMAL' in prim.attributes:
                has_normals = True

        if skin_idx is not None:
            i = 0
            while ('JOINTS_%d' % i) in prim.attributes and \
                    ('WEIGHTS_%d' % i) in prim.attributes:
                i += 1
            num_joint_sets = max(i, num_joint_sets)

        i = 0
        while i < UV_MAX and ('TEXCOORD_%d' % i) in prim.attributes:
            i += 1
        num_uvs = max(i, num_uvs)

        i = 0
        while i < COLOR_MAX and ('COLOR_%d' % i) in prim.attributes:
            i += 1
        num_cols = max(i, num_cols)

    num_shapekeys = 0
    if len(pymesh.primitives) > 0:  # an empty primitives list is invalid, but some files have one anyway
        for morph_i, _ in enumerate(pymesh.primitives[0].targets or []):
            if pymesh.shapekey_names[morph_i] is not None:
                num_shapekeys += 1

    # -------------
    # We'll process all the primitives, gathering arrays to feed into the
    # various foreach_set functions that create the mesh data.

    num_faces = 0  # total number of faces
    vert_locs = np.empty(dtype=np.float32,
                         shape=(0, 3))  # coordinate for each vert
    vert_normals = np.empty(dtype=np.float32,
                            shape=(0, 3))  # normal for each vert
    edge_vidxs = np.array([],
                          dtype=np.uint32)  # vertex_index for each loose edge
    loop_vidxs = np.array([], dtype=np.uint32)  # vertex_index for each loop
    loop_uvs = [
        np.empty(dtype=np.float32,
                 shape=(0, 2))  # UV for each loop for each layer
        for _ in range(num_uvs)
    ]
    loop_cols = [
        np.empty(dtype=np.float32,
                 shape=(0, 4))  # color for each loop for each layer
        for _ in range(num_cols)
    ]
    vert_joints = [
        np.empty(dtype=np.uint32,
                 shape=(0, 4))  # 4 joints for each vert for each set
        for _ in range(num_joint_sets)
    ]
    vert_weights = [
        np.empty(dtype=np.float32,
                 shape=(0, 4))  # 4 weights for each vert for each set
        for _ in range(num_joint_sets)
    ]
    sk_vert_locs = [
        np.empty(dtype=np.float32,
                 shape=(0, 3))  # coordinate for each vert for each shapekey
        for _ in range(num_shapekeys)
    ]

    for prim in pymesh.primitives:
        prim.num_faces = 0

        if 'POSITION' not in prim.attributes:
            continue

        vert_index_base = len(vert_locs)

        if prim.extensions is not None and 'KHR_draco_mesh_compression' in prim.extensions:
            print_console(
                'INFO',
                'Draco Decoder: Decode primitive {}'.format(pymesh.name
                                                            or '[unnamed]'))
            decode_primitive(gltf, prim)

        import_user_extensions('gather_import_decode_primitive', gltf, pymesh,
                               prim, skin_idx)

        if prim.indices is not None:
            indices = BinaryData.decode_accessor(gltf, prim.indices)
            indices = indices.reshape(len(indices))
        else:
            num_verts = gltf.data.accessors[prim.attributes['POSITION']].count
            indices = np.arange(0, num_verts, dtype=np.uint32)

        mode = 4 if prim.mode is None else prim.mode
        points, edges, tris = points_edges_tris(mode, indices)
        if points is not None:
            indices = points
        elif edges is not None:
            indices = edges
        else:
            indices = tris

        # We'll add one vert to the arrays for each index used in indices
        unique_indices, inv_indices = np.unique(indices, return_inverse=True)

        vs = BinaryData.decode_accessor(gltf,
                                        prim.attributes['POSITION'],
                                        cache=True)
        vert_locs = np.concatenate((vert_locs, vs[unique_indices]))

        if has_normals:
            if 'NORMAL' in prim.attributes:
                ns = BinaryData.decode_accessor(gltf,
                                                prim.attributes['NORMAL'],
                                                cache=True)
                ns = ns[unique_indices]
            else:
                ns = np.zeros((len(unique_indices), 3), dtype=np.float32)
            vert_normals = np.concatenate((vert_normals, ns))

        for i in range(num_joint_sets):
            if ('JOINTS_%d' % i) in prim.attributes and ('WEIGHTS_%d' %
                                                         i) in prim.attributes:
                js = BinaryData.decode_accessor(gltf,
                                                prim.attributes['JOINTS_%d' %
                                                                i],
                                                cache=True)
                ws = BinaryData.decode_accessor(gltf,
                                                prim.attributes['WEIGHTS_%d' %
                                                                i],
                                                cache=True)
                js = js[unique_indices]
                ws = ws[unique_indices]
            else:
                js = np.zeros((len(unique_indices), 4), dtype=np.uint32)
                ws = np.zeros((len(unique_indices), 4), dtype=np.float32)
            vert_joints[i] = np.concatenate((vert_joints[i], js))
            vert_weights[i] = np.concatenate((vert_weights[i], ws))

        for morph_i, target in enumerate(prim.targets or []):
            if pymesh.shapekey_names[morph_i] is None:
                continue
            morph_vs = BinaryData.decode_accessor(gltf,
                                                  target['POSITION'],
                                                  cache=True)
            morph_vs = morph_vs[unique_indices]
            sk_vert_locs[morph_i] = np.concatenate(
                (sk_vert_locs[morph_i], morph_vs))

        # inv_indices are the indices into the verts just for this prim;
        # calculate indices into the overall verts array
        prim_vidxs = inv_indices.astype(np.uint32, copy=False)
        prim_vidxs += vert_index_base  # offset for verts from previous prims

        if edges is not None:
            edge_vidxs = np.concatenate((edge_vidxs, prim_vidxs))

        if tris is not None:
            prim.num_faces = len(indices) // 3
            num_faces += prim.num_faces

            loop_vidxs = np.concatenate((loop_vidxs, prim_vidxs))

            for uv_i in range(num_uvs):
                if ('TEXCOORD_%d' % uv_i) in prim.attributes:
                    uvs = BinaryData.decode_accessor(
                        gltf,
                        prim.attributes['TEXCOORD_%d' % uv_i],
                        cache=True)
                    uvs = uvs[indices]
                else:
                    uvs = np.zeros((len(indices), 2), dtype=np.float32)
                loop_uvs[uv_i] = np.concatenate((loop_uvs[uv_i], uvs))

            for col_i in range(num_cols):
                if ('COLOR_%d' % col_i) in prim.attributes:
                    cols = BinaryData.decode_accessor(
                        gltf, prim.attributes['COLOR_%d' % col_i], cache=True)
                    cols = cols[indices]
                    if cols.shape[1] == 3:
                        cols = colors_rgb_to_rgba(cols)
                else:
                    cols = np.ones((len(indices), 4), dtype=np.float32)
                loop_cols[col_i] = np.concatenate((loop_cols[col_i], cols))

    # Accessors are cached in case they are shared between primitives; clear
    # the cache now that all prims are done.
    gltf.decode_accessor_cache = {}

    if gltf.import_settings['merge_vertices']:
        vert_locs, vert_normals, vert_joints, vert_weights, \
        sk_vert_locs, loop_vidxs, edge_vidxs = \
            merge_duplicate_verts(
                vert_locs, vert_normals, vert_joints, vert_weights,
                sk_vert_locs, loop_vidxs, edge_vidxs,
            )

    # ---------------
    # Convert all the arrays glTF -> Blender

    # Change from relative to absolute positions for morph locs
    for sk_locs in sk_vert_locs:
        sk_locs += vert_locs

    gltf.locs_batch_gltf_to_blender(vert_locs)
    gltf.normals_batch_gltf_to_blender(vert_normals)
    for sk_locs in sk_vert_locs:
        gltf.locs_batch_gltf_to_blender(sk_locs)

    if num_joint_sets and mesh_options.skin_into_bind_pose:
        skin_into_bind_pose(
            gltf,
            skin_idx,
            vert_joints,
            vert_weights,
            locs=[vert_locs] + sk_vert_locs,
            vert_normals=vert_normals,
        )

    for uvs in loop_uvs:
        uvs_gltf_to_blender(uvs)

    # ---------------
    # Start creating things

    mesh.vertices.add(len(vert_locs))
    mesh.vertices.foreach_set('co', squish(vert_locs))

    mesh.loops.add(len(loop_vidxs))
    mesh.loops.foreach_set('vertex_index', loop_vidxs)

    mesh.edges.add(len(edge_vidxs) // 2)
    mesh.edges.foreach_set('vertices', edge_vidxs)

    mesh.polygons.add(num_faces)

    # All polys are tris
    loop_starts = np.arange(0, 3 * num_faces, step=3)
    loop_totals = np.full(num_faces, 3)
    mesh.polygons.foreach_set('loop_start', loop_starts)
    mesh.polygons.foreach_set('loop_total', loop_totals)

    for uv_i in range(num_uvs):
        name = 'UVMap' if uv_i == 0 else 'UVMap.%03d' % uv_i
        layer = mesh.uv_layers.new(name=name)

        if layer is None:
            print(
                "WARNING: UV map is ignored because the maximum number of UV layers has been reached."
            )
            break

        layer.data.foreach_set('uv', squish(loop_uvs[uv_i]))

    for col_i in range(num_cols):
        name = 'Col' if col_i == 0 else 'Col.%03d' % col_i
        layer = mesh.vertex_colors.new(name=name)

        if layer is None:
            print(
                "WARNING: Vertex colors are ignored because the maximum number of vertex color layers has been "
                "reached.")
            break

        mesh.color_attributes[layer.name].data.foreach_set(
            'color', squish(loop_cols[col_i]))

    # Skinning
    # TODO: this is slow :/
    if num_joint_sets and mesh_options.skinning:
        pyskin = gltf.data.skins[skin_idx]
        for i, node_idx in enumerate(pyskin.joints):
            bone = gltf.vnodes[node_idx]
            ob.vertex_groups.new(name=bone.blender_bone_name)

        vgs = list(ob.vertex_groups)

        for i in range(num_joint_sets):
            js = vert_joints[i].tolist()  # tolist() is faster
            ws = vert_weights[i].tolist()
            for vi in range(len(vert_locs)):
                w0, w1, w2, w3 = ws[vi]
                j0, j1, j2, j3 = js[vi]
                if w0 != 0: vgs[j0].add((vi, ), w0, 'REPLACE')
                if w1 != 0: vgs[j1].add((vi, ), w1, 'REPLACE')
                if w2 != 0: vgs[j2].add((vi, ), w2, 'REPLACE')
                if w3 != 0: vgs[j3].add((vi, ), w3, 'REPLACE')

    # Shapekeys
    if num_shapekeys:
        ob.shape_key_add(name='Basis')
        mesh.shape_keys.name = mesh.name

        sk_i = 0
        for sk_name in pymesh.shapekey_names:
            if sk_name is None:
                continue

            ob.shape_key_add(name=sk_name)
            key_block = mesh.shape_keys.key_blocks[sk_name]
            key_block.data.foreach_set('co', squish(sk_vert_locs[sk_i]))

            sk_i += 1

    # ----
    # Assign materials to faces
    has_materials = any(prim.material is not None
                        for prim in pymesh.primitives)
    if has_materials:
        material_indices = np.empty(num_faces, dtype=np.uint32)
        empty_material_slot_index = None
        f = 0

        for prim in pymesh.primitives:
            if prim.material is not None:
                # Get the material
                pymaterial = gltf.data.materials[prim.material]
                vertex_color = 'COLOR_0' if ('COLOR_0'
                                             in prim.attributes) else None
                if vertex_color not in pymaterial.blender_material:
                    BlenderMaterial.create(gltf, prim.material, vertex_color)
                material_name = pymaterial.blender_material[vertex_color]

                # Put material in slot (if not there)
                if material_name not in mesh.materials:
                    mesh.materials.append(bpy.data.materials[material_name])
                material_index = mesh.materials.find(material_name)
            else:
                if empty_material_slot_index is None:
                    mesh.materials.append(None)
                    empty_material_slot_index = len(mesh.materials) - 1
                material_index = empty_material_slot_index

            material_indices[f:f + prim.num_faces].fill(material_index)

            f += prim.num_faces

        mesh.polygons.foreach_set('material_index', material_indices)

    # ----
    # Normals

    # Set polys smooth/flat
    set_poly_smoothing(gltf, pymesh, mesh, vert_normals, loop_vidxs)

    mesh.validate()
    has_loose_edges = len(edge_vidxs) != 0  # loose edges only show up if calc_edges_loose is requested
    mesh.update(calc_edges_loose=has_loose_edges)

    if has_normals:
        mesh.create_normals_split()
        mesh.normals_split_custom_set_from_vertices(vert_normals)
        mesh.use_auto_smooth = mesh_options.use_auto_smooth