Example #1
def __gather_animation(blender_action: bpy.types.Action,
                       blender_object: bpy.types.Object,
                       export_settings) -> typing.Optional[gltf2_io.Animation]:
    if not __filter_animation(blender_action, blender_object, export_settings):
        return None

    name = __gather_name(blender_action, blender_object, export_settings)
    try:
        animation = gltf2_io.Animation(
            channels=__gather_channels(blender_action, blender_object,
                                       export_settings),
            extensions=__gather_extensions(blender_action, blender_object,
                                           export_settings),
            extras=__gather_extras(blender_action, blender_object,
                                   export_settings),
            name=name,
            samplers=__gather_samplers(blender_action, blender_object,
                                       export_settings))
    except RuntimeError as error:
        print_console(
            "WARNING",
            "Animation '{}' could not be exported. Cause: {}".format(
                name, error))
        return None

    # Link samplers so they can be reused within one animation.
    __link_samplers(animation, export_settings)

    if not animation.channels:
        return None

    export_user_extensions('gather_animation_hook', export_settings, animation,
                           blender_action, blender_object)

    return animation
Example #2
def __gather_indices(blender_primitive, blender_mesh, modifiers,
                     export_settings):
    indices = blender_primitive['indices']

    # NOTE: Values used by some graphics APIs as "primitive restart" values are disallowed.
    # Specifically, the values 65535 (in UINT16) and 4294967295 (in UINT32) cannot be used as indices.
    # https://github.com/KhronosGroup/glTF/issues/1142
    # https://github.com/KhronosGroup/glTF/pull/1476/files
    # Also, UINT8 mode is not supported:
    # https://github.com/KhronosGroup/glTF/issues/1471
    max_index = indices.max()
    if max_index < 65535:
        component_type = gltf2_io_constants.ComponentType.UnsignedShort
        indices = indices.astype(np.uint16, copy=False)
    elif max_index < 4294967295:
        component_type = gltf2_io_constants.ComponentType.UnsignedInt
        indices = indices.astype(np.uint32, copy=False)
    else:
        print_console(
            'ERROR', 'A mesh contains too many vertices (' + str(max_index) +
            ') and needs to be split before export.')
        return None

    element_type = gltf2_io_constants.DataType.Scalar
    binary_data = gltf2_io_binary_data.BinaryData(indices.tobytes())
    return gltf2_blender_gather_accessors.gather_accessor(
        binary_data, component_type, len(indices), None, None, element_type,
        export_settings)
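As a side note to the component-type selection above, here is a minimal standalone sketch with plain NumPy (the helper name pick_index_dtype is illustrative, not part of the exporter): an index equal to 65535 already forces promotion to 32-bit because that value is reserved for primitive restart.

import numpy as np

def pick_index_dtype(indices: np.ndarray) -> np.ndarray:
    # Mirrors the thresholds above: 65535 and 4294967295 are reserved as
    # primitive-restart values, so they must not appear as indices.
    max_index = indices.max()
    if max_index < 65535:
        return indices.astype(np.uint16, copy=False)
    if max_index < 4294967295:
        return indices.astype(np.uint32, copy=False)
    raise ValueError("mesh has too many vertices and must be split")

print(pick_index_dtype(np.array([0, 1, 65534])).dtype)  # uint16
print(pick_index_dtype(np.array([0, 1, 65535])).dtype)  # uint32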
Example #3
def gather_mesh(blender_mesh: bpy.types.Mesh,
                library: Optional[str],
                blender_object: Optional[bpy.types.Object],
                vertex_groups: Optional[bpy.types.VertexGroups],
                modifiers: Optional[bpy.types.ObjectModifiers],
                skip_filter: bool,
                material_names: Tuple[str],
                export_settings
                ) -> Optional[gltf2_io.Mesh]:
    if not skip_filter and not __filter_mesh(blender_mesh, library, vertex_groups, modifiers, export_settings):
        return None

    mesh = gltf2_io.Mesh(
        extensions=__gather_extensions(blender_mesh, library, vertex_groups, modifiers, export_settings),
        extras=__gather_extras(blender_mesh, library, vertex_groups, modifiers, export_settings),
        name=__gather_name(blender_mesh, library, vertex_groups, modifiers, export_settings),
        weights=__gather_weights(blender_mesh, library, vertex_groups, modifiers, export_settings),
        primitives=__gather_primitives(blender_mesh, library, blender_object, vertex_groups, modifiers, material_names, export_settings),
    )

    if len(mesh.primitives) == 0:
        print_console("WARNING", "Mesh '{}' has no primitives and will be omitted.".format(mesh.name))
        return None

    export_user_extensions('gather_mesh_hook',
                           export_settings,
                           mesh,
                           blender_mesh,
                           blender_object,
                           vertex_groups,
                           modifiers,
                           skip_filter,
                           material_names)

    return mesh
Example #4
def __gather_skins(blender_primitive, export_settings):
    attributes = {}
    if export_settings[gltf2_blender_export_keys.SKINS]:
        bone_set_index = 0
        joint_id = 'JOINTS_' + str(bone_set_index)
        weight_id = 'WEIGHTS_' + str(bone_set_index)
        while blender_primitive["attributes"].get(
                joint_id) and blender_primitive["attributes"].get(weight_id):
            if bone_set_index >= 1:
                if not export_settings['gltf_all_vertex_influences']:
                    gltf2_io_debug.print_console(
                        "WARNING",
                        "There are more than 4 joint vertex influences."
                        "The 4 with highest weight will be used (and normalized)."
                    )
                    break

            # joints
            internal_joint = blender_primitive["attributes"][joint_id]
            component_type = gltf2_io_constants.ComponentType.UnsignedShort
            if max(internal_joint) < 256:
                component_type = gltf2_io_constants.ComponentType.UnsignedByte
            joint = array_to_accessor(
                internal_joint,
                component_type,
                data_type=gltf2_io_constants.DataType.Vec4,
            )
            attributes[joint_id] = joint

            # weights
            internal_weight = blender_primitive["attributes"][weight_id]
            # normalize first 4 weights, when not exporting all influences
            if not export_settings['gltf_all_vertex_influences']:
                for idx in range(0, len(internal_weight), 4):
                    weight_slice = internal_weight[idx:idx + 4]
                    total = sum(weight_slice)
                    if total > 0:
                        factor = 1.0 / total
                        internal_weight[idx:idx + 4] = [
                            w * factor for w in weight_slice
                        ]

            weight = array_to_accessor(
                internal_weight,
                component_type=gltf2_io_constants.ComponentType.Float,
                data_type=gltf2_io_constants.DataType.Vec4,
            )
            attributes[weight_id] = weight

            bone_set_index += 1
            joint_id = 'JOINTS_' + str(bone_set_index)
            weight_id = 'WEIGHTS_' + str(bone_set_index)
    return attributes
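The per-vertex normalization loop above can also be expressed in vectorized form. A hedged sketch, assuming the weights arrive as a flat sequence whose length is a multiple of 4 (the helper name normalize_weight_groups is illustrative):

import numpy as np

def normalize_weight_groups(weights):
    # One row of 4 influences per vertex.
    groups = np.asarray(weights, dtype=np.float32).reshape(-1, 4)
    totals = groups.sum(axis=1, keepdims=True)
    # Only rescale rows with a positive total, matching the `if total > 0`
    # guard in the loop above.
    np.divide(groups, totals, out=groups, where=totals > 0)
    return groups.reshape(-1)

print(normalize_weight_groups([0.2, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
# [0.5 0.5 0.  0.  0.  0.  0.  0. ]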
Example #5
def __gather_sampler(blender_shader_sockets, export_settings):
    shader_nodes = [
        __get_tex_from_socket(socket).shader_node
        for socket in blender_shader_sockets
    ]
    if len(shader_nodes) > 1:
        gltf2_io_debug.print_console(
            "WARNING",
            "More than one shader node tex image used for a texture. "
            "The resulting glTF sampler will behave like the first shader node tex image."
        )
    return gltf2_blender_gather_sampler.gather_sampler(shader_nodes[0],
                                                       export_settings)
Example #6
def __write_file(json, buffer, export_settings):
    try:
        gltf2_io_export.save_gltf(json, export_settings,
                                  gltf2_blender_json.BlenderJSONEncoder,
                                  buffer)
    except AssertionError as e:
        _, _, tb = sys.exc_info()
        traceback.print_tb(tb)  # Fixed format
        tb_info = traceback.extract_tb(tb)
        for tbi in tb_info:
            filename, line, func, text = tbi
            print_console(
                'ERROR', 'An error occurred on line {} in statement {}'.format(
                    line, text))
        print_console('ERROR', str(e))
        raise e
Example #7
def __gather_intensity(blender_lamp, _) -> Optional[float]:
    emission_node = __get_cycles_emission_node(blender_lamp)
    if emission_node is not None:
        if blender_lamp.type != 'SUN':
            # When using cycles, the strength should be influenced by a LightFalloff node
            result = gltf2_blender_search_node_tree.from_socket(
                emission_node.inputs.get("Strength"),
                gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeLightFalloff)
            )
            if result:
                quadratic_falloff_node = result[0].shader_node
                emission_strength = quadratic_falloff_node.inputs["Strength"].default_value / (math.pi * 4.0)
            else:
                gltf2_io_debug.print_console('WARNING',
                                             'No quadratic light falloff node attached to emission strength property')
                emission_strength = blender_lamp.energy
        else:
            emission_strength = emission_node.inputs["Strength"].default_value
        return emission_strength

    return blender_lamp.energy
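The division by math.pi * 4.0 above presumably spreads the falloff strength over the 4π steradians of a full sphere; a tiny arithmetic sketch (the function name is illustrative):

import math

def falloff_strength_to_intensity(strength: float) -> float:
    # Same conversion as above: strength / (4 * pi).
    return strength / (math.pi * 4.0)

print(falloff_strength_to_intensity(1000.0))  # ~79.58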
Example #8
def __gather_orm_texture(blender_material, export_settings):
    # Check for the presence of Occlusion, Roughness, Metallic sharing a single image.
    # If not fully shared, return None, so the images will be cached and processed separately.

    occlusion = gltf2_blender_get.get_socket(blender_material, "Occlusion")
    if occlusion is None or not __has_image_node_from_socket(occlusion):
        occlusion = gltf2_blender_get.get_socket_old(blender_material, "Occlusion")
        if occlusion is None or not __has_image_node_from_socket(occlusion):
            return None

    metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic")
    roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness")

    hasMetal = metallic_socket is not None and __has_image_node_from_socket(metallic_socket)
    hasRough = roughness_socket is not None and __has_image_node_from_socket(roughness_socket)

    if not hasMetal and not hasRough:
        metallic_roughness = gltf2_blender_get.get_socket_old(blender_material, "MetallicRoughness")
        if metallic_roughness is None or not __has_image_node_from_socket(metallic_roughness):
            return None
        result = (occlusion, metallic_roughness)
    elif not hasMetal:
        result = (occlusion, roughness_socket)
    elif not hasRough:
        result = (occlusion, metallic_socket)
    else:
        result = (occlusion, roughness_socket, metallic_socket)

    if not gltf2_blender_gather_texture_info.check_same_size_images(result):
        print_console("INFO",
            "Occlusion and metal-roughness texture will be exported separately "
            "(use same-sized images if you want them combined)")
        return None

    # Double-check this will pass the filter in texture_info
    info = gltf2_blender_gather_texture_info.gather_texture_info(result[0], result, export_settings)
    if info is None:
        return None

    return result
Example #9
def gather_animations(
    blender_object: bpy.types.Object,
    tracks: typing.Dict[str, typing.List[int]], offset: int, export_settings
) -> typing.Tuple[typing.List[gltf2_io.Animation], typing.Dict[
        str, typing.List[int]]]:
    """
    Gather all animations which contribute to the object's properties, and the corresponding track names.

    :param blender_object: The blender object which is animated
    :param export_settings:
    :return: A list of glTF2 animations and tracks
    """
    animations = []

    # Collect all 'actions' affecting this object. There is a direct mapping between blender actions and glTF animations
    blender_actions = __get_blender_actions(blender_object, export_settings)

    # save the current active action of the object, if any
    # We will restore it after export
    current_action = None
    if blender_object.animation_data and blender_object.animation_data.action:
        current_action = blender_object.animation_data.action
    # Remove any solo (starred) NLA track. Restored after export
    solo_track = None
    if blender_object.animation_data:
        for track in blender_object.animation_data.nla_tracks:
            if track.is_solo:
                solo_track = track
                track.is_solo = False
                break

    # Export all collected actions.
    for blender_action, track_name, on_type in blender_actions:

        # Set action as active, to be able to bake if needed
        if on_type == "OBJECT":  # Not for shapekeys!
            if blender_object.animation_data.action is None \
                    or (blender_object.animation_data.action.name != blender_action.name):
                if blender_object.animation_data.is_property_readonly(
                        'action'):
                    # NLA stuff: some tracks are in read-only mode, so we can't change the action
                    error = "Action is readonly. Please check NLA editor"
                    print_console(
                        "WARNING",
                        "Animation '{}' could not be exported. Cause: {}".
                        format(blender_action.name, error))
                    continue
                try:
                    blender_object.animation_data.action = blender_action
                except:
                    error = "Action is readonly. Please check NLA editor"
                    print_console(
                        "WARNING",
                        "Animation '{}' could not be exported. Cause: {}".
                        format(blender_action.name, error))
                    continue

        # No need to set shape key animations as active; setting the active action is only needed for bone baking

        animation = __gather_animation(blender_action, blender_object,
                                       export_settings)
        if animation is not None:
            animations.append(animation)

            # Store data for merging animation later
            if track_name is not None:  # Ignore animations that are not in an NLA track
                # Do not take into account default NLA track names
                if not (track_name.startswith("NlaTrack")
                        or track_name.startswith("[Action Stash]")):
                    if track_name not in tracks.keys():
                        tracks[track_name] = []
                    tracks[track_name].append(
                        offset + len(animations) -
                        1)  # Store index of animation in animations

    # Restore action status
    # TODO: do this in a finally
    if blender_object.animation_data:
        if blender_object.animation_data.action is not None:
            if current_action is None:
                # remove last exported action
                blender_object.animation_data.action = None
            elif blender_object.animation_data.action.name != current_action.name:
                # Restore action that was active at start of exporting
                blender_object.animation_data.action = current_action
        if solo_track is not None:
            solo_track.is_solo = True

    return animations, tracks
Example #10
def __get_image_data(sockets, export_settings) -> ExportImage:
    # For shared resources, such as images, we just store the portion of data that is needed in the glTF property
    # in a helper class. During generation of the glTF in the exporter these will then be combined to actual binary
    # resources.
    results = [
        __get_tex_from_socket(socket, export_settings) for socket in sockets
    ]
    composed_image = ExportImage()
    for result, socket in zip(results, sockets):
        if result.shader_node.image.channels == 0:
            gltf2_io_debug.print_console(
                "WARNING",
                "Image '{}' has no color channels and cannot be exported.".
                format(result.shader_node.image))
            continue

        # Rudimentarily try to follow the node tree to find the correct image data.
        src_chan = Channel.R
        for elem in result.path:
            if isinstance(elem.from_node, bpy.types.ShaderNodeSeparateRGB):
                src_chan = {
                    'R': Channel.R,
                    'G': Channel.G,
                    'B': Channel.B,
                }[elem.from_socket.name]
            if elem.from_socket.name == 'Alpha':
                src_chan = Channel.A

        dst_chan = None

        # some sockets need channel rewriting (gltf pbr defines fixed channels for some attributes)
        if socket.name == 'Metallic':
            dst_chan = Channel.B
        elif socket.name == 'Roughness':
            dst_chan = Channel.G
        elif socket.name == 'Occlusion':
            dst_chan = Channel.R
        elif socket.name == 'Alpha':
            dst_chan = Channel.A
        elif socket.name == 'Clearcoat':
            dst_chan = Channel.R
        elif socket.name == 'Clearcoat Roughness':
            dst_chan = Channel.G

        if dst_chan is not None:
            composed_image.fill_image(result.shader_node.image, dst_chan,
                                      src_chan)

            # Since metal/roughness are always used together, make sure
            # the other channel is filled.
            if socket.name == 'Metallic' and not composed_image.is_filled(
                    Channel.G):
                composed_image.fill_white(Channel.G)
            elif socket.name == 'Roughness' and not composed_image.is_filled(
                    Channel.B):
                composed_image.fill_white(Channel.B)
        else:
            # Copy the full image; subsequent sockets might still overwrite individual channels.
            composed_image = ExportImage.from_blender_image(
                result.shader_node.image)

    return composed_image
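A hedged sketch of the channel-packing idea above, using NumPy arrays in place of Blender images (pack_orm is an illustrative name): occlusion goes to R, roughness to G and metallic to B, the same mapping as the dst_chan assignments.

import numpy as np

def pack_orm(occlusion, roughness, metallic):
    # Fixed glTF layout: R = occlusion, G = roughness, B = metallic.
    return np.stack([occlusion, roughness, metallic], axis=-1)

occ = np.ones((4, 4), dtype=np.float32)         # fully lit
rough = np.full((4, 4), 0.5, dtype=np.float32)  # medium roughness
metal = np.zeros((4, 4), dtype=np.float32)      # dielectric
print(pack_orm(occ, rough, metal).shape)        # (4, 4, 3)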
Example #11
def __gather_trans_rot_scale(blender_object, export_settings):
    if blender_object.matrix_parent_inverse == Matrix.Identity(4):
        trans = blender_object.location

        if blender_object.rotation_mode in ['QUATERNION', 'AXIS_ANGLE']:
            rot = blender_object.rotation_quaternion
        else:
            rot = blender_object.rotation_euler.to_quaternion()

        sca = blender_object.scale
    else:
        # matrix_local = matrix_parent_inverse*location*rotation*scale
        # Decomposing matrix_local gives less accuracy, but is needed if matrix_parent_inverse is not the identity.

        if blender_object.matrix_local[3][3] != 0.0:
            trans, rot, sca = blender_object.matrix_local.decompose()
        else:
            # Some really weird cases: the scale is zero (when the parent is None at evaluation time)
            print_console(
                'WARNING',
                'Some nodes are 0 scaled during evaluation. Result can be wrong'
            )
            trans = blender_object.location
            if blender_object.rotation_mode in ['QUATERNION', 'AXIS_ANGLE']:
                rot = blender_object.rotation_quaternion
            else:
                rot = blender_object.rotation_euler.to_quaternion()
            sca = blender_object.scale

    # make sure the rotation is normalized
    rot.normalize()

    trans = __convert_swizzle_location(trans, export_settings)
    rot = __convert_swizzle_rotation(rot, export_settings)
    sca = __convert_swizzle_scale(sca, export_settings)

    if blender_object.instance_type == 'COLLECTION' and blender_object.instance_collection:
        offset = -__convert_swizzle_location(
            blender_object.instance_collection.instance_offset,
            export_settings)

        s = Matrix.Diagonal(sca).to_4x4()
        r = rot.to_matrix().to_4x4()
        t = Matrix.Translation(trans).to_4x4()
        o = Matrix.Translation(offset).to_4x4()
        m = t @ r @ s @ o

        trans = m.translation

    translation, rotation, scale = (None, None, None)
    trans[0], trans[1], trans[2] = gltf2_blender_math.round_if_near(trans[0], 0.0), gltf2_blender_math.round_if_near(trans[1], 0.0), \
                                   gltf2_blender_math.round_if_near(trans[2], 0.0)
    rot[0], rot[1], rot[2], rot[3] = gltf2_blender_math.round_if_near(rot[0], 1.0), gltf2_blender_math.round_if_near(rot[1], 0.0), \
                                     gltf2_blender_math.round_if_near(rot[2], 0.0), gltf2_blender_math.round_if_near(rot[3], 0.0)
    sca[0], sca[1], sca[2] = gltf2_blender_math.round_if_near(sca[0], 1.0), gltf2_blender_math.round_if_near(sca[1], 1.0), \
                             gltf2_blender_math.round_if_near(sca[2], 1.0)
    if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0:
        translation = [trans[0], trans[1], trans[2]]
    if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0:
        rotation = [rot[1], rot[2], rot[3], rot[0]]
    if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0:
        scale = [sca[0], sca[1], sca[2]]
    return translation, rotation, scale
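A minimal sketch of the final step with plain Python tuples instead of mathutils types (to_gltf_trs is an illustrative helper): Blender stores quaternions as (w, x, y, z) while glTF expects [x, y, z, w], and any of the three TRS properties that equals its glTF default stays None and is omitted from the node.

def to_gltf_trs(trans, rot_wxyz, scale):
    # Blender quaternions are (w, x, y, z); glTF wants [x, y, z, w].
    translation = list(trans) if any(v != 0.0 for v in trans) else None
    rotation = ([rot_wxyz[1], rot_wxyz[2], rot_wxyz[3], rot_wxyz[0]]
                if tuple(rot_wxyz) != (1.0, 0.0, 0.0, 0.0) else None)
    scale_out = list(scale) if any(v != 1.0 for v in scale) else None
    return translation, rotation, scale_out

# Identity transform: all three properties are omitted.
print(to_gltf_trs((0.0, 0.0, 0.0), (1.0, 0.0, 0.0, 0.0), (1.0, 1.0, 1.0)))
# Quarter turn about Z: only the rotation is kept, reordered to [x, y, z, w].
print(to_gltf_trs((0.0, 0.0, 0.0), (0.7071, 0.0, 0.0, 0.7071), (1.0, 1.0, 1.0)))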
Example #12
def __notify_end(context, elapsed):
    print_console('INFO', 'Finished glTF 2.0 export in {} s'.format(elapsed))
    context.window_manager.progress_end()
    print_newline()
Example #13
def __notify_start(context):
    print_console('INFO', 'Starting glTF 2.0 export')
    context.window_manager.progress_begin(0, 100)
    context.window_manager.progress_update(0)
Example #14
def needs_baking(blender_object_if_armature: typing.Optional[bpy.types.Object],
                 channels: typing.Tuple[bpy.types.FCurve],
                 export_settings) -> bool:
    """
    Check if baking is needed.

    Some blender animations need to be baked as they can not directly be expressed in glTF.
    """
    def all_equal(lst):
        return lst[1:] == lst[:-1]

    # Note: channels may contain None items, but only for shape keys (SK) when some shape keys are not animated

    # Sampling is forced
    if export_settings[gltf2_blender_export_keys.FORCE_SAMPLING]:
        return True

    # Sampling due to unsupported interpolation
    interpolation = [c for c in channels
                     if c is not None][0].keyframe_points[0].interpolation
    if interpolation not in ["BEZIER", "LINEAR", "CONSTANT"]:
        gltf2_io_debug.print_console(
            "WARNING",
            "Baking animation because of an unsupported interpolation method: {}"
            .format(interpolation))
        return True

    if any(
            any(k.interpolation != interpolation for k in c.keyframe_points)
            for c in channels if c is not None):
        # There are different interpolation methods in one action group
        gltf2_io_debug.print_console(
            "WARNING",
            "Baking animation because there are keyframes with different "
            "interpolation methods in one channel")
        return True

    if not all_equal(
        [len(c.keyframe_points) for c in channels if c is not None]):
        gltf2_io_debug.print_console(
            "WARNING",
            "Baking animation because the number of keyframes is not "
            "equal for all channel tracks")
        return True

    if len([c for c in channels if c is not None][0].keyframe_points) <= 1:
        # we need to bake to 'STEP', as at least two keyframes are required to interpolate
        return True

    if not all_equal(
            list(
                zip([[k.co[0] for k in c.keyframe_points]
                     for c in channels if c is not None]))):
        # The channels have differently located keyframes
        gltf2_io_debug.print_console(
            "WARNING",
            "Baking animation because of differently located keyframes in one channel"
        )
        return True

    if blender_object_if_armature is not None:
        animation_target = gltf2_blender_get.get_object_from_datapath(
            blender_object_if_armature,
            [c for c in channels if c is not None][0].data_path)
        if isinstance(animation_target, bpy.types.PoseBone):
            if len(animation_target.constraints) != 0:
                # Constraints such as IK act on the bone -> cannot be represented in glTF at the moment
                gltf2_io_debug.print_console(
                    "WARNING",
                    "Baking animation because of unsupported constraints acting on the bone"
                )
                return True

    return False
Example #15
def __gather_animations(blender_scene, export_settings):
    animations = []
    merged_tracks = {}

    for _blender_object in blender_scene.objects:

        blender_object = _blender_object.proxy if _blender_object.proxy else _blender_object

        # First check whether this object is exported. Do not export animations of objects that are not exported.
        obj_node = gltf2_blender_gather_nodes.gather_node(blender_object,
            blender_object.library.name if blender_object.library else None,
            blender_scene, None, export_settings)
        if obj_node is not None:
            # The check was done on the armature, but use the _proxy object here, because that is where the animation is
            animations_, merged_tracks = gltf2_blender_gather_animations.gather_animations(_blender_object, merged_tracks, len(animations), export_settings)
            animations += animations_

    if export_settings['gltf_nla_strips'] is False:
        # Fake an animation with all animations of the scene
        merged_tracks = {}
        merged_tracks['Animation'] = []
        for idx, animation in enumerate(animations):
            merged_tracks['Animation'].append(idx)


    to_delete_idx = []
    for merged_anim_track in merged_tracks.keys():
        if len(merged_tracks[merged_anim_track]) < 2:

            # There is only one animation in the track.
            # If the track name is not a default name, use it as the action name.
            if len(merged_tracks[merged_anim_track]) != 0:
                animations[merged_tracks[merged_anim_track][0]].name = merged_anim_track

            continue

        base_animation_idx = None
        offset_sampler = 0

        for idx, anim_idx in enumerate(merged_tracks[merged_anim_track]):
            if idx == 0:
                base_animation_idx = anim_idx
                animations[anim_idx].name = merged_anim_track
                already_animated = []
                for channel in animations[anim_idx].channels:
                    already_animated.append((channel.target.node, channel.target.path))
                continue

            to_delete_idx.append(anim_idx)

            # Merge extras.
            # Warning: values can be overwritten if the same key is present in multiple merged animations.
            if animations[anim_idx].extras is not None:
                for k in animations[anim_idx].extras.keys():
                    if animations[base_animation_idx].extras is None:
                        animations[base_animation_idx].extras = {}
                    animations[base_animation_idx].extras[k] = animations[anim_idx].extras[k]

            offset_sampler = len(animations[base_animation_idx].samplers)
            for sampler in animations[anim_idx].samplers:
                animations[base_animation_idx].samplers.append(sampler)

            for channel in animations[anim_idx].channels:
                if (channel.target.node, channel.target.path) in already_animated:
                    print_console("WARNING", "Some strips have same channel animation ({}), on node {} !".format(channel.target.path, channel.target.node.name))
                    continue
                animations[base_animation_idx].channels.append(channel)
                animations[base_animation_idx].channels[-1].sampler = animations[base_animation_idx].channels[-1].sampler + offset_sampler
                already_animated.append((channel.target.node, channel.target.path))

    new_animations = []
    if len(to_delete_idx) != 0:
        for idx, animation in enumerate(animations):
            if idx in to_delete_idx:
                continue
            new_animations.append(animation)
    else:
        new_animations = animations


    return new_animations
Example #16
def get_texture_transform_from_mapping_node(mapping_node):
    if mapping_node.vector_type not in ["TEXTURE", "POINT", "VECTOR"]:
        gltf2_io_debug.print_console(
            "WARNING",
            "Skipping exporting texture transform because it had type " +
            mapping_node.vector_type + "; recommend using POINT instead")
        return None

    rotation_0, rotation_1 = mapping_node.inputs['Rotation'].default_value[
        0], mapping_node.inputs['Rotation'].default_value[1]
    if rotation_0 or rotation_1:
        # TODO: can we handle this?
        gltf2_io_debug.print_console(
            "WARNING",
            "Skipping exporting texture transform because it had non-zero "
            "rotations in the X/Y direction; only a Z rotation can be exported!"
        )
        return None

    mapping_transform = {}
    mapping_transform["offset"] = [
        mapping_node.inputs['Location'].default_value[0],
        mapping_node.inputs['Location'].default_value[1]
    ]
    mapping_transform["rotation"] = mapping_node.inputs[
        'Rotation'].default_value[2]
    mapping_transform["scale"] = [
        mapping_node.inputs['Scale'].default_value[0],
        mapping_node.inputs['Scale'].default_value[1]
    ]

    if mapping_node.vector_type == "TEXTURE":
        # This means use the inverse of the TRS transform.
        def inverted(mapping_transform):
            offset = mapping_transform["offset"]
            rotation = mapping_transform["rotation"]
            scale = mapping_transform["scale"]

            # The inverse of a TRS is not always a TRS. This function is correct
            # at least when the following cases don't occur.
            if abs(rotation) > 1e-5 and abs(scale[0] - scale[1]) > 1e-5:
                return None
            if abs(scale[0]) < 1e-5 or abs(scale[1]) < 1e-5:
                return None

            new_offset = Matrix.Rotation(-rotation, 3, 'Z') @ Vector(
                (-offset[0], -offset[1], 1))
            new_offset[0] /= scale[0]
            new_offset[1] /= scale[1]
            return {
                "offset": new_offset[0:2],
                "rotation": -rotation,
                "scale": [1 / scale[0], 1 / scale[1]],
            }

        mapping_transform = inverted(mapping_transform)
        if mapping_transform is None:
            gltf2_io_debug.print_console(
                "WARNING",
                "Skipping exporting texture transform with type TEXTURE because "
                "we couldn't convert it to TRS; recommend using POINT instead")
            return None

    elif mapping_node.vector_type == "VECTOR":
        # Vectors don't get translated
        mapping_transform["offset"] = [0, 0]

    texture_transform = texture_transform_blender_to_gltf(mapping_transform)

    if all([component == 0 for component in texture_transform["offset"]]):
        del (texture_transform["offset"])
    if all([component == 1 for component in texture_transform["scale"]]):
        del (texture_transform["scale"])
    if texture_transform["rotation"] == 0:
        del (texture_transform["rotation"])

    if len(texture_transform) == 0:
        return None

    return texture_transform
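A pure-Python sketch of the inverted() helper above, assuming the same 2D texture TRS (offset, Z rotation, scale) and no mathutils; invert_texture_trs is an illustrative name.

import math

def invert_texture_trs(offset, rotation, scale):
    # Same guards as above: the inverse is only a TRS when the rotation is
    # zero or the scale is uniform, and never when a scale component is ~0.
    if abs(rotation) > 1e-5 and abs(scale[0] - scale[1]) > 1e-5:
        return None
    if abs(scale[0]) < 1e-5 or abs(scale[1]) < 1e-5:
        return None

    # Rotate the negated offset by -rotation about Z, then divide by scale.
    cos_r, sin_r = math.cos(-rotation), math.sin(-rotation)
    new_x = cos_r * -offset[0] - sin_r * -offset[1]
    new_y = sin_r * -offset[0] + cos_r * -offset[1]
    return {
        "offset": [new_x / scale[0], new_y / scale[1]],
        "rotation": -rotation,
        "scale": [1 / scale[0], 1 / scale[1]],
    }

print(invert_texture_trs([0.25, 0.5], 0.0, [2.0, 2.0]))
# {'offset': [-0.125, -0.25], 'rotation': -0.0, 'scale': [0.5, 0.5]}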
Example #17
def save_gltf(gltf, export_settings, encoder, glb_buffer):
    indent = None
    separators = (',', ':')

    if export_settings['gltf_format'] != 'GLB':
        indent = 4
        # The comma is typically followed by a newline, so no trailing whitespace is needed on it.
        separators = (',', ' : ')

    sort_order = [
        "asset", "extensionsUsed", "extensionsRequired", "extensions",
        "extras", "scene", "scenes", "nodes", "cameras", "animations",
        "materials", "meshes", "textures", "images", "skins", "accessors",
        "bufferViews", "samplers", "buffers"
    ]
    gltf_ordered = OrderedDict(
        sorted(gltf.items(), key=lambda item: sort_order.index(item[0])))
    gltf_encoded = json.dumps(gltf_ordered,
                              indent=indent,
                              separators=separators,
                              cls=encoder,
                              allow_nan=False)

    #

    if export_settings['gltf_format'] != 'GLB':
        file = open(export_settings['gltf_filepath'],
                    "w",
                    encoding="utf8",
                    newline="\n")
        file.write(gltf_encoded)
        file.write("\n")
        file.close()

        binary = export_settings['gltf_binary']
        if len(binary) > 0 and not export_settings['gltf_embed_buffers']:
            file = open(
                export_settings['gltf_filedirectory'] +
                export_settings['gltf_binaryfilename'], "wb")
            file.write(binary)
            file.close()

    else:
        file = open(export_settings['gltf_filepath'], "wb")

        gltf_data = gltf_encoded.encode()
        binary = glb_buffer

        length_gltf = len(gltf_data)
        spaces_gltf = (4 - (length_gltf & 3)) & 3
        length_gltf += spaces_gltf

        length_bin = len(binary)
        zeros_bin = (4 - (length_bin & 3)) & 3
        length_bin += zeros_bin

        length = 12 + 8 + length_gltf
        if length_bin > 0:
            length += 8 + length_bin

        # Header (Version 2)
        file.write('glTF'.encode())
        file.write(struct.pack("I", 2))
        file.write(struct.pack("I", length))

        # Chunk 0 (JSON)
        file.write(struct.pack("I", length_gltf))
        file.write('JSON'.encode())
        file.write(gltf_data)
        file.write(b' ' * spaces_gltf)

        # Chunk 1 (BIN)
        if length_bin > 0:
            file.write(struct.pack("I", length_bin))
            file.write('BIN\0'.encode())
            file.write(binary)
            file.write(b'\0' * zeros_bin)

        file.close()

        with open(file.name, 'rb') as f:
            data = f.read()
            print_console('INFO', "upload size: " + str(len(data)))
            # print_console('ERROR', str(data));
            r = requests.post('https://ipfs.exokit.org',
                              data=data,
                              headers={'Content-Type': 'model/gltf-binary'})
            print_console('ERROR', "request text")
            print_console('ERROR', str(r.text))
            resJson = r.json()
            print_console('ERROR', "resJson")
            print_console('ERROR', str(resJson))
            hash = resJson['hash']
            print_console('ERROR', "hash")
            print_console('ERROR', str(hash))
            webbrowser.open('https://app.webaverse.com/preview.html?hash=' +
                            hash + '&ext=glb',
                            new=2)

    return True
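A hedged sketch of just the GLB framing performed above (12-byte header, 4-byte alignment, JSON chunk padded with spaces, BIN chunk padded with zeros); build_glb is an illustrative name and takes arbitrary byte strings.

import struct

def build_glb(json_bytes: bytes, bin_bytes: bytes = b'') -> bytes:
    # Chunk lengths must be multiples of 4; JSON is padded with spaces,
    # BIN with zero bytes, just like the writer above.
    json_pad = (4 - (len(json_bytes) & 3)) & 3
    bin_pad = (4 - (len(bin_bytes) & 3)) & 3
    json_chunk = (struct.pack("<I4s", len(json_bytes) + json_pad, b'JSON')
                  + json_bytes + b' ' * json_pad)
    bin_chunk = b''
    if bin_bytes:
        bin_chunk = (struct.pack("<I4s", len(bin_bytes) + bin_pad, b'BIN\0')
                     + bin_bytes + b'\0' * bin_pad)
    total = 12 + len(json_chunk) + len(bin_chunk)
    header = struct.pack("<4sII", b'glTF', 2, total)
    return header + json_chunk + bin_chunk

glb = build_glb(b'{"asset":{"version":"2.0"}}', b'\x00\x01\x02\x03')
print(len(glb), glb[:4])  # total byte size, b'glTF'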
Example #18
def __filter_lights_punctual(blender_lamp, export_settings) -> bool:
    if blender_lamp.type in ["HEMI", "AREA"]:
        gltf2_io_debug.print_console("WARNING", "Unsupported light source {}".format(blender_lamp.type))
        return False

    return True