Example no. 1
def build_light(data):
    """Decode a light message and apply it to a new or existing Blender light."""
    light_path, start = common.decode_string(data, 0)
    light_name, start = common.decode_string(data, start)
    logger.info("build_light %s", light_path)
    light_type, start = common.decode_int(data, start)
    blighttype = "POINT"
    if light_type == common.LightType.SUN.value:
        blighttype = "SUN"
    elif light_type == common.LightType.POINT.value:
        blighttype = "POINT"
    elif light_type == common.LightType.AREA.value:
        blighttype = "AREA"
    else:
        blighttype = "SPOT"

    light = get_or_create_light(light_name, blighttype)

    shadow, start = common.decode_int(data, start)
    light.use_shadow = shadow != 0

    color, start = common.decode_color(data, start)
    light.color = (color[0], color[1], color[2])
    light.energy, start = common.decode_float(data, start)
    if light_type == common.LightType.SPOT.value:
        light.spot_size, start = common.decode_float(data, start)
        light.spot_blend, start = common.decode_float(data, start)

    get_or_create_object_data(light_path, light)
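
The common.decode_* helpers called throughout these examples are not shown. Their wire layout can be inferred from the manual name decoding at the top of build_material (Example no. 6) and from the struct.unpack calls in decode_base_mesh (Example no. 4): 4-byte primitives and length-prefixed strings. A minimal sketch under those assumptions; the real helpers may differ in byte order, signedness or error handling:

import struct


def decode_int(data, index):
    # Assumption: a 4-byte int in native byte order (the index buffers in
    # Example no. 4 are unpacked as 4-byte ints as well).
    return struct.unpack("i", data[index:index + 4])[0], index + 4


def decode_float(data, index):
    # Assumption: a 4-byte float, matching the "3f" coordinate unpacks.
    return struct.unpack("f", data[index:index + 4])[0], index + 4


def decode_vector3(data, index):
    # Assumption: three consecutive 4-byte floats.
    return struct.unpack("3f", data[index:index + 3 * 4]), index + 3 * 4


def decode_string(data, index):
    # Assumption: a 4-byte length prefix followed by that many UTF-8 bytes,
    # matching the manual name decoding at the top of build_material.
    length, index = decode_int(data, index)
    return data[index:index + length].decode(), index + length
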
Example no. 2
def build_camera(data):
    """Decode a camera message and apply it to a new or existing Blender camera."""
    camera_path, start = common.decode_string(data, 0)
    logger.info("build_camera %s", camera_path)
    camera_name, start = common.decode_string(data, start)
    camera = get_or_create_camera(camera_name)

    camera.lens, start = common.decode_float(data, start)
    camera.clip_start, start = common.decode_float(data, start)
    camera.clip_end, start = common.decode_float(data, start)
    camera.dof.use_dof, start = common.decode_bool(data, start)
    camera.dof.aperture_fstop, start = common.decode_float(data, start)
    colimator_name, start = common.decode_string(data, start)
    sensor_fit, start = common.decode_int(data, start)
    camera.sensor_width, start = common.decode_float(data, start)
    camera.sensor_height, start = common.decode_float(data, start)

    if sensor_fit == 0:
        camera.sensor_fit = "AUTO"
    elif sensor_fit == 1:
        camera.sensor_fit = "VERTICAL"
    else:
        camera.sensor_fit = "HORIZONTAL"

    get_or_create_object_data(camera_path, camera)

    # colimator
    if len(colimator_name) > 0:
        camera.dof.use_dof = True
        camera.dof.focus_object = get_or_create_path(colimator_name)
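
Both the light-type chain in Example no. 1 and the sensor_fit branch above translate a wire enum into a Blender enum string. The same mapping can be written as a lookup table; a stylistic alternative, not the source's code:

SENSOR_FIT_BY_CODE = {0: "AUTO", 1: "VERTICAL"}  # hypothetical constant

# Any other code falls back to "HORIZONTAL", reproducing the final else branch.
camera.sensor_fit = SENSOR_FIT_BY_CODE.get(sensor_fit, "HORIZONTAL")
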
Example no. 3
    def build_add_keyframe(self, data):
        """Decode an add-keyframe message, insert the keyframe and return the object name."""
        index = 0
        name, index = common.decode_string(data, index)
        if name not in share_data.blender_objects:
            return name
        ob = share_data.blender_objects[name]
        channel, index = common.decode_string(data, index)
        channel_index, index = common.decode_int(data, index)
        frame, index = common.decode_int(data, index)
        value, index = common.decode_float(data, index)

        if not hasattr(ob, channel):
            ob = ob.data

        attr = getattr(ob, channel)
        if channel_index != -1:
            attr[channel_index] = value
        else:
            attr = value
        setattr(ob, channel, attr)
        ob.keyframe_insert(channel, frame=float(frame), index=channel_index)
        return name
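
For reference, a message carrying the hypothetical values name "Cube", channel "location", channel_index 2, frame 10 and value 1.5 reduces to the following direct Blender calls, assuming the object itself carries the channel (otherwise the method falls back to ob.data):

import bpy

ob = bpy.data.objects["Cube"]   # hypothetical object name
ob.location[2] = 1.5            # channel "location", channel_index 2
ob.keyframe_insert("location", frame=10.0, index=2)
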
Example no. 4
def decode_base_mesh(client, obj, data, index):
    """Decode geometry, bmesh layers, shape keys, vertex groups, normals,
    UV maps and vertex colors into obj; return the updated read index."""
    bm = bmesh.new()

    position_count, index = common.decode_int(data, index)
    logger.debug("Reading %d vertices", position_count)

    for _pos_idx in range(position_count):
        co, index = common.decode_vector3(data, index)
        bm.verts.new(co)

    bm.verts.ensure_lookup_table()

    index = decode_bmesh_layer(data, index, bm.verts.layers.bevel_weight,
                               bm.verts, decode_layer_float)

    edge_count, index = common.decode_int(data, index)
    logger.debug("Reading %d edges", edge_count)

    edges_data = struct.unpack(f"{edge_count * 4}I",
                               data[index:index + edge_count * 4 * 4])
    index += edge_count * 4 * 4

    for edge_idx in range(edge_count):
        v1 = edges_data[edge_idx * 4]
        v2 = edges_data[edge_idx * 4 + 1]
        edge = bm.edges.new((bm.verts[v1], bm.verts[v2]))
        edge.smooth = bool(edges_data[edge_idx * 4 + 2])
        edge.seam = bool(edges_data[edge_idx * 4 + 3])

    index = decode_bmesh_layer(data, index, bm.edges.layers.bevel_weight,
                               bm.edges, decode_layer_float)
    index = decode_bmesh_layer(data, index, bm.edges.layers.crease, bm.edges,
                               decode_layer_float)

    face_count, index = common.decode_int(data, index)
    logger.debug("Reading %d faces", face_count)

    for _face_idx in range(face_count):
        material_idx, index = common.decode_int(data, index)
        smooth, index = common.decode_bool(data, index)
        vert_count, index = common.decode_int(data, index)
        face_vertices = struct.unpack(f"{vert_count}I",
                                      data[index:index + vert_count * 4])
        index += vert_count * 4
        verts = [bm.verts[i] for i in face_vertices]
        face = bm.faces.new(verts)
        face.material_index = material_idx
        face.smooth = smooth

    index = decode_bmesh_layer(data, index, bm.faces.layers.face_map, bm.faces,
                               decode_layer_int)

    index = decode_bmesh_layer(data, index, bm.loops.layers.uv,
                               loops_iterator(bm), decode_layer_uv)
    index = decode_bmesh_layer(data, index, bm.loops.layers.color,
                               loops_iterator(bm), decode_layer_color)

    bm.normal_update()
    bm.to_mesh(obj.data)
    bm.free()

    # Load shape keys
    shape_keys_count, index = common.decode_int(data, index)
    obj.shape_key_clear()
    if shape_keys_count > 0:
        logger.debug("Loading %d shape keys", shape_keys_count)
        shapes_keys_list = []
        for _i in range(shape_keys_count):
            shape_key_name, index = common.decode_string(data, index)
            shapes_keys_list.append(obj.shape_key_add(name=shape_key_name))
        for i in range(shape_keys_count):
            shapes_keys_list[i].vertex_group, index = common.decode_string(
                data, index)
        for i in range(shape_keys_count):
            relative_key_name, index = common.decode_string(data, index)
            shapes_keys_list[i].relative_key = obj.data.shape_keys.key_blocks[
                relative_key_name]

        for i in range(shape_keys_count):
            shape_key = shapes_keys_list[i]
            shape_key.mute, index = common.decode_bool(data, index)
            shape_key.value, index = common.decode_float(data, index)
            shape_key.slider_min, index = common.decode_float(data, index)
            shape_key.slider_max, index = common.decode_float(data, index)
            shape_key_data_size, index = common.decode_int(data, index)
            for point_idx in range(shape_key_data_size):
                shape_key.data[point_idx].co = Vector(
                    struct.unpack("3f", data[index:index + 3 * 4]))
                index += 3 * 4
        obj.data.shape_keys.use_relative, index = common.decode_bool(
            data, index)

    # Vertex Groups
    vg_count, index = common.decode_int(data, index)
    obj.vertex_groups.clear()
    for _i in range(vg_count):
        vg_name, index = common.decode_string(data, index)
        vertex_group = obj.vertex_groups.new(name=vg_name)
        vertex_group.lock_weight, index = common.decode_bool(data, index)
        vg_size, index = common.decode_int(data, index)
        for _elmt_idx in range(vg_size):
            vert_idx, index = common.decode_int(data, index)
            weight, index = common.decode_float(data, index)
            vertex_group.add([vert_idx], weight, "REPLACE")

    # Normals
    obj.data.use_auto_smooth, index = common.decode_bool(data, index)
    obj.data.auto_smooth_angle, index = common.decode_float(data, index)

    has_custom_normal, index = common.decode_bool(data, index)

    if has_custom_normal:
        normals = []
        for _loop in obj.data.loops:
            normal, index = common.decode_vector3(data, index)
            normals.append(normal)
        obj.data.normals_split_custom_set(normals)

    # UV Maps and Vertex Colors are added automatically based on layers in the bmesh
    # We just need to update their name and active_render state:

    # UV Maps
    for uv_layer in obj.data.uv_layers:
        uv_layer.name, index = common.decode_string(data, index)
        uv_layer.active_render, index = common.decode_bool(data, index)

    # Vertex Colors
    for vertex_colors in obj.data.vertex_colors:
        vertex_colors.name, index = common.decode_string(data, index)
        vertex_colors.active_render, index = common.decode_bool(data, index)

    return index
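
loops_iterator and decode_bmesh_layer are defined elsewhere in the module. Judging by the call sites above, loops_iterator flattens the per-face loops so UV and color layers can be decoded one loop at a time; a plausible sketch, an assumption rather than the source's code:

def loops_iterator(bm):
    # Assumption: yield every loop of every face, in face order, so the
    # per-loop UV / color decoders consume the stream in a stable order.
    for face in bm.faces:
        for loop in face.loops:
            yield loop
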
Example no. 5
def decode_layer_float(elmt, layer, data, index):
    """Decode one float into elmt[layer] and return the updated read index."""
    elmt[layer], index = common.decode_float(data, index)
    return index
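
decode_layer_int, used for the face_map layer in Example no. 4, is not shown; by analogy with decode_layer_float it presumably reads a single int per element:

def decode_layer_int(elmt, layer, data, index):
    # Sketch by analogy with decode_layer_float; assumed, not shown in the source.
    elmt[layer], index = common.decode_int(data, index)
    return index
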
Example no. 6
def build_material(data):
    """Decode a material message and rebuild the Principled BSDF inputs and texture nodes of the named material."""
    material_name_length = common.bytes_to_int(data[:4])
    start = 4
    end = start + material_name_length
    material_name = data[start:end].decode()
    start = end

    material = get_or_create_material(material_name)
    nodes = material.node_tree.nodes
    # Get a principled node
    principled = None
    if nodes:
        for n in nodes:
            if n.type == "BSDF_PRINCIPLED":
                principled = n
                break

    if not principled:
        logger.error("Cannot find Principled BSDF node")
        return

    index = start

    # Transmission (1 - opacity)
    transmission, index = common.decode_float(data, index)
    transmission = 1 - transmission
    principled.inputs["Transmission"].default_value = transmission
    file_name, index = common.decode_string(data, index)
    if len(file_name) > 0:
        invert = material.node_tree.nodes.new("ShaderNodeInvert")
        material.node_tree.links.new(principled.inputs["Transmission"], invert.outputs["Color"])
        tex_image = material.node_tree.nodes.new("ShaderNodeTexImage")
        try:
            tex_image.image = bpy.data.images.load(get_resolved_file_path(file_name))
            tex_image.image.colorspace_settings.name = "Non-Color"
        except Exception as e:
            logger.error("could not load file %s ...", get_resolved_file_path(file_name))
            logger.error("... %s", e)
        material.node_tree.links.new(invert.inputs["Color"], tex_image.outputs["Color"])

    # Base Color
    base_color, index = common.decode_color(data, index)
    material.diffuse_color = (base_color[0], base_color[1], base_color[2], 1)
    principled.inputs["Base Color"].default_value = material.diffuse_color
    index = build_texture(principled, material, "Base Color", True, data, index)

    # Metallic
    material.metallic, index = common.decode_float(data, index)
    principled.inputs["Metallic"].default_value = material.metallic
    index = build_texture(principled, material, "Metallic", False, data, index)

    # Roughness
    material.roughness, index = common.decode_float(data, index)
    principled.inputs["Roughness"].default_value = material.roughness
    index = build_texture(principled, material, "Roughness", False, data, index)

    # Normal
    file_name, index = common.decode_string(data, index)
    if len(file_name) > 0:
        normal_map = material.node_tree.nodes.new("ShaderNodeNormalMap")
        material.node_tree.links.new(principled.inputs["Normal"], normal_map.outputs["Normal"])
        tex_image = material.node_tree.nodes.new("ShaderNodeTexImage")
        try:
            tex_image.image = bpy.data.images.load(get_resolved_file_path(file_name))
            tex_image.image.colorspace_settings.name = "Non-Color"
        except Exception as e:
            logger.error("could not load file %s ...", get_resolved_file_path(file_name))
            logger.error("... %s", e)
        material.node_tree.links.new(normal_map.inputs["Color"], tex_image.outputs["Color"])

    # Emission
    emission, index = common.decode_color(data, index)
    principled.inputs["Emission"].default_value = emission
    index = build_texture(principled, material, "Emission", False, data, index)