Example No. 1
def _decode_soas(buffer: bytes, index: int) -> Tuple[List[Soa], int]:
    path: List[Union[int, str]] = ["unknown"]
    name = "unknown"
    soas: List[Soa] = []
    try:
        # see soa_buffers()
        aos_count, index = decode_int(buffer, index)
        for _ in range(aos_count):
            path_string, index = decode_string(buffer, index)
            path = json.loads(path_string)

            logger.info("%s: %s ", "build_soa", path)

            element_count, index = decode_int(buffer, index)
            members = []
            for _ in range(element_count):
                name, index = decode_string(buffer, index)
                array_, index = decode_py_array(buffer, index)
                members.append((name, array_))
            soas.append(Soa(path, members))
    except Exception:
        logger.error(f"Exception while decoding for {path} {name}")
        for line in traceback.format_exc().splitlines():
            logger.error(line)
        logger.error("ignored")
        return [], index

    return soas, index
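
All of the decoders in these examples follow the same cursor convention: each decode_* helper returns a (value, next_index) pair, and the caller threads the index through successive calls. Below is a minimal, self-contained sketch of that convention; it assumes a 4-byte little-endian length prefix, which may not match the actual wire format, and the sketch_* names are hypothetical.

import struct
from typing import Tuple

# Hypothetical helpers illustrating the (value, next_index) cursor convention;
# the real decode_int/decode_string may use a different wire layout.
def sketch_decode_int(buffer: bytes, index: int) -> Tuple[int, int]:
    (value,) = struct.unpack_from("<I", buffer, index)
    return value, index + 4

def sketch_decode_string(buffer: bytes, index: int) -> Tuple[str, int]:
    length, index = sketch_decode_int(buffer, index)
    return buffer[index:index + length].decode("utf-8"), index + length

# Usage: thread the index through successive calls, as the examples here do.
payload = struct.pack("<I", 5) + b"hello" + struct.pack("<I", 42)
text, cursor = sketch_decode_string(payload, 0)
number, cursor = sketch_decode_int(payload, cursor)
assert (text, number, cursor) == ("hello", 42, 13)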
Example No. 2
def build_light(data):
    light_path, start = common.decode_string(data, 0)
    light_name, start = common.decode_string(data, start)
    logger.info("build_light %s", light_path)
    light_type, start = common.decode_int(data, start)
    blighttype = "POINT"
    if light_type == common.LightType.SUN.value:
        blighttype = "SUN"
    elif light_type == common.LightType.POINT.value:
        blighttype = "POINT"
    elif light_type == common.LightType.AREA.value:
        blighttype = "AREA"
    else:
        blighttype = "SPOT"

    light = get_or_create_light(light_name, blighttype)

    shadow, start = common.decode_int(data, start)
    if shadow != 0:
        light.use_shadow = True
    else:
        light.use_shadow = False

    color, start = common.decode_color(data, start)
    light.color = (color[0], color[1], color[2])
    light.energy, start = common.decode_float(data, start)
    if light_type == common.LightType.SPOT.value:
        light.spot_size, start = common.decode_float(data, start)
        light.spot_blend, start = common.decode_float(data, start)

    get_or_create_object_data(light_path, light)
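
The if/elif chain above maps the integer light_type from the wire to Blender's light type strings, with "SPOT" as the fallback. A table-driven sketch of the same mapping (assuming only that common.LightType exposes the SUN/POINT/AREA members used above):

# Sketch only: a dict lookup equivalent to the if/elif chain in build_light.
_BLENDER_LIGHT_TYPES = {
    common.LightType.SUN.value: "SUN",
    common.LightType.POINT.value: "POINT",
    common.LightType.AREA.value: "AREA",
}

def to_blender_light_type(light_type: int) -> str:
    # Any other value falls back to "SPOT", matching the else branch above.
    return _BLENDER_LIGHT_TYPES.get(light_type, "SPOT")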
Example No. 3
def decode_grease_pencil_stroke(grease_pencil_frame, stroke_index, data,
                                index):
    material_index, index = common.decode_int(data, index)
    line_width, index = common.decode_int(data, index)
    points, index = common.decode_array(data, index, "5f", 5 * 4)

    if stroke_index >= len(grease_pencil_frame.strokes):
        stroke = grease_pencil_frame.strokes.new()
    else:
        stroke = grease_pencil_frame.strokes[stroke_index]

    stroke.material_index = material_index
    stroke.line_width = line_width

    p = stroke.points
    if len(points) > len(p):
        p.add(len(points) - len(p))
    if len(points) < len(p):
        # remove the extra trailing points so that p matches the decoded points
        for _i in range(len(p) - len(points)):
            p.pop(len(p) - 1)

    for i in range(len(p)):
        point = points[i]
        p[i].co = (point[0], point[1], point[2])
        p[i].pressure = point[3]
        p[i].strength = point[4]
    return index
Example No. 4
def decode_grease_pencil_frame(grease_pencil_layer, data, index):
    grease_pencil_frame, index = common.decode_int(data, index)
    frame = None
    for f in grease_pencil_layer.frames:
        if f.frame_number == grease_pencil_frame:
            frame = f
            break
    if not frame:
        frame = grease_pencil_layer.frames.new(grease_pencil_frame)
    stroke_count, index = common.decode_int(data, index)
    for stroke_index in range(stroke_count):
        index = decode_grease_pencil_stroke(frame, stroke_index, data, index)
    return index
Example No. 5
    def build_remove_keyframe(self, data):
        index = 0
        name, index = common.decode_string(data, index)
        if name not in share_data.blender_objects:
            return name
        ob = share_data.blender_objects[name]
        channel, index = common.decode_string(data, index)
        channel_index, index = common.decode_int(data, index)
        if not hasattr(ob, channel):
            ob = ob.data
        frame, index = common.decode_int(data, index)
        ob.keyframe_delete(channel, index=channel_index, frame=frame)
        return name
Example No. 6
async def exec_buffer(reader: asyncio.StreamReader,
                      writer: asyncio.StreamWriter):
    while True:
        buffer = await reader.read(INT_SIZE)
        if not buffer:
            # connection closed by the peer
            break
        length, _ = decode_int(buffer, 0)
        buffer = await reader.read(length)
        if not buffer:
            break
        addr = writer.get_extra_info("peername")
        logger.debug("-- Received %s bytes from %s", len(buffer), addr)
        buffer_string = buffer.decode("utf-8")
        logger.debug(buffer_string)
        try:
            code = compile(buffer_string, "<string>", "exec")
            share_data.pending_test_update = True
            exec(code, {})
        except Exception:
            import traceback

            logger.error("Exception")
            logger.error(traceback.format_exc())
            logger.error("While processing: ")
            for i, line in enumerate(buffer_string.splitlines()):
                logger.error(f"  {i:>5} {line}")

        logger.debug("-- Done")
Example No. 7
def decode_baked_mesh(obj: Optional[bpy.types.Object], data, index):
    # Note: Blender should not load a baked mesh but we have this function to debug the encoding part
    # and as an example for implementations that load baked meshes
    byte_size, index = common.decode_int(data, index)
    if byte_size == 0:
        return index

    positions, index = common.decode_vector3_array(data, index)
    normals, index = common.decode_vector3_array(data, index)
    uvs, index = common.decode_vector2_array(data, index)
    material_indices, index = common.decode_int_array(data, index)
    triangles, index = common.decode_int3_array(data, index)

    if obj is not None:
        bm = bmesh.new()
        for i in range(len(positions)):
            bm.verts.new(positions[i])
            # according to https://blender.stackexchange.com/questions/49357/bmesh-how-can-i-import-custom-vertex-normals
            # normals are not working for bmesh...
            # vertex.normal = normals[i]
        bm.verts.ensure_lookup_table()

        uv_layer = None
        if len(uvs) > 0:
            uv_layer = bm.loops.layers.uv.new()

        multi_material = False
        if len(material_indices) > 1:
            multi_material = True

        current_uv_index = 0
        for i in range(len(triangles)):
            triangle = triangles[i]
            i1 = triangle[0]
            i2 = triangle[1]
            i3 = triangle[2]
            try:
                face = bm.faces.new((bm.verts[i1], bm.verts[i2], bm.verts[i3]))
                if multi_material:
                    face.material_index = material_indices[i]
                else:
                    face.material_index = 0
                if uv_layer:
                    face.loops[0][uv_layer].uv = uvs[current_uv_index]
                    face.loops[1][uv_layer].uv = uvs[current_uv_index + 1]
                    face.loops[2][uv_layer].uv = uvs[current_uv_index + 2]
                    current_uv_index = current_uv_index + 3
            except Exception:
                pass

        me = obj.data

        bm.to_mesh(me)
        bm.free()

        # hack ! Since bmesh cannot be used to set custom normals
        me.normals_split_custom_set(normals)
        me.use_auto_smooth = True

    return index
Example No. 8
def build_camera(data):
    camera_path, start = common.decode_string(data, 0)
    logger.info("build_camera %s", camera_path)
    camera_name, start = common.decode_string(data, start)
    camera = get_or_create_camera(camera_name)

    camera.lens, start = common.decode_float(data, start)
    camera.clip_start, start = common.decode_float(data, start)
    camera.clip_end, start = common.decode_float(data, start)
    camera.dof.use_dof, start = common.decode_bool(data, start)
    camera.dof.aperture_fstop, start = common.decode_float(data, start)
    colimator_name, start = common.decode_string(data, start)
    sensor_fit, start = common.decode_int(data, start)
    camera.sensor_width, start = common.decode_float(data, start)
    camera.sensor_height, start = common.decode_float(data, start)

    if sensor_fit == 0:
        camera.sensor_fit = "AUTO"
    elif sensor_fit == 1:
        camera.sensor_fit = "VERTICAL"
    else:
        camera.sensor_fit = "HORIZONTAL"

    get_or_create_object_data(camera_path, camera)

    # colimator
    if len(colimator_name) > 0:
        camera.dof.use_dof = True
        camera.dof.focus_object = get_or_create_path(colimator_name)
Example No. 9
def receive_message(data):
    index = 0
    action, index = common.decode_int(data, index)

    if action == AssetBankAction.LIST_REQUEST:
        send_asset_bank_entries()
    elif action == AssetBankAction.IMPORT_REQUEST:
        import_asset(data, index)
Example No. 10
def decode_mesh_generic(client, mesh: bpy.types.Mesh, data, index):
    # Create the temporary object before entering the try block so that the
    # finally clause can always remove it.
    tmp_obj = bpy.data.objects.new("_mixer_tmp_", mesh)
    try:
        byte_size, index = common.decode_int(data, index)
        if byte_size == 0:
            # No base mesh, let's read the baked mesh
            index = decode_baked_mesh(tmp_obj, data, index)
        else:
            index = decode_base_mesh(client, tmp_obj, mesh, data, index)
            # Skip the baked mesh (its size is encoded here)
            baked_mesh_byte_size, index = common.decode_int(data, index)
            index += baked_mesh_byte_size
    finally:
        bpy.data.objects.remove(tmp_obj)

    return index
Example No. 11
def build_grease_pencil_mesh(data):
    grease_pencil_name, index = common.decode_string(data, 0)

    grease_pencil = share_data.blender_grease_pencils.get(grease_pencil_name)
    if not grease_pencil:
        grease_pencil = bpy.data.grease_pencils.new(grease_pencil_name)
        share_data._blender_grease_pencils[grease_pencil.name_full] = grease_pencil

    grease_pencil.materials.clear()
    material_count, index = common.decode_int(data, index)
    for _ in range(material_count):
        material_name, index = common.decode_string(data, index)
        material = share_data.blender_materials.get(material_name)
        grease_pencil.materials.append(material)

    layer_count, index = common.decode_int(data, index)
    for _ in range(layer_count):
        index = decode_grease_pencil_layer(grease_pencil, data, index)
Example No. 12
def decode_arrays(buffer: bytes, index) -> Tuple[ArrayGroups, int]:
    array_group_count, index = decode_int(buffer, index)
    if array_group_count == 0:
        return {}, index

    array_groups: ArrayGroups = {}
    for _groups_index in range(array_group_count):
        array_group_name, index = decode_string(buffer, index)
        array_group_length, index = decode_int(buffer, index)
        array_group: ArrayGroup = []
        for _array_index in range(array_group_length):
            key_string, index = decode_string(buffer, index)
            key = json.loads(key_string)
            array_, index = decode_py_array(buffer, index)
            array_group.append((key, array_))
        array_groups[array_group_name] = array_group

    return array_groups, index
Example No. 13
def decode_grease_pencil_layer(grease_pencil, data, index):
    grease_pencil_layer_name, index = common.decode_string(data, index)
    layer = grease_pencil.layers.get(grease_pencil_layer_name)
    if not layer:
        layer = grease_pencil.layers.new(grease_pencil_layer_name)
    layer.hide, index = common.decode_bool(data, index)
    frame_count, index = common.decode_int(data, index)
    for _ in range(frame_count):
        index = decode_grease_pencil_frame(layer, data, index)
    return index
Example No. 14
    def build_frame(self, data):
        start = 0
        frame, start = common.decode_int(data, start)
        if bpy.context.scene.frame_current != frame:
            previous_value = share_data.client.skip_next_depsgraph_update
            share_data.client.skip_next_depsgraph_update = False
            # bs = self.block_signals
            # self.block_signals = False
            bpy.context.scene.frame_set(frame)
            # self.block_signals = bs
            share_data.client.skip_next_depsgraph_update = previous_value
Example No. 15
    def build_texture_file(self, data):
        path, index = common.decode_string(data, 0)
        if not os.path.exists(path):
            size, index = common.decode_int(data, index)
            try:
                with open(path, "wb") as f:
                    f.write(data[index : index + size])
                self.textures.add(path)
            except Exception as e:
                logger.error("could not write file %s ...", path)
                logger.error("... %s", e)
Example No. 16
    def build_add_keyframe(self, data):
        index = 0
        name, index = common.decode_string(data, index)
        if name not in share_data.blender_objects:
            return name
        ob = share_data.blender_objects[name]
        channel, index = common.decode_string(data, index)
        channel_index, index = common.decode_int(data, index)
        frame, index = common.decode_int(data, index)
        value, index = common.decode_float(data, index)

        if not hasattr(ob, channel):
            ob = ob.data

        attr = getattr(ob, channel)
        if channel_index != -1:
            attr[channel_index] = value
        else:
            attr = value
        setattr(ob, channel, attr)
        ob.keyframe_insert(channel, frame=float(frame), index=channel_index)
        return name
Example No. 17
def build_remove_constraint(data):
    index = 0
    constraint_type, index = common.decode_int(data, index)
    object_name, index = common.decode_string(data, index)
    ob = share_data.blender_objects[object_name]

    constraint = None
    if constraint_type == ConstraintType.PARENT:
        constraint = get_constraint(ob, "CHILD_OF")
    elif constraint_type == ConstraintType.LOOK_AT:
        constraint = get_constraint(ob, "TRACK_TO")

    if constraint is not None:
        ob.constraints.remove(constraint)
Example No. 18
def decode_bmesh_layer(data, index, layer_collection, element_seq,
                       decode_layer_value_func):
    layer_count, index = common.decode_int(data, index)
    while layer_count > len(layer_collection):
        if not layer_collection.is_singleton:
            layer_collection.new()
        else:
            layer_collection.verify()  # Will create a layer and return it
            break  # layer_count should be one but break just in case
    for i in range(layer_count):
        layer = layer_collection[i]
        for elt in element_seq:
            index = decode_layer_value_func(elt, layer, data, index)
    return index
Example No. 19
def build_add_constraint(data):
    index = 0
    constraint_type, index = common.decode_int(data, index)
    object_name, index = common.decode_string(data, index)
    target_name, index = common.decode_string(data, index)

    ob = share_data.blender_objects[object_name]
    target = share_data.blender_objects[target_name]

    if constraint_type == ConstraintType.PARENT:
        add_parent_constraint(ob, target)
    elif constraint_type == ConstraintType.LOOK_AT:
        add_lookat_constraint(ob, target)
    else:
        logger.warning(f"Unknown constraint {constraint_type}")
Example No. 20
def decode_mesh(client, obj, data, index):
    assert obj.data

    # Clear materials before building faces because clearing erases the material indices of the faces
    obj.data.materials.clear()

    byte_size, index = common.decode_int(data, index)
    if byte_size == 0:
        # No base mesh, let's read the baked mesh
        index = decode_bakes_mesh(obj, data, index)
    else:
        index = decode_base_mesh(client, obj, data, index)
        # Skip the baked mesh (its size is encoded here)
        baked_mesh_byte_size, index = common.decode_int(data, index)
        index += baked_mesh_byte_size

    # Materials
    material_names, index = common.decode_string_array(data, index)
    for material_name in material_names:
        material = material_api.get_or_create_material(
            material_name) if material_name != "" else None
        obj.data.materials.append(material)

    return index
Example No. 21
def _decode_and_build_soas(uuid: Uuid, buffer: bytes, index: int):
    path: List[Union[int, str]] = ["unknown"]
    name = "unknown"
    try:
        # see soa_buffers()
        aos_count, index = decode_int(buffer, index)
        for _ in range(aos_count):
            path_string, index = decode_string(buffer, index)
            path = json.loads(path_string)

            logger.info("%s: %s %s", "build_soa", uuid, path)

            element_count, index = decode_int(buffer, index)
            soas = []
            for _ in range(element_count):
                name, index = decode_string(buffer, index)
                array, index = decode_py_array(buffer, index)
                soas.append((name, array))
            share_data.bpy_data_proxy.update_soa(uuid, path, soas)
    except Exception:
        logger.error(f"Exception during build_soa for {uuid} {path} {name}")
        for line in traceback.format_exc().splitlines():
            logger.error(line)
        logger.error("ignored")
Example No. 22
def decode_base_mesh(client, obj, data, index):
    bm = bmesh.new()

    position_count, index = common.decode_int(data, index)
    logger.debug("Reading %d vertices", position_count)

    for _pos_idx in range(position_count):
        co, index = common.decode_vector3(data, index)
        bm.verts.new(co)

    bm.verts.ensure_lookup_table()

    index = decode_bmesh_layer(data, index, bm.verts.layers.bevel_weight,
                               bm.verts, decode_layer_float)

    edge_count, index = common.decode_int(data, index)
    logger.debug("Reading %d edges", edge_count)

    edges_data = struct.unpack(f"{edge_count * 4}I",
                               data[index:index + edge_count * 4 * 4])
    index += edge_count * 4 * 4

    for edge_idx in range(edge_count):
        v1 = edges_data[edge_idx * 4]
        v2 = edges_data[edge_idx * 4 + 1]
        edge = bm.edges.new((bm.verts[v1], bm.verts[v2]))
        edge.smooth = bool(edges_data[edge_idx * 4 + 2])
        edge.seam = bool(edges_data[edge_idx * 4 + 3])

    index = decode_bmesh_layer(data, index, bm.edges.layers.bevel_weight,
                               bm.edges, decode_layer_float)
    index = decode_bmesh_layer(data, index, bm.edges.layers.crease, bm.edges,
                               decode_layer_float)

    face_count, index = common.decode_int(data, index)
    logger.debug("Reading %d faces", face_count)

    for _face_idx in range(face_count):
        material_idx, index = common.decode_int(data, index)
        smooth, index = common.decode_bool(data, index)
        vert_count, index = common.decode_int(data, index)
        face_vertices = struct.unpack(f"{vert_count}I",
                                      data[index:index + vert_count * 4])
        index += vert_count * 4
        verts = [bm.verts[i] for i in face_vertices]
        face = bm.faces.new(verts)
        face.material_index = material_idx
        face.smooth = smooth

    index = decode_bmesh_layer(data, index, bm.faces.layers.face_map, bm.faces,
                               decode_layer_int)

    index = decode_bmesh_layer(data, index, bm.loops.layers.uv,
                               loops_iterator(bm), decode_layer_uv)
    index = decode_bmesh_layer(data, index, bm.loops.layers.color,
                               loops_iterator(bm), decode_layer_color)

    bm.normal_update()
    bm.to_mesh(obj.data)
    bm.free()

    # Load shape keys
    shape_keys_count, index = common.decode_int(data, index)
    obj.shape_key_clear()
    if shape_keys_count > 0:
        logger.debug("Loading %d shape keys", shape_keys_count)
        shapes_keys_list = []
        for _i in range(shape_keys_count):
            shape_key_name, index = common.decode_string(data, index)
            shapes_keys_list.append(obj.shape_key_add(name=shape_key_name))
        for i in range(shape_keys_count):
            shapes_keys_list[i].vertex_group, index = common.decode_string(
                data, index)
        for i in range(shape_keys_count):
            relative_key_name, index = common.decode_string(data, index)
            shapes_keys_list[i].relative_key = obj.data.shape_keys.key_blocks[
                relative_key_name]

        for i in range(shape_keys_count):
            shape_key = shapes_keys_list[i]
            shape_key.mute, index = common.decode_bool(data, index)
            shape_key.value, index = common.decode_float(data, index)
            shape_key.slider_min, index = common.decode_float(data, index)
            shape_key.slider_max, index = common.decode_float(data, index)
            shape_key_data_size, index = common.decode_int(data, index)
            for i in range(shape_key_data_size):
                shape_key.data[i].co = Vector(
                    struct.unpack("3f", data[index:index + 3 * 4]))
                index += 3 * 4
        obj.data.shape_keys.use_relative, index = common.decode_bool(
            data, index)

    # Vertex Groups
    vg_count, index = common.decode_int(data, index)
    obj.vertex_groups.clear()
    for _i in range(vg_count):
        vg_name, index = common.decode_string(data, index)
        vertex_group = obj.vertex_groups.new(name=vg_name)
        vertex_group.lock_weight, index = common.decode_bool(data, index)
        vg_size, index = common.decode_int(data, index)
        for _elmt_idx in range(vg_size):
            vert_idx, index = common.decode_int(data, index)
            weight, index = common.decode_float(data, index)
            vertex_group.add([vert_idx], weight, "REPLACE")

    # Normals
    obj.data.use_auto_smooth, index = common.decode_bool(data, index)
    obj.data.auto_smooth_angle, index = common.decode_float(data, index)

    has_custom_normal, index = common.decode_bool(data, index)

    if has_custom_normal:
        normals = []
        for _loop in obj.data.loops:
            normal, index = common.decode_vector3(data, index)
            normals.append(normal)
        obj.data.normals_split_custom_set(normals)

    # UV Maps and Vertex Colors are added automatically based on layers in the bmesh
    # We just need to update their name and active_render state:

    # UV Maps
    for uv_layer in obj.data.uv_layers:
        uv_layer.name, index = common.decode_string(data, index)
        uv_layer.active_render, index = common.decode_bool(data, index)

    # Vertex Colors
    for vertex_colors in obj.data.vertex_colors:
        vertex_colors.name, index = common.decode_string(data, index)
        vertex_colors.active_render, index = common.decode_bool(data, index)

    return index
Example No. 23
def decode_layer_int(elmt, layer, data, index):
    elmt[layer], index = common.decode_int(data, index)
    return index
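
decode_bmesh_layer (Example No. 18) accepts any callback with this (elmt, layer, data, index) signature; decode_layer_int above is one instance. The float variant referenced in Example No. 22 is not shown in this listing, but a plausible sketch, assuming common.decode_float follows the same (value, next_index) convention, would be:

def decode_layer_float(elmt, layer, data, index):
    # Same callback shape as decode_layer_int: read one float into the
    # element's layer slot and return the advanced index.
    elmt[layer], index = common.decode_float(data, index)
    return index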
Example No. 24
def build_shot_manager_action(data):
    sm_props = get_shot_manager()
    if sm_props is None:
        return
    get_or_set_current_take(sm_props)

    index = 0
    action, index = common.decode_int(data, index)
    shot_index, index = common.decode_int(data, index)
    # bpy.context.scene.UAS_shot_manager_props.selected_shot_index = shot_index
    bpy.context.scene.UAS_shot_manager_props.setSelectedShotByIndex(shot_index)

    # Add
    if action == SMAction.ADD_SHOT:
        shot_name, index = common.decode_string(data, index)
        start, index = common.decode_int(data, index)
        end, index = common.decode_int(data, index)
        camera_name, index = common.decode_string(data, index)
        camera = None
        if len(camera_name) > 0:
            camera = bpy.data.objects[camera_name]

        color, index = common.decode_color(data, index)

        # bpy.context.scene.UAS_shot_manager_props.get_isInitialized()
        shot_manager.add_shot(
            sm_props,
            at_index=shot_index,
            take_index=-1,
            name=shot_name,
            start=start,  # avoid using a start value shorter than the length of the handles (which is 10)
            end=end,
            camera=camera,
            color=(color[0], color[1], color[2], 1),
            enabled=True,
        )
    # Delete
    elif action == SMAction.DELETE_SHOT:
        s = shot_manager.get_shot(sm_props, shot_index)
        shot_manager.remove_shot(sm_props, s)
    # Duplicate
    elif action == SMAction.DUPLICATE_SHOT:
        s = shot_manager.get_shot(sm_props, shot_index)
        new_shot = shot_manager.copy_shot(sm_props,
                                          shot=s,
                                          at_index=shot_index + 1)
        shot_name, index = common.decode_string(data, index)
        shot.set_name(new_shot, shot_name)
    # Move
    elif action == SMAction.MOVE_SHOT:
        s = shot_manager.get_shot(sm_props, shot_index)
        offset, index = common.decode_int(data, index)
        shot_manager.move_shot_to_index(sm_props,
                                        shot=s,
                                        new_index=(shot_index + offset))
    # Update
    elif action == SMAction.UPDATE_SHOT:
        # take = bpy.context.scene.UAS_shot_manager_props.current_take_name
        start, index = common.decode_int(data, index)
        end, index = common.decode_int(data, index)
        camera, index = common.decode_string(data, index)
        color, index = common.decode_color(data, index)
        enabled, index = common.decode_int(data, index)
        s = shot_manager.get_shot(sm_props, shot_index)
        if start > -1:
            shot.set_start(s, start)
        if end > -1:
            shot.set_end(s, end)
        if len(camera) > 0:
            shot.set_camera(s, bpy.data.objects[camera])
        if enabled != -1:
            shot.set_enable_state(s, enabled)
Example No. 25
def decode_bakes_mesh(obj, data, index):
    # Note: Blender should not load a baked mesh but we have this function to debug the encoding part
    # and as an example for implementations that load baked meshes
    byte_size, index = common.decode_int(data, index)
    if byte_size == 0:
        return index

    positions, index = common.decode_vector3_array(data, index)
    normals, index = common.decode_vector3_array(data, index)
    uvs, index = common.decode_vector2_array(data, index)
    material_indices, index = common.decode_int2_array(data, index)
    triangles, index = common.decode_int3_array(data, index)

    bm = bmesh.new()
    for i in range(len(positions)):
        vertex = bm.verts.new(positions[i])
        # according to https://blender.stackexchange.com/questions/49357/bmesh-how-can-i-import-custom-vertex-normals
        # normals are not working for bmesh...
        vertex.normal = normals[i]
    bm.verts.ensure_lookup_table()

    uv_layer = None
    if len(uvs) > 0:
        uv_layer = bm.loops.layers.uv.new()

    current_material_index = 0
    index_in_material_indices = 0
    next_triangle_index = len(triangles)
    if len(material_indices) > 1:
        next_triangle_index = material_indices[index_in_material_indices +
                                               1][0]
    if len(material_indices) > 0:
        current_material_index = material_indices[index_in_material_indices][1]

    for i in range(len(triangles)):
        if i >= next_triangle_index:
            index_in_material_indices = index_in_material_indices + 1
            next_triangle_index = len(triangles)
            if len(material_indices) > index_in_material_indices + 1:
                next_triangle_index = material_indices[
                    index_in_material_indices + 1][0]
            current_material_index = material_indices[
                index_in_material_indices][1]

        triangle = triangles[i]
        i1 = triangle[0]
        i2 = triangle[1]
        i3 = triangle[2]
        try:
            face = bm.faces.new((bm.verts[i1], bm.verts[i2], bm.verts[i3]))
            face.material_index = current_material_index
            if uv_layer:
                face.loops[0][uv_layer].uv = uvs[i1]
                face.loops[1][uv_layer].uv = uvs[i2]
                face.loops[2][uv_layer].uv = uvs[i3]
        except Exception:
            pass

    me = obj.data

    bm.to_mesh(me)
    bm.free()

    # hack ! Since bmesh cannot be used to set custom normals
    normals2 = []
    for loop in me.loops:
        normals2.append(normals[loop.vertex_index])
    me.normals_split_custom_set(normals2)
    me.use_auto_smooth = True

    return index
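
The material_indices array decoded above appears to be a list of (first_triangle_index, material_index) pairs: each entry says which material applies from that triangle onward, which is what the index_in_material_indices bookkeeping walks through. A small sketch of that interpretation (hypothetical helper, not part of the addon):

def material_for_triangle(material_indices, triangle_index, default=0):
    # material_indices: list of (first_triangle_index, material_index) pairs,
    # assumed sorted by first_triangle_index.
    material = default
    for first_triangle, material_index in material_indices:
        if triangle_index >= first_triangle:
            material = material_index
        else:
            break
    return material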
Example No. 26
    def network_consumer(self):
        """
        This method can be considered the entry point of this class. It is meant to be called regularly to send
        pending commands to the server, and receive then process new ones.

        Pending commands are accumulated with add_command(), most calls originate from handlers function.

        Incoming commands are read from the socket and directly processed here to update Blender's data. This can
        be costly and a possible optimization in the future would be to split the processing accross several timer
        run. This can be challenging because we need to keep the current update state. Maybe this can be solved
        naturally with coroutines.

        We call it from the timer registered by the addon.
        """

        from mixer.bl_panels import redraw as redraw_panels, update_ui_lists

        assert self.is_connected()

        set_draw_handlers()

        # Loop remains infinite while we have GROUP_BEGIN commands without their corresponding GROUP_END received
        # todo Change this -> probably not a good idea because the sending client might disconnect before GROUP_END occurs
        # or it needs to be guaranteed by the server
        group_count = 0
        while True:
            received_commands = self.fetch_commands(get_mixer_prefs().commands_send_interval)

            set_dirty = True
            # Process all received commands
            for command in received_commands:
                if self._joining and command.type.value > common.MessageType.COMMAND.value:
                    self._received_byte_size += command.byte_size()
                    self._received_command_count += 1
                    if self._joining_room_name in self.rooms_attributes:
                        get_mixer_props().joining_percentage = (
                            self._received_byte_size
                            / self.rooms_attributes[self._joining_room_name][RoomAttributes.BYTE_SIZE]
                        )
                        redraw_panels()

                if command.type == MessageType.GROUP_BEGIN:
                    group_count += 1
                    continue

                if command.type == MessageType.GROUP_END:
                    group_count -= 1
                    continue

                if self.has_default_handler(command.type):
                    if command.type == MessageType.JOIN_ROOM and self._joining:
                        self._joining = False
                        get_mixer_props().joining_percentage = 1

                    update_ui_lists()
                    self.block_signals = False  # todo investigate why we should set this to False here
                    continue

                if set_dirty:
                    share_data.set_dirty()
                    set_dirty = False

                self.block_signals = True

                try:
                    # Manage commands wrapped with a Blender client id (time-synced commands only, for now).
                    # Consume messages carrying our client_id to receive commands from other clients,
                    # like play/pause. Ignore all other client_ids.
                    if command.type == MessageType.CLIENT_ID_WRAPPER:
                        id, index = common.decode_string(command.data, 0)
                        if id != share_data.client.client_id:
                            continue
                        command_type, index = common.decode_int(command.data, index)
                        command_data = command.data[index:]
                        command = common.Command(command_type, command_data)

                    if command.type == MessageType.CONTENT:
                        # The server asks for scene content (at room creation)
                        try:
                            assert share_data.client.current_room is not None
                            self.set_room_attributes(
                                share_data.client.current_room,
                                # Documentation to update if you change "experimental_sync": doc/protocol.md
                                {"experimental_sync": get_mixer_prefs().experimental_sync},
                            )
                            send_scene_content()
                            # Inform end of content
                            self.add_command(common.Command(MessageType.CONTENT))
                        except Exception as e:
                            raise SendSceneContentFailed() from e
                        continue

                    # Set this to True by default
                    # todo Check for build commands that do not trigger a depsgraph update,
                    # because a false positive here can lead to ignoring real updates
                    command_triggers_depsgraph_update = True

                    if command.type == MessageType.GREASE_PENCIL_MESH:
                        grease_pencil_api.build_grease_pencil_mesh(command.data)
                    elif command.type == MessageType.GREASE_PENCIL_MATERIAL:
                        grease_pencil_api.build_grease_pencil_material(command.data)
                    elif command.type == MessageType.GREASE_PENCIL_CONNECTION:
                        grease_pencil_api.build_grease_pencil_connection(command.data)

                    elif command.type == MessageType.CLEAR_CONTENT:
                        clear_scene_content()
                        self._joining = True
                        self._received_command_count = 0
                        self._received_byte_size = 0
                        get_mixer_props().joining_percentage = 0
                        redraw_panels()
                    elif command.type == MessageType.MESH:
                        self.build_mesh(command.data)
                    elif command.type == MessageType.TRANSFORM:
                        self.build_transform(command.data)
                    elif command.type == MessageType.MATERIAL:
                        material_api.build_material(command.data)
                    elif command.type == MessageType.ASSIGN_MATERIAL:
                        material_api.build_assign_material(command.data)
                    elif command.type == MessageType.DELETE:
                        self.build_delete(command.data)
                    elif command.type == MessageType.CAMERA:
                        camera_api.build_camera(command.data)
                    elif command.type == MessageType.LIGHT:
                        light_api.build_light(command.data)
                    elif command.type == MessageType.RENAME:
                        self.build_rename(command.data)
                    elif command.type == MessageType.DUPLICATE:
                        self.build_duplicate(command.data)
                    elif command.type == MessageType.SEND_TO_TRASH:
                        self.build_send_to_trash(command.data)
                    elif command.type == MessageType.RESTORE_FROM_TRASH:
                        self.build_restore_from_trash(command.data)
                    elif command.type == MessageType.TEXTURE:
                        self.build_texture_file(command.data)

                    elif command.type == MessageType.COLLECTION:
                        collection_api.build_collection(command.data)
                    elif command.type == MessageType.COLLECTION_REMOVED:
                        collection_api.build_collection_removed(command.data)

                    elif command.type == MessageType.INSTANCE_COLLECTION:
                        collection_api.build_collection_instance(command.data)

                    elif command.type == MessageType.ADD_COLLECTION_TO_COLLECTION:
                        collection_api.build_collection_to_collection(command.data)
                    elif command.type == MessageType.REMOVE_COLLECTION_FROM_COLLECTION:
                        collection_api.build_remove_collection_from_collection(command.data)
                    elif command.type == MessageType.ADD_OBJECT_TO_COLLECTION:
                        collection_api.build_add_object_to_collection(command.data)
                    elif command.type == MessageType.REMOVE_OBJECT_FROM_COLLECTION:
                        collection_api.build_remove_object_from_collection(command.data)

                    elif command.type == MessageType.ADD_COLLECTION_TO_SCENE:
                        scene_api.build_collection_to_scene(command.data)
                    elif command.type == MessageType.REMOVE_COLLECTION_FROM_SCENE:
                        scene_api.build_remove_collection_from_scene(command.data)
                    elif command.type == MessageType.ADD_OBJECT_TO_SCENE:
                        scene_api.build_add_object_to_scene(command.data)
                    elif command.type == MessageType.REMOVE_OBJECT_FROM_SCENE:
                        scene_api.build_remove_object_from_scene(command.data)

                    elif command.type == MessageType.SCENE:
                        scene_api.build_scene(command.data)
                    elif command.type == MessageType.SCENE_REMOVED:
                        scene_api.build_scene_removed(command.data)
                    elif command.type == MessageType.SCENE_RENAMED:
                        scene_api.build_scene_renamed(command.data)

                    elif command.type == MessageType.OBJECT_VISIBILITY:
                        object_api.build_object_visibility(command.data)

                    elif command.type == MessageType.FRAME:
                        self.build_frame(command.data)
                    elif command.type == MessageType.QUERY_CURRENT_FRAME:
                        self.query_current_frame()

                    elif command.type == MessageType.PLAY:
                        self.build_play(command.data)
                    elif command.type == MessageType.PAUSE:
                        self.build_pause(command.data)
                    elif command.type == MessageType.ADD_KEYFRAME:
                        self.build_add_keyframe(command.data)
                    elif command.type == MessageType.REMOVE_KEYFRAME:
                        self.build_remove_keyframe(command.data)
                    elif command.type == MessageType.QUERY_OBJECT_DATA:
                        self.build_query_object_data(command.data)

                    elif command.type == MessageType.CLEAR_ANIMATIONS:
                        self.build_clear_animations(command.data)
                    elif command.type == MessageType.SHOT_MANAGER_MONTAGE_MODE:
                        self.build_montage_mode(command.data)
                    elif command.type == MessageType.SHOT_MANAGER_ACTION:
                        shot_manager.build_shot_manager_action(command.data)

                    elif command.type == MessageType.BLENDER_DATA_UPDATE:
                        data_api.build_data_update(command.data)
                    elif command.type == MessageType.BLENDER_DATA_REMOVE:
                        data_api.build_data_remove(command.data)
                    else:
                        # Command is ignored, so no depsgraph update can be triggered
                        command_triggers_depsgraph_update = False

                    if command_triggers_depsgraph_update:
                        self.skip_next_depsgraph_update = True

                except Exception as e:
                    logger.warning(f"Exception during processing of message {str(command.type)}")
                    log_traceback(logger.warning)
                    if get_mixer_prefs().env == "development" or isinstance(e, SendSceneContentFailed):
                        raise

                finally:
                    self.block_signals = False

            if group_count == 0:
                break

        if not set_dirty:
            share_data.update_current_data()

        # Some objects may have been obtained before their parent
        # In that case we resolve parenting here
        # todo Parenting strategy should be changed: we should store the name of the parent in the command instead of
        # having a path as name
        if len(share_data.pending_parenting) > 0:
            remaining_parentings = set()
            for path in share_data.pending_parenting:
                path_elem = path.split("/")
                ob = None
                parent = None
                for elem in path_elem:
                    ob = share_data.blender_objects.get(elem)
                    if not ob:
                        remaining_parentings.add(path)
                        break
                    if ob.parent != parent:  # do it only if needed, otherwise it resets matrix_parent_inverse
                        ob.parent = parent
                    parent = ob
            share_data.pending_parenting = remaining_parentings

        self.set_client_attributes(self.compute_client_custom_attributes())