def soa_buffers(datablock_proxy: Optional[DatablockProxy]) -> List[bytes]:
    """Serialize the structure-of-arrays (SOA) data of a datablock proxy.

    Returns a list of byte chunks: a count of SOA groups, then for each group
    its JSON-encoded path, the number of elements, and for each element its
    name followed by its packed array.
    """
    if datablock_proxy is None:
        # empty update, should not happen
        return [encode_int(0)]

    # Layout is
    # number of AosProxy: 2
    # soa path in datablock : ("vertices")
    # number of SoaElement : 2
    # element name: "co"
    # array
    # element name: "normals"
    # array
    # soa path in datablock : ("edges")
    # number of SoaElement : 1
    # element name: "vertices"
    # array
    items: List[bytes] = []
    items.append(encode_int(len(datablock_proxy._soas)))
    for path, soa_proxies in datablock_proxy._soas.items():
        # path is JSON-encoded so the receiver can rebuild the tuple path
        path_string = json.dumps(path)
        items.append(encode_string(path_string))
        items.append(encode_int(len(soa_proxies)))
        # NOTE(review): soa_proxies is iterated as (name, element) pairs —
        # presumably a list of tuples; verify against the proxy builder.
        for element_name, soa_element in soa_proxies:
            # NOTE(review): elements with _array is None are skipped, yet the
            # count written above includes them — confirm the decoder
            # tolerates fewer elements than announced.
            if soa_element._array is not None:
                items.append(encode_string(element_name))
                items.append(encode_py_array(soa_element._array))
    return items
def send_animation_buffer(self, obj_name, animation_data, channel_name, channel_index=-1):
    """Send the keyframes of one animation channel as an ANIMATION command.

    A channel_index of -1 matches the first f-curve on the data path
    regardless of its array index. When the animation data has no action,
    an empty (zero keyframe) buffer is sent instead.
    """
    if not animation_data:
        return

    action = animation_data.action
    if not action:
        # No action attached: announce an empty channel to the peer.
        empty_payload = (
            common.encode_string(obj_name)
            + common.encode_string(channel_name)
            + common.encode_int(channel_index)
            + common.int_to_bytes(0, 4)  # send empty buffer
        )
        self.add_command(common.Command(MessageType.ANIMATION, empty_payload, 0))
        return

    for fcurve in action.fcurves:
        if fcurve.data_path != channel_name:
            continue
        if channel_index != -1 and fcurve.array_index != channel_index:
            continue
        keyframes = fcurve.keyframe_points
        # Times are truncated to ints; values stay floats.
        times = [int(keyframe.co[0]) for keyframe in keyframes]
        values = [keyframe.co[1] for keyframe in keyframes]
        payload = (
            common.encode_string(obj_name)
            + common.encode_string(channel_name)
            + common.encode_int(channel_index)
            + common.int_to_bytes(len(keyframes), 4)
            + struct.pack(f"{len(times)}i", *times)
            + struct.pack(f"{len(values)}f", *values)
        )
        self.add_command(common.Command(MessageType.ANIMATION, payload, 0))
        # Only the first matching f-curve is sent, as in the original flow.
        return
def get_light_buffer(obj):
    """Serialize a light object (path, name, type, shadow, color, power,
    spot parameters) into a byte buffer; returns None for unsupported types."""
    light = obj.data
    type_by_name = {
        "POINT": common.LightType.POINT,
        "SPOT": common.LightType.SPOT,
        "SUN": common.LightType.SUN,
        "AREA": common.LightType.AREA,
    }
    light_type = type_by_name.get(light.type)
    if light_type is None:
        return None

    # Cycles stores the shadow toggle on its own sub-struct.
    if bpy.context.scene.render.engine == "CYCLES":
        shadow = light.cycles.cast_shadow
    else:
        shadow = light.use_shadow

    # Defaults match the original sentinel values for non-spot lights.
    spot_blend = 10.0
    spot_size = 0.0
    if light_type == common.LightType.SPOT:
        spot_size = light.spot_size
        spot_blend = light.spot_blend

    return b"".join((
        common.encode_string(get_object_path(obj)),
        common.encode_string(light.name_full),
        common.encode_int(light_type.value),
        common.encode_int(shadow),
        common.encode_color(light.color),
        common.encode_float(light.energy),
        common.encode_float(spot_size),
        common.encode_float(spot_blend),
    ))
def encode_mesh(obj, do_encode_base_mesh, do_encode_baked_mesh):
    """Encode the base and/or baked mesh of *obj*, each as a length-prefixed
    sub-buffer (length 0 when skipped), followed by the material name array."""
    chunks = []

    if do_encode_base_mesh:
        logger.info("encode_base_mesh %s", obj.name_full)
        base_payload = encode_base_mesh(obj)
        chunks.append(common.encode_int(len(base_payload)))
        chunks.append(base_payload)
    else:
        chunks.append(common.encode_int(0))

    if do_encode_baked_mesh:
        logger.info("encode_baked_mesh %s", obj.name_full)
        baked_payload = encode_baked_mesh(obj)
        chunks.append(common.encode_int(len(baked_payload)))
        chunks.append(baked_payload)
    else:
        chunks.append(common.encode_int(0))

    # Materials: empty slots are encoded as empty strings.
    material_names = [m.name_full if m is not None else "" for m in obj.data.materials]
    chunks.append(common.encode_string_array(material_names))
    return b"".join(chunks)
def send_scene():
    """Send the full Shot Manager shot list as a SHOT_MANAGER_CONTENT command."""
    get_state()
    shots = share_data.shot_manager.shots
    pieces = [common.encode_int(len(shots))]
    for shot in shots:
        pieces.append(common.encode_string(shot.name))
        pieces.append(common.encode_string(shot.camera_name))
        pieces.append(common.encode_int(shot.start))
        pieces.append(common.encode_int(shot.end))
        pieces.append(common.encode_bool(shot.enabled))
    share_data.client.add_command(
        common.Command(common.MessageType.SHOT_MANAGER_CONTENT, b"".join(pieces), 0)
    )
def send_grease_pencil_stroke(stroke):
    """Serialize one grease-pencil stroke: material index, line width, then a
    point count followed by per-point (co..., pressure, strength) floats."""
    flattened = []
    for point in stroke.points:
        flattened.extend(point.co)
        flattened.append(point.pressure)
        flattened.append(point.strength)
    header = common.encode_int(stroke.material_index) + common.encode_int(stroke.line_width)
    points_blob = common.int_to_bytes(len(stroke.points), 4) + struct.pack(
        f"{len(flattened)}f", *flattened
    )
    return header + points_blob
def add_command(self, command: common.Command):
    """Queue a command, packing it into a CLIENT_ID_WRAPPER when in
    synced-time mode, otherwise delegating to the parent implementation."""
    # A wrapped message is a message emitted from a frame change event.
    # Right now we wrap this kind of messages adding the client_id.
    # In the future we will probably always add the client_id to all messages. But the difference
    # between synced time messages and the other must remain.
    if self.synced_time_messages:
        # Flatten the command to raw bytes: type id followed by its data.
        command = common.encode_int(command.type.value) + command.data
        if self.command_pack is None:
            # Lazily start the wrapper command; its data begins with our client id.
            self.command_pack = common.Command(
                MessageType.CLIENT_ID_WRAPPER, common.encode_string(self.client_id), 0
            )
        # Append as a length-prefixed sub-message to the pack.
        self.command_pack.data += common.encode_int(len(command)) + command
    else:
        super().add_command(command)
def send_group_begin(self):
    """Mark the start of a message group on the server.

    The integer sent is for future use: the server might fill it with the
    group size once all messages have been received, giving future clients
    the opportunity to know how many messages they need to process in the
    group (and probably show a progress bar to their user if there are a
    lot of messages, e.g. initial scene creation).
    """
    # NOTE(review): unlike other call sites, Command is built with only two
    # arguments here — presumably the third has a default; confirm.
    self.add_command(
        common.Command(MessageType.GROUP_BEGIN, common.encode_int(0)))
def send_texture_data(self, path, data):
    """Send a texture blob as a TEXTURE command: path, byte length, then bytes.

    The path is also remembered in self.textures to track what was sent.
    """
    self.textures.add(path)
    payload = common.encode_string(path) + common.encode_int(len(data)) + data
    self.add_command(common.Command(MessageType.TEXTURE, payload, 0))
def get_camera_buffer(obj):
    """Serialize a camera object (path, name, lens, clip planes, depth of
    field settings, sensor fit and dimensions) into a byte buffer."""
    cam = obj.data
    fit_by_name = {
        "AUTO": common.SensorFitMode.AUTO,
        "HORIZONTAL": common.SensorFitMode.HORIZONTAL,
        "VERTICAL": common.SensorFitMode.VERTICAL,
    }
    # Unknown fit names fall back to AUTO, as in the original chain.
    sensor_fit = fit_by_name.get(cam.sensor_fit, common.SensorFitMode.AUTO)
    focus_object = cam.dof.focus_object
    focus_object_name = focus_object.name_full if focus_object is not None else ""
    return b"".join((
        common.encode_string(get_object_path(obj)),
        common.encode_string(obj.name_full),
        common.encode_float(cam.lens),
        common.encode_float(cam.clip_start),
        common.encode_float(cam.clip_end),
        common.encode_bool(cam.dof.use_dof),
        common.encode_float(cam.dof.aperture_fstop),
        common.encode_string(focus_object_name),
        common.encode_int(sensor_fit.value),
        common.encode_float(cam.sensor_width),
        common.encode_float(cam.sensor_height),
    ))
def send_grease_pencil_layer(layer, name):
    """Serialize one grease-pencil layer: name, hidden flag, frame count,
    then each frame's serialized payload."""
    frames_payload = b"".join(send_grease_pencil_frame(frame) for frame in layer.frames)
    return (
        common.encode_string(name)
        + common.encode_bool(layer.hide)
        + common.encode_int(len(layer.frames))
        + frames_payload
    )
def get_camera_buffer(obj):
    """Serialize a camera object (path, name, lens, clip planes, aperture,
    sensor fit and dimensions) into a byte buffer (no depth-of-field fields)."""
    cam = obj.data
    fit_by_name = {
        "AUTO": common.SensorFitMode.AUTO,
        "HORIZONTAL": common.SensorFitMode.HORIZONTAL,
        "VERTICAL": common.SensorFitMode.VERTICAL,
    }
    # Unknown fit names fall back to AUTO, as in the original chain.
    sensor_fit = fit_by_name.get(cam.sensor_fit, common.SensorFitMode.AUTO)
    return b"".join((
        common.encode_string(get_object_path(obj)),
        common.encode_string(obj.name_full),
        common.encode_float(cam.lens),
        common.encode_float(cam.clip_start),
        common.encode_float(cam.clip_end),
        common.encode_float(cam.dof.aperture_fstop),
        common.encode_int(sensor_fit.value),
        common.encode_float(cam.sensor_width),
        common.encode_float(cam.sensor_height),
    ))
def send_string(self, script: str):
    """Send *script* to the peer as a length-prefixed UTF-8 payload.

    Ensures that Blender processes the scripts one by one, otherwise they
    get buffered here on startup and Blender gets all the scripts at once
    before the initial synchronization is done.
    """
    payload = script.encode("utf-8")
    length_buffer = encode_int(len(payload))
    # Bug fix: socket.send() may transmit only part of the buffer; a partial
    # write would desynchronize the length-prefixed framing. sendall() loops
    # until every byte is written (or raises).
    self._sock.sendall(length_buffer)
    self._sock.sendall(payload)
def send_frame():
    """Notify the peer when the Shot Manager's current shot index changes."""
    sm_props = get_shot_manager()
    if sm_props is None:
        return
    new_index = shot_manager.get_current_shot_index(sm_props)
    if new_index == share_data.shot_manager.current_shot_index:
        return  # unchanged: nothing to send
    share_data.shot_manager.current_shot_index = new_index
    share_data.client.add_command(
        common.Command(
            common.MessageType.SHOT_MANAGER_CURRENT_SHOT,
            common.encode_int(new_index),
            0,
        )
    )
def send_grease_pencil_mesh(client: Client, obj):
    """Send a grease-pencil datablock (materials and layers) as a
    GREASE_PENCIL_MESH command, then its time-offset modifier data."""
    grease_pencil = obj.data
    pieces = [
        common.encode_string(grease_pencil.name_full),
        common.encode_int(len(grease_pencil.materials)),
    ]
    for material in grease_pencil.materials:
        # Empty material slots are sent with the placeholder name "Default".
        pieces.append(common.encode_string(material.name_full if material else "Default"))
    pieces.append(common.encode_int(len(grease_pencil.layers)))
    for name, layer in grease_pencil.layers.items():
        pieces.append(send_grease_pencil_layer(layer, name))
    client.add_command(
        common.Command(common.MessageType.GREASE_PENCIL_MESH, b"".join(pieces), 0)
    )
    send_grease_pencil_time_offset(client, obj)
def send_grease_pencil_time_offset(client: Client, obj):
    """Send the first GP_TIME modifier of *obj* as a
    GREASE_PENCIL_TIME_OFFSET command; sends nothing when absent."""
    grease_pencil = obj.data
    buffer = common.encode_string(grease_pencil.name_full)
    for modifier in obj.grease_pencil_modifiers:
        if modifier.type != "GP_TIME":
            continue
        buffer += (
            common.encode_int(modifier.offset)
            + common.encode_float(modifier.frame_scale)
            + common.encode_bool(modifier.use_custom_frame_range)
            + common.encode_int(modifier.frame_start)
            + common.encode_int(modifier.frame_end)
        )
        client.add_command(
            common.Command(common.MessageType.GREASE_PENCIL_TIME_OFFSET, buffer, 0)
        )
        # Only the first GP_TIME modifier is considered.
        break
def encode_arrays(datablock_proxy: DatablockProxy) -> List[bytes]:
    """Serialize the keyed array groups of a datablock proxy.

    Emits the group count, then per group: its name, the number of keyed
    arrays, and for each one a JSON-encoded key followed by the packed array.
    """
    if not hasattr(datablock_proxy, "_arrays"):
        return [encode_int(0)]

    groups = datablock_proxy._arrays
    items = [encode_int(len(groups))]
    for group_name, keyed_arrays in groups.items():
        # for vertex groups, _arrays layout is
        # { "vertex_groups: [
        #     ([0, "i"], indices_array_of_vertex_group_0),
        #     ([0, "w"], weights_array_of_vertex_group_0),
        #     ...
        # ]}
        items.append(encode_string(group_name))
        items.append(encode_int(len(keyed_arrays)))
        for key, array_ in keyed_arrays:
            items.append(encode_string(json.dumps(key)))
            items.append(encode_py_array(array_))
    return items
def send_asset_bank_entries():
    """Refresh the asset bank and send its entries (names, tags, thumbnail
    paths) as an ASSET_BANK LIST_RESPONSE command."""
    if bpy.context.window_manager.uas_asset_bank is None:
        return
    bpy.ops.uas.asset_bank_refresh()
    assets = bpy.context.window_manager.uas_asset_bank.assets
    names = [asset.nice_name for asset in assets]
    tags = [asset.tags for asset in assets]
    thumbnails = [asset.thumbnail_path for asset in assets]
    payload = (
        common.encode_int(AssetBankAction.LIST_RESPONSE)
        + common.encode_string_array(names)
        + common.encode_string_array(tags)
        + common.encode_string_array(thumbnails)
    )
    share_data.client.add_command(
        common.Command(common.MessageType.ASSET_BANK, payload, 0)
    )
def send_grease_pencil_frame(frame):
    """Serialize one grease-pencil frame: frame number, stroke count, then
    each stroke's serialized payload."""
    strokes_payload = b"".join(
        send_grease_pencil_stroke(stroke) for stroke in frame.strokes
    )
    return (
        common.encode_int(frame.frame_number)
        + common.encode_int(len(frame.strokes))
        + strokes_payload
    )
def send_frame_start_end(self, start, end):
    """Send the animation frame range as two encoded ints (start, end)."""
    self.add_command(
        common.Command(MessageType.FRAME_START_END, common.encode_int(start) + common.encode_int(end), 0)
    )
def send_frame(self, frame):
    """Send the current frame number as a single encoded int."""
    self.add_command(common.Command(MessageType.FRAME, common.encode_int(frame), 0))
def send_add_constraint(client: Client, object_: bpy.types.Object, constraint_type: ConstraintType, target: str):
    """Send an ADD_CONSTRAINT command: constraint type, constrained object
    name, and target object name."""
    payload = b"".join((
        common.encode_int(constraint_type),
        common.encode_string(object_.name_full),
        common.encode_string(target),
    ))
    client.add_command(common.Command(common.MessageType.ADD_CONSTRAINT, payload, 0))
def send_remove_constraints(client: Client, object_: bpy.types.Object, constraint_type: ConstraintType):
    """Send a REMOVE_CONSTRAINT command: constraint type and object name."""
    payload = b"".join((
        common.encode_int(constraint_type),
        common.encode_string(object_.name_full),
    ))
    client.add_command(common.Command(common.MessageType.REMOVE_CONSTRAINT, payload, 0))
def send_imported_object_name(blender_name: str, nice_name: str):
    """Report an imported asset via an ASSET_BANK IMPORT_RESPONSE command,
    carrying the Blender-side name and the user-facing name."""
    payload = b"".join((
        common.encode_int(AssetBankAction.IMPORT_RESPONSE),
        common.encode_string(blender_name),
        common.encode_string(nice_name),
    ))
    share_data.client.add_command(
        common.Command(common.MessageType.ASSET_BANK, payload, 0)
    )
def encode_base_mesh(obj):
    """Serialize the editable ("base") representation of a mesh object.

    Appends, in order: geometry, shape keys, vertex groups, normals,
    UV map metadata and vertex color metadata. Order is part of the wire
    format and must match the decoder.
    """
    # Temporary for curves and other objects that support to_mesh()
    # #todo Implement correct base encoding for these objects
    mesh_data = obj.data if obj.type == "MESH" else obj.to_mesh()
    if mesh_data is None:
        # This happens for empty curves
        # This is temporary, when curves will be fully implemented we will encode something
        return bytes()
    binary_buffer = encode_base_mesh_geometry(mesh_data)
    # Shape keys
    # source https://blender.stackexchange.com/questions/111661/creating-shape-keys-using-python
    if mesh_data.shape_keys is None:
        binary_buffer += common.encode_int(0)  # Indicate 0 key blocks
    else:
        logger.debug("Writing %d shape keys", len(mesh_data.shape_keys.key_blocks))
        binary_buffer += common.encode_int(len(
            mesh_data.shape_keys.key_blocks))
        # Encode names
        for key_block in mesh_data.shape_keys.key_blocks:
            binary_buffer += common.encode_string(key_block.name)
        # Encode vertex group names
        for key_block in mesh_data.shape_keys.key_blocks:
            binary_buffer += common.encode_string(key_block.vertex_group)
        # Encode relative key names
        for key_block in mesh_data.shape_keys.key_blocks:
            binary_buffer += common.encode_string(key_block.relative_key.name)
        # Encode data: per key block a packed record of
        # (mute:uint, value:float, slider_min:float, slider_max:float,
        #  point count:uint) followed by 3 floats per point.
        shape_keys_buffer = []
        fmt_str = ""
        for key_block in mesh_data.shape_keys.key_blocks:
            shape_keys_buffer.extend(
                (key_block.mute, key_block.value, key_block.slider_min, key_block.slider_max,
                 len(key_block.data)))
            fmt_str += f"1I1f1f1f1I{(3 * len(key_block.data))}f"
            for i in range(len(key_block.data)):
                shape_keys_buffer.extend(key_block.data[i].co)
        binary_buffer += struct.pack(f"{fmt_str}", *shape_keys_buffer)
        binary_buffer += common.encode_bool(mesh_data.shape_keys.use_relative)

    # Vertex Groups: bucket (vertex index, weight) pairs by group index.
    verts_per_group = {}
    for vertex_group in obj.vertex_groups:
        verts_per_group[vertex_group.index] = []
    for vert in mesh_data.vertices:
        for vg in vert.groups:
            verts_per_group[vg.group].append((vert.index, vg.weight))

    binary_buffer += common.encode_int(len(obj.vertex_groups))
    for vertex_group in obj.vertex_groups:
        binary_buffer += common.encode_string(vertex_group.name)
        binary_buffer += common.encode_bool(vertex_group.lock_weight)
        binary_buffer += common.encode_int(
            len(verts_per_group[vertex_group.index]))
        for vg_elmt in verts_per_group[vertex_group.index]:
            binary_buffer += common.encode_int(vg_elmt[0])
            binary_buffer += common.encode_float(vg_elmt[1])

    # Normals
    binary_buffer += common.encode_bool(mesh_data.use_auto_smooth)
    binary_buffer += common.encode_float(mesh_data.auto_smooth_angle)
    binary_buffer += common.encode_bool(mesh_data.has_custom_normals)
    if mesh_data.has_custom_normals:
        mesh_data.calc_normals_split(
        )  # Required otherwise all normals are (0, 0, 0)
        normals = []
        for loop in mesh_data.loops:
            normals.extend((*loop.normal, ))
        binary_buffer += struct.pack(f"{len(normals)}f", *normals)

    # UV Maps: only names and active-render flags here (data is elsewhere).
    for uv_layer in mesh_data.uv_layers:
        binary_buffer += common.encode_string(uv_layer.name)
        binary_buffer += common.encode_bool(uv_layer.active_render)

    # Vertex Colors: likewise, metadata only.
    for vertex_colors in mesh_data.vertex_colors:
        binary_buffer += common.encode_string(vertex_colors.name)
        binary_buffer += common.encode_bool(vertex_colors.active_render)

    # Free the temporary evaluated mesh created by to_mesh() above.
    if obj.type != "MESH":
        obj.to_mesh_clear()
    return binary_buffer