def assert_stream_equals(self, a_stream: CommandStream, b_stream: CommandStream, msg: str = None):
    """Compare two command streams on a fixed subset of message types.

    Only BLENDER_DATA_REMOVE/RENAME/UPDATE messages are examined; their
    payloads are decoded from JSON and compared with assertDictAlmostEqual
    so that float members compare approximately rather than bit-for-bit.
    """
    stream_a, stream_b = a_stream.data, b_stream.data
    self.assertEqual(stream_a.keys(), stream_b.keys())

    # Message types that take part in the comparison; everything else is skipped.
    compared_types = {
        MessageType.BLENDER_DATA_REMOVE,
        MessageType.BLENDER_DATA_RENAME,
        MessageType.BLENDER_DATA_UPDATE,
    }
    for key in stream_a.keys():
        if key not in compared_types:
            continue
        message_type = str(MessageType(key))
        message_count = len(stream_a[key])
        if message_count:
            logger.info(f"Message count for {message_type:16} : {message_count}")

        # Optional per-type count check, driven by the test's expected_counts table.
        expected_count = self.expected_counts.get(key)
        if expected_count is not None:
            self.assertEqual(
                expected_count,
                message_count,
                f"Unexpected message count for message {message_type}. Expected {expected_count}: found {message_count}",
            )

        # Pairwise content comparison of the decoded JSON payloads.
        for i, buffers in enumerate(zip(stream_a[key], stream_b[key])):
            dicts = [json.loads(decode_string(buffer, 0)[0]) for buffer in buffers]
            self.assertDictAlmostEqual(*dicts, f"content mismatch for {message_type} {i}")
def assert_stream_equals(self, a_stream: CommandStream, b_stream: CommandStream, msg: str = None):
    """Compare two command streams via raw buffer equality.

    Message counts are logged for every type, but content and count checks
    are skipped for types whose encoded buffers may legitimately differ.
    """
    stream_a, stream_b = a_stream.data, b_stream.data
    self.assertEqual(stream_a.keys(), stream_b.keys())

    # TODO clarify why we need to ignore TRANSFORM (float comparison)
    skipped_types = (
        MessageType.TRANSFORM,
        MessageType.BLENDER_DATA_REMOVE,
        MessageType.BLENDER_DATA_RENAME,
        MessageType.BLENDER_DATA_UPDATE,
    )
    for key in stream_a.keys():
        message_type = str(MessageType(key))
        message_count = len(stream_a[key])
        if message_count:
            logger.info(f"Message count for {message_type:16} : {message_count}")
        if key in skipped_types:
            continue

        # Optional per-type count check, driven by the test's expected_counts table.
        expected_count = self.expected_counts.get(key)
        if expected_count is not None:
            self.assertEqual(
                expected_count,
                message_count,
                f"Unexpected message count for message {message_type}. Expected {expected_count}: found {message_count}",
            )
        self.assertEqual(stream_a[key], stream_b[key], f"content mismatch for {message_type}")
def assert_stream_equals(self, streams_a: CommandStream, streams_b: CommandStream, msg: str = None):
    """Assert that two recorded command streams are equivalent.

    Message types with a registered codec are decoded so their float members
    can be compared approximately; all other types are compared on sorted raw
    buffers. Types listed in self.ignored_messages are skipped entirely.

    Raises:
        self.failureException: on count, type or content mismatch.
    """
    self.assertEqual(streams_a.commands.keys(), streams_b.commands.keys())

    def decode_and_sort_messages(commands: List[Command]) -> List[mixer.codec.Message]:
        # Decode, then sort so that ordering differences between streams do not matter.
        # (fixed annotation: the input is a list of commands, not of decoded messages)
        stream = [mixer.codec.decode(c) for c in commands]
        stream.sort()
        return stream

    def sort_buffers(commands: List[Command]) -> List[bytes]:
        # Raw-buffer path: sort to make the comparison order independent.
        stream = [c.data for c in commands]
        stream.sort()
        return stream

    message_types = streams_a.commands.keys() - self.ignored_messages
    for message_type in message_types:
        commands_a, commands_b = streams_a.commands[message_type], streams_b.commands[message_type]
        len_a = len(commands_a)
        if len_a == 0:
            continue
        message_name = str(MessageType(message_type))
        logger.info(f"Message count for {message_name:16} : {len_a}")

        # Equality tests required to handle float comparison.
        # This prevents us from using raw buffer comparison if they contain floats,
        # so decode the messages that contain floats.
        # Due to a lack of time not all decodable message classes are implemented.
        if mixer.codec.is_registered(message_type):
            decoded_stream_a = decode_and_sort_messages(commands_a)
            decoded_stream_b = decode_and_sort_messages(commands_b)
            if message_type in {MessageType.BLENDER_DATA_CREATE, MessageType.BLENDER_DATA_UPDATE}:
                string_a = "\n".join([message.proxy_string for message in decoded_stream_a])
                string_b = "\n".join([message.proxy_string for message in decoded_stream_b])
            else:
                string_a = "\n".join([str(message) for message in decoded_stream_a])
                string_b = "\n".join([str(message) for message in decoded_stream_b])
            detail_message = f"Stream_a\n{string_a}\nStream_b\n{string_b}\n"
            if len(decoded_stream_a) != len(decoded_stream_b):
                # Fix: the exception was previously constructed but never raised,
                # so length mismatches were silently ignored on this path.
                raise self.failureException(f"{message_type} : sequence length mismatch:\n{detail_message}")

            expected_count = self.expected_counts.get(message_type)
            if expected_count is not None:
                self.assertEqual(
                    expected_count,
                    len_a,
                    f"Unexpected message count for message {message_name}. Expected {expected_count}: found {len_a}\n{detail_message}",
                )

            def decode_proxy_strings(stream):
                # HACK do not hardcode
                for decoded in stream:
                    proxy_string = getattr(decoded, "proxy_string", None)
                    if proxy_string is not None:
                        decoded.proxy_string = json.loads(proxy_string)

            decode_proxy_strings(decoded_stream_a)
            decode_proxy_strings(decoded_stream_b)

            for i, (decoded_a, decoded_b) in enumerate(zip(decoded_stream_a, decoded_stream_b)):
                # TODO there another failure case with floats as they will cause sort differences for proxies
                # we actually need to sort on something else, that the encoded json of the proxy, maybe the uuid
                self.assertIs(
                    type(decoded_a),
                    type(decoded_b),
                    f"{message_name}: Type mismatch at decoded message mismatch at index {i}",
                )

            if message_type == MessageType.BLENDER_DATA_CREATE:
                # Fix: build the (name, collection) summaries once, instead of
                # rebuilding both whole-stream lists inside the per-index loop
                # (accidental O(n^2) with a misleading per-index message).
                short_a = [
                    (message.proxy_string["_data"]["name"], message.proxy_string["_bpy_data_collection"])
                    for message in decoded_stream_a
                ]
                short_b = [
                    (message.proxy_string["_data"]["name"], message.proxy_string["_bpy_data_collection"])
                    for message in decoded_stream_b
                ]
                self.assertListEqual(short_a, short_b, f"Mismatch for {message_name}")

            for i, (decoded_a, decoded_b) in enumerate(zip(decoded_stream_a, decoded_stream_b)):
                self.assert_any_almost_equal(
                    decoded_a, decoded_b, f"{message_name}: decoded message mismatch at index {i}"
                )
        else:
            buffer_stream_a = sort_buffers(commands_a)
            buffer_stream_b = sort_buffers(commands_b)
            len_a = len(buffer_stream_a)
            len_b = len(buffer_stream_b)
            if len_a != len_b:

                def dump(buffers):
                    strings = [str(b) for b in buffers]
                    return "\n".join(strings)

                string_a = dump(buffer_stream_a)
                string_b = dump(buffer_stream_b)
                message = f"Stream_a ({len_a} elements)\n{string_a}\n\nStream_b ({len_b} elements)\n{string_b}\n"
                raise self.failureException(f"\n{message_name} : sequence length mismatch:\n{message}")
            for i, (buffer_a, buffer_b) in enumerate(zip(buffer_stream_a, buffer_stream_b)):
                self.assertIs(type(buffer_a), type(buffer_b))
                self.assert_any_almost_equal(
                    buffer_a, buffer_b, f"{message_name}: encoded buffer mismatch at index {i}"
                )
def assert_stream_equals(
    self, streams_a: CommandStream, streams_b: CommandStream, msg: str = None, ignore: Iterable[str] = ()
):
    """Assert that two recorded command streams are equivalent.

    Message types with a registered codec are decoded so their float members
    can be compared approximately (attribute names in `ignore` are excluded
    from that comparison); all other types are compared on sorted raw buffers.
    Types listed in self.ignored_messages are skipped entirely. File paths in
    BLENDER_DATA_CREATE / BLENDER_DATA_MEDIA payloads are reduced to their
    basename because workspace folders differ between the compared sessions.

    Raises:
        self.failureException: on count, type or content mismatch.
    """
    self.assertEqual(streams_a.commands.keys(), streams_b.commands.keys())
    for k in streams_a.commands.keys():
        len_a = len(streams_a.commands[k])
        len_b = len(streams_b.commands[k])
        self.assertEqual(len_a, len_b, f"Command count mismatch for {MessageType(k)!r}: {len_a} vs {len_b}")

    def decode_and_sort_messages(commands: List[Command]) -> List[mixer.codec.Message]:
        # Decode, then sort so that ordering differences between streams do not matter.
        stream = [mixer.codec.decode(c) for c in commands]
        stream.sort()
        return stream

    def sort_buffers(commands: List[Command]) -> List[bytes]:
        # Raw-buffer path: sort to make the comparison order independent.
        stream = [c.data for c in commands]
        stream.sort()
        return stream

    message_types = streams_a.commands.keys() - self.ignored_messages
    for message_type in message_types:
        commands_a, commands_b = streams_a.commands[message_type], streams_b.commands[message_type]
        len_a = len(commands_a)
        if len_a == 0:
            continue
        message_name = str(MessageType(message_type))
        logger.info(f"Message count for {message_name:16} : {len_a}")

        # Equality tests required to handle float comparison.
        # This prevents us from using raw buffer comparison if they contain floats,
        # so decode the messages that contain floats.
        # Due to a lack of time not all decodable message classes are implemented.
        if mixer.codec.is_registered(message_type):
            decoded_stream_a = decode_and_sort_messages(commands_a)
            decoded_stream_b = decode_and_sort_messages(commands_b)
            if message_type in {MessageType.BLENDER_DATA_CREATE, MessageType.BLENDER_DATA_UPDATE}:
                string_a = "\n".join([message.proxy_string for message in decoded_stream_a])
                string_b = "\n".join([message.proxy_string for message in decoded_stream_b])
            else:
                string_a = "\n".join([str(message) for message in decoded_stream_a])
                string_b = "\n".join([str(message) for message in decoded_stream_b])
            detail_message = f"Stream_a\n{string_a}\nStream_b\n{string_b}\n"
            if len(decoded_stream_a) != len(decoded_stream_b):
                # Fix: the exception was previously constructed but never raised,
                # so length mismatches were silently ignored on this path.
                raise self.failureException(f"{message_type} : sequence length mismatch:\n{detail_message}")

            expected_count = self.expected_counts.get(message_type)
            if expected_count is not None:
                self.assertEqual(
                    expected_count,
                    len_a,
                    f"Unexpected message count for message {message_name}. Expected {expected_count}: found {len_a}\n{detail_message}",
                )

            def decode_proxy_strings(stream):
                # HACK do not hardcode
                for decoded in stream:
                    proxy_string = getattr(decoded, "proxy_string", None)
                    if proxy_string is not None:
                        decoded.proxy_string = json.loads(proxy_string)

            decode_proxy_strings(decoded_stream_a)
            decode_proxy_strings(decoded_stream_b)

            for i, (decoded_a, decoded_b) in enumerate(zip(decoded_stream_a, decoded_stream_b)):
                # TODO there another failure case with floats as they will cause sort differences for proxies
                # we actually need to sort on something else, that the encoded json of the proxy, maybe the uuid
                self.assertIs(
                    type(decoded_a),
                    type(decoded_b),
                    f"{message_name}: Type mismatch at decoded message mismatch at index {i}",
                )

            # Fix: the per-type normalization below is whole-stream and idempotent,
            # so run it once after the type checks instead of once per index.
            if message_type == MessageType.BLENDER_DATA_CREATE:

                def identifier(message):
                    # Stable summary of a datablock creation, for order-insensitive comparison.
                    return (
                        message.proxy_string["_datablock_uuid"],
                        message.proxy_string["_bpy_data_collection"],
                        message.proxy_string["_data"].get("name"),
                    )

                def patch(message):
                    # remove folder part, that differs when workspace folders differ
                    # process only create since room grabbing only generates CREATE messages
                    proxy = message.proxy_string
                    if "_filepath_raw" in proxy:
                        filename = Path(proxy["_filepath_raw"]).name
                        proxy["_filepath_raw"] = filename
                        proxy["_data"]["filepath"] = filename
                        proxy["_data"]["filepath_raw"] = filename

                short_a, short_b = [], []
                for a, b in zip(decoded_stream_a, decoded_stream_b):
                    short_a.append(identifier(a))
                    short_b.append(identifier(b))
                    patch(a)
                    patch(b)
                self.assertListEqual(short_a, short_b, f"Mismatch for {message_name}")
            elif message_type == MessageType.BLENDER_DATA_MEDIA:
                # workspaces are not set for room grabbing and BLENDER_DATA_MEDIA are always received
                # although they were not part of the test operation with workspaces
                for a, b in zip(decoded_stream_a, decoded_stream_b):
                    # remove folder part, that differs when workspace folders differ
                    a.path = Path(a.path).name
                    b.path = Path(b.path).name

            for i, (decoded_a, decoded_b) in enumerate(zip(decoded_stream_a, decoded_stream_b)):
                self.assert_any_almost_equal(
                    decoded_a, decoded_b, f"{message_name}: decoded message mismatch at index {i}", ignore=ignore
                )
        else:
            buffer_stream_a = sort_buffers(commands_a)
            buffer_stream_b = sort_buffers(commands_b)
            len_a = len(buffer_stream_a)
            len_b = len(buffer_stream_b)
            if len_a != len_b:

                def dump(buffers):
                    strings = [str(b) for b in buffers]
                    return "\n".join(strings)

                string_a = dump(buffer_stream_a)
                string_b = dump(buffer_stream_b)
                message = f"Stream_a ({len_a} elements)\n{string_a}\n\nStream_b ({len_b} elements)\n{string_b}\n"
                raise self.failureException(f"\n{message_name} : sequence length mismatch:\n{message}")
            for i, (buffer_a, buffer_b) in enumerate(zip(buffer_stream_a, buffer_stream_b)):
                self.assertIs(type(buffer_a), type(buffer_b))
                self.assert_any_almost_equal(
                    buffer_a, buffer_b, f"{message_name}: encoded buffer mismatch at index {i}", ignore=ignore
                )