def __bytes__(self):
    """
    Convert this Field to a byte sequence that is ready to be
    transmitted over the wire.

    :return: The FIX-compliant, raw byte sequence for this Field
        (``tag=value`` followed by the SOH delimiter).
    """
    encoded_tag = utils.encode(self.tag)
    encoded_value = utils.encode(self.value)
    return b"".join((encoded_tag, b"=", encoded_value, settings.SOH))
def routing_id_group_pairs(routing_id_group):
    """Returns a list of (tag, value) tuples for the repeating group"""
    # First pair identifies the group itself: its (encoded) tag and its size.
    pairs = [(utils.encode(routing_id_group.tag), routing_id_group.size)]
    # Then one (encoded tag, value) pair per field of every group instance.
    for instance in routing_id_group:
        for tag, value in instance.values():
            pairs.append((utils.encode(tag), value))
    return pairs
async def listen(self):
    """
    Listen for new messages that are sent by the server.

    Reads the stream one FIX message at a time in three chunks (begin
    string, start of the checksum field, trailing SOH delimiter) and hands
    each complete message to the pipeline. If the connection closes mid-read
    while a logout message is in the buffer, that logout is still processed
    and the pipeline is stopped; any other incomplete read — or a stream
    buffer overrun — is logged and re-raised.

    :raises IncompleteReadError: if EOF is reached mid-message and the
        partial data is not a logout message.
    :raises LimitOverrunError: if the stream buffer limit is exceeded
        before a complete message could be read.
    """
    begin_string = utils.encode(
        f"{connection.protocol.Tag.BeginString}="
    ) + utils.encode(settings.BEGIN_STRING)
    checksum_start = settings.SOH + utils.encode(
        f"{connection.protocol.Tag.CheckSum}="
    )
    data = []
    while not self.writer.transport.is_closing():  # Listen forever for new messages
        try:
            # Try to read a complete message.
            data = await self.reader.readuntil(
                begin_string
            )  # Detect beginning of message.
            # TODO: should there be a timeout for reading an entire message?
            data += await self.reader.readuntil(
                checksum_start
            )  # Detect start of checksum field.
            data += await self.reader.readuntil(
                settings.SOH
            )  # Detect final message delimiter.
            await self.pipeline.receive(data)
            data = None  # Buffer fully consumed.
        except IncompleteReadError as e:
            # Connection was closed before a complete message could be received.
            # Fix: 'data' is None once the previous message has been consumed, so
            # an EOF before any new bytes arrive used to crash the containment
            # test with a TypeError. Guard on 'data' first (consistent with the
            # newer listen() implementation).
            if data and (
                utils.encode(
                    f"{connection.protocol.Tag.MsgType}={connection.protocol.MsgType.Logout}"
                )
                + settings.SOH
                in data
            ):
                await self.pipeline.receive(
                    data
                )  # Process logout message in the pipeline as per normal
                asyncio.create_task(self.pipeline.stop())
                return  # Stop trying to listen for more messages.
            else:
                logger.error(
                    f"{self.name}: Unexpected EOF waiting for next chunk of partial data "
                    f"'{utils.decode(e.partial)}' ({e})."
                )
                raise e
        except LimitOverrunError as e:
            # Buffer limit reached before a complete message could be read - abort!
            logger.error(
                f"{self.name}: Stream reader buffer limit exceeded! ({e})."
            )
            raise e
def test_constructing_with_fix_null_value(self):
    """Every representation of the FIX null value should construct a Field equal to None."""
    null_representations = (
        utils.encode(utils.null),
        utils.null,
        str(utils.null),
        None,
    )
    for raw_null in null_representations:
        assert Field(1, raw_null) == None  # noqa
        assert Field(1, raw_null).value is None
async def listen(self):
    """
    Listen for new messages that are sent by the server.

    Reads the stream one FIX message at a time in three chunks (begin
    string, start of the checksum field, trailing SOH delimiter) and hands
    each complete message to the pipeline. A logout message that arrives as
    the connection closes is still processed; any other error stops the
    pipeline with the offending exception.
    """
    # Delimiters used to carve individual messages out of the byte stream.
    begin_string = utils.encode(f"{connection.protocol.Tag.BeginString}="
                                ) + utils.encode(settings.BEGIN_STRING)
    checksum_start = settings.SOH + utils.encode(
        f"{connection.protocol.Tag.CheckSum}=")
    data = []
    try:
        while not self.writer.is_closing(
        ):  # Listen forever for new messages
            try:
                # Try to read a complete message.
                data = await self.reader.readuntil(
                    begin_string)  # Detect beginning of message.
                # TODO: should there be a timeout for reading an entire message?
                data += await self.reader.readuntil(
                    checksum_start)  # Detect start of checksum field.
                data += await self.reader.readuntil(
                    settings.SOH)  # Detect final message delimiter.
                await self.pipeline.receive(data)
                data = None  # Buffer fully consumed - guards the EOF check below.
            except IncompleteReadError:
                # 'data' is None when EOF arrived before any bytes of a new
                # message were read; only inspect it if a partial read exists.
                if (data and utils.encode(
                        f"{connection.protocol.Tag.MsgType}={connection.protocol.MsgType.Logout}"
                ) + settings.SOH in data):
                    # Connection was closed before a complete message could be received.
                    await self.pipeline.receive(
                        data
                    )  # Process logout message in the pipeline as per normal
                    break
                else:
                    # Something else went wrong, re-raise
                    raise
    except Exception as e:
        # Unhandled exception - abort! Stop the pipeline asynchronously,
        # passing along the exception that caused the shutdown.
        asyncio.create_task(self.pipeline.stop(e))
def test_decode_message_raises_exception_if_no_beginstring(
        self, encoder_app, decoder_app):
    """Decoding a message whose BeginString (tag 8) field was stripped must raise ParsingError."""
    with pytest.raises(ParsingError):
        message = generic_message_factory(
            (
                connection.protocol.Tag.MsgType,
                connection.protocol.MsgType.TestRequest,
            ),
            (connection.protocol.Tag.MsgSeqNum, 1),
            (connection.protocol.Tag.TestReqID, "a"),
            (
                connection.protocol.Tag.SendingTime,
                datetime.utcnow().strftime(settings.DATETIME_FORMAT)[:-3],
            ),
        )
        # Encode a valid message, then strip its BeginString field out.
        encoded = encoder_app.encode_message(message)
        begin_string_field = b"8=" + utils.encode(settings.BEGIN_STRING)
        mangled = encoded.replace(begin_string_field, b"")
        decoder_app.decode_message(mangled)
async def send_test_request(self):
    """ Checks if the server is responding to TestRequest messages. """
    # Use a fresh unique ID so the response can be matched to this request.
    self._test_request_id = uuid.uuid4().hex
    logger.warning(
        f"{self.name}: Heartbeat exceeded, sending test request '{self._test_request_id}'..."
    )

    test_request = admin.TestRequestMessage(
        utils.encode(self._test_request_id))
    # Don't need to block while request is sent
    asyncio.create_task(self.send(test_request))

    # Sleep while we wait for a response on the test request
    await asyncio.sleep(self.test_request_response_delay)

    if self.is_waiting():
        self._server_not_responding.set()
def test_encode_str():
    """A str input should be encoded to the equivalent byte string."""
    encoded = utils.encode("abc")
    assert encoded == b"abc"
def test_encode_bytearray():
    """A bytearray input should be encoded to an immutable byte string."""
    source = bytearray("abc", encoding="utf-8")
    assert utils.encode(source) == b"abc"
def test_encode_bytes():
    """A bytes input should pass through encoding unchanged."""
    encoded = utils.encode(b"abc")
    assert encoded == b"abc"
def test_encode_none():
    """Encoding None should yield the same bytes as encoding the null marker."""
    encoded_none = utils.encode(None)
    encoded_null = utils.encode(utils.null)
    assert encoded_none == encoded_null
def test_bytes_converts_none_to_null(self):
    """Serializing a Field with a None value should emit the encoded null marker."""
    field = Field(1, None)
    expected = b"1=" + utils.encode(utils.null) + settings.SOH
    assert bytes(field) == expected
def test_null_value_casting(self):
    """A Field built from any null representation should compare equal to None."""
    for null_value in (utils.null, str(utils.null),
                       utils.encode(utils.null)):
        assert Field(1, null_value) == None  # noqa
def test_encode_float():
    """A float input should be encoded as its decimal string in bytes."""
    encoded = utils.encode(1.23)
    assert encoded == b"1.23"
def encode_message(self, message: FIXMessage) -> bytes:
    """
    :param message: The message to encode.
    :return: The FIX-compliant, raw binary string representation for this
        message with freshly generated header tags.
    """
    try:
        message = message.result()
    except AttributeError:
        # Not an Unfuture - use as-is
        pass

    # Make sure the message is valid before attempting to encode.
    message.validate()

    # Fill in missing comp IDs from the client session.
    session = self.pipeline.apps[ClientSessionApp.name]
    if message.sender_id is None:
        message.sender_id = session.sender
    if message.target_id is None:
        message.target_id = session.target

    def encode_field(tag, value):
        # One 'tag=value<SOH>' fragment.
        return utils.encode(f"{tag}=") + utils.encode(value) + settings.SOH

    # Standard header fields that are regenerated for every message.
    body = (
        encode_field(connection.protocol.Tag.MsgType, message.type)
        + encode_field(connection.protocol.Tag.MsgSeqNum, message.seq_num)
        + encode_field(connection.protocol.Tag.SenderCompID, message.sender_id)
        + encode_field(connection.protocol.Tag.SendingTime,
                       str(message.SendingTime))
        + encode_field(connection.protocol.Tag.TargetCompID, message.target_id)
    )

    for field in message.fields:
        if field.tag in self.DYNAMIC_TAGS:
            # These tags will be generated - ignore.
            continue
        body += bytes(field)

    # BodyLength is computed over the assembled body; checksum over header + body.
    header = (
        encode_field(connection.protocol.Tag.BeginString,
                     settings.BEGIN_STRING)
        + encode_field(connection.protocol.Tag.BodyLength, len(body))
    )
    trailer = encode_field(
        connection.protocol.Tag.CheckSum,
        f"{utils.calculate_checksum(header + body):03}",
    )

    return header + body + trailer
def test_encode_int():
    """An int input should be encoded as its decimal string in bytes."""
    encoded = utils.encode(123)
    assert encoded == b"123"
def test_encode_bool():
    """Booleans should encode to the FIX boolean flags Y / N."""
    for value, expected in ((True, b"Y"), (False, b"N")):
        assert utils.encode(value) == expected