Example #1
    def write(self, value, sideEffects=False):
        """
        Write a new value to the register. The value may be a bytearray or
        a value appropriate to the register type.
        """

        reg = [CADI.CADIReg(self._info.regNumber, 0, i, False, self._info.attribute) for i in xrange(self.__reg_count)]
        full_array = bytearray(self.__reg_count * 16)
        if isinstance(value, bytearray):
            full_array[0 : len(value)] = value
        elif self.__conversion_str is not None:
            if self.__conversion_str != "":
                pack_into(self.__conversion_str, full_array, 0, value)
        elif self._info.display == CADI.CADI_REGTYPE_SYMBOL:
            if value not in self.__symbols:
                raise ValueError('"%s" is not a valid symbol for register %s' % (value, self._info.name))
            pack_into("<I", full_array, 0, self.__symbols.index(value))
        elif self._info.display == CADI.CADI_REGTYPE_STRING:
            full_array[0 : len(value)] = value
        else:
            raise TypeError("Expecting a bytearray or %s" % self.__type_str)
        offset = 0
        for r in reg:
            r.bytes = full_array[offset : (offset + 16)]
            offset += 16
        self.__cadi.CADIRegWrite(reg, sideEffects)
    def pack_header(self):
        """
        Given a type and length, pack a header to be sent to the port agent.
        """
        if self.__data is None:
            log.error('pack_header: no data!')

        else:
            # Set the packet type if it was not passed in as parameter
            if self.__type is None:
                self.__type = self.DATA_FROM_DRIVER
            self.set_data_length(len(self.__data))
            self.set_timestamp()

            variable_tuple = (0xa3, 0x9d, 0x7a, self.__type,
                              self.__length + HEADER_SIZE, 0x0000,
                              self.__port_agent_timestamp)

            header_format = '>BBBBHHd'
            header_size = struct.calcsize(header_format)
            temp_header = ctypes.create_string_buffer(header_size)
            struct.pack_into(header_format, temp_header, 0, *variable_tuple)
            self.__header = temp_header.raw

            # do the checksum last, since the checksum needs to include the
            # populated header fields.
            # NOTE: This method is only used for test; messages TO the port_agent
            # do not include a header (as I mistakenly believed when I wrote
            # this)
            self.__checksum = self.calculate_checksum()
            self.__recv_checksum = self.__checksum
Example #3
 def sendChatMessageOIE(self, message):
     self.typemsgenvoye=4
     typeMsg=4
     ack=0
     res=0
     ack_num=0
     lengthDatagram=len(message)+len(self.userName)+9
     Message=message
     buf = ctypes.create_string_buffer(lengthDatagram)
     header=(typeMsg<<28)|(ack<<27)|(res<<26)|(ack_num<<13)|(self.seq_num)
     if(self.RoomName==ROOM_IDS.MAIN_ROOM):
         print "envoie de message main room cote client"
         formatDatagram=">LHHLH"+str(len(Message))+"s"
         print "********************%d"%self.userId
         struct.pack_into(formatDatagram, buf,0,header,lengthDatagram,self.userId,0,0,Message)
         self.EnvoiMsg(buf)
     else:
         print "on a envoyé la request pour chat movie room"
         Movie=self.MovieStore.getMovieByTitle(self.RoomName)
         IpAdressMovie=Movie.movieIpAddress
         MoviePort=Movie.moviePort
         Tab=IpAdressMovie.split('.')
         formatDatagram=">LHHBBBBH"+str(len(Message))+"s"
         struct.pack_into(formatDatagram, buf,0,header,lengthDatagram,self.userId,int(Tab[0]),int(Tab[1]),int(Tab[2]),int(Tab[3]),MoviePort,Message)
         self.EnvoiMsg(buf)
     print "Request send chat Msg envoyé"
Example #4
def _boot_pkt(socket, host, op, a1, a2, a3, data=None, offset=0, datasize=0):
    """
    Packs the given data into the format required by the Boot ROM program that
    executes automatically when SpiNNaker is first turned on.

    :param socket: stream to write the command to
    :param host: hostname of the target SpiNNaker
    :param op:   boot ROM command
    :param a1:   argument 1 -- varies with ``op``
    :param a2:   argument 2 -- varies with ``op``
    :param a3:   argument 3 -- varies with ``op``
    :param data: optional data
    :param offset: the offset into the data to start from
    :param datasize: the maximum amount of data to write from the data

    """

    if data is not None:
        pkt_data = numpy.zeros(datasize + 18, dtype=numpy.uint8)
        struct.pack_into(">HLLLL", pkt_data, 0, BOOT_PROT_VER, op, a1, a2, a3)
        off = 0
        readsize = datasize
        if (offset + readsize) > data.size:
            readsize = data.size - offset
        while off < readsize:
            the_word = struct.unpack_from("<I", data, offset + off)[0]
            struct.pack_into(">I", pkt_data, 18 + off, the_word)
            off += 4
        socket.sendto(pkt_data, (host, BOOT_PORT))
    else:
        hdr = struct.pack(">HLLLL", BOOT_PROT_VER, op, a1, a2, a3)
        socket.sendto(hdr, (host, BOOT_PORT))
    time.sleep(BOOT_DELAY)
Example #5
 def sendJoinRoomRequestOIE(self,roomName):
     self.RoomName=roomName      # this used to be self.thisRoomName???
     ack_num=0
     res=0
     ack=0
     if (roomName!=ROOM_IDS.MAIN_ROOM):
         self.userStatus=userStatus['waitingfMovieRoomUserList']
         self.typemsgenvoye="6"
         typeMsg=6
         msgLength=14
         Movie=self.MovieStore.getMovieByTitle(roomName)
         print Movie.movieTitle
         IpAdressMovie=Movie.movieIpAddress
         print IpAdressMovie
         MoviePort=Movie.moviePort
         buf=ctypes.create_string_buffer(14)
         header=(typeMsg<<28)|(ack<<27)|(res<<26)|(ack_num<<13)|(self.seq_num)
         Tab=IpAdressMovie.split('.')
         print Tab
         struct.pack_into(">LHHBBBBH", buf,0,header,msgLength,self.userId,int(Tab[0]),int(Tab[1]),int(Tab[2]),int(Tab[3]),MoviePort)
         self.EnvoiMsg(buf)
     else:
         typeMsg=7
         self.userStatus=userStatus['waitingMainRoom']
         self.typemsgenvoye="7"
         msgLength=8
         buf=ctypes.create_string_buffer(8)
         header=(typeMsg<<28)|(ack<<27)|(res<<26)|(ack_num<<13)|(self.seq_num)
         struct.pack_into(">LHH", buf,0,header,msgLength,self.userId)
         self.EnvoiMsg(buf)
     print "Request join movie room envoyé"
Example #6
    def __setitem__(self, position, value):
        try:
            offset = self._offset_data_start \
                     + sum(self._allocated_bytes[:position])
            current_format = self._get_packing_format(position)
        except IndexError:
            raise IndexError("assignment index out of range")

        if not isinstance(value, (str, bytes)):
            new_format = self._types_mapping[type(value)]
        else:
            if len(value) > self._allocated_bytes[position]:
                raise ValueError("exceeds available storage for existing str")
            if current_format[-1] == "s":
                new_format = current_format
            else:
                new_format = self._types_mapping[str] % (
                    self._allocated_bytes[position],
                )

        self._set_packing_format_and_transform(
            position,
            new_format,
            value
        )
        value = value.encode(_encoding) if isinstance(value, str) else value
        struct.pack_into(new_format, self.shm.buf, offset, value)
Example #7
  def SerializeArray(self, value, data_offset, data, handle_offset):
    size = (serialization.HEADER_STRUCT.size +
            self.sub_type.GetByteSize() * len(value))
    data_end = len(data)
    position = len(data) + serialization.HEADER_STRUCT.size
    data.extend(bytearray(size +
                          serialization.NeededPaddingForAlignment(size)))
    returned_handles = []
    to_pack = []
    for item in value:
      (new_data, new_handles) = self.sub_type.Serialize(
          item,
          len(data) - position,
          data,
          handle_offset + len(returned_handles))
      to_pack.extend(serialization.Flatten(new_data))
      returned_handles.extend(new_handles)
      position = position + self.sub_type.GetByteSize()

    serialization.HEADER_STRUCT.pack_into(data, data_end, size, len(value))
    # TODO(azani): Refactor so we don't have to create big formatting strings.
    struct.pack_into(('%s' % self.sub_type.GetTypeCode()) * len(value),
                     data,
                     data_end + serialization.HEADER_STRUCT.size,
                     *to_pack)
    return (data_offset, returned_handles)
Example #8
    def updateDossierCompDescr(self, dossierCompDescrArray, offset, size):
        length = len(self.__data)
        newSize = length * self.__itemSize
        if self.__isExpanded:
            fmt = '<' + self.__itemFormat * length
            values = []
            for key, value in self.__data.iteritems():
                values += self.__itemToList(key, value)

            if newSize == size:
                struct.pack_into(fmt, dossierCompDescrArray, offset, *values)
                return (dossierCompDescrArray, newSize)
            return (dossierCompDescrArray[:offset] + array('c', struct.pack(fmt, *values)) + dossierCompDescrArray[offset + size:], newSize)
        offsets = self.__offsets
        for key in self.__changed:
            recordOffset = offset + offsets[key]
            struct.pack_into(('<' + self.__itemFormat), dossierCompDescrArray, recordOffset, *self.__itemToList(key, self.__data[key]))

        self.__changed.clear()
        if self.__added:
            values = []
            recordOffset = size
            for key in self.__added:
                values += self.__itemToList(key, self.__data[key])
                offsets[key] = recordOffset
                recordOffset += self.__itemSize

            fmt = '<' + self.__itemFormat * len(self.__added)
            dossierCompDescrArray = dossierCompDescrArray[:offset + size] + array('c', struct.pack(fmt, *values)) + dossierCompDescrArray[offset + size:]
            self.__added.clear()
        return (dossierCompDescrArray, newSize)
Example #9
    def updateDossierCompDescr(self, dossierCompDescrArray, offset, size):
        length = size / self.__itemSize
        newLength = len(self.__list)
        newSize = newLength * self.__itemSize
        if self.__isExpanded:
            fmt = '<' + self.__itemFormat * newLength
            values = []
            for item in self.__list:
                values += self.__itemToList(item)

            if newSize == size:
                struct.pack_into(fmt, dossierCompDescrArray, offset, *values)
                return (dossierCompDescrArray, newSize)
            return (dossierCompDescrArray[:offset] + array('c', struct.pack(fmt, *values)) + dossierCompDescrArray[offset + size:], newSize)
        for idx in self.__changed:
            if idx < length:
                itemOffset = offset + idx * self.__itemSize
                struct.pack_into(('<' + self.__itemFormat), dossierCompDescrArray, itemOffset, *self.__itemToList(self.__list[idx]))

        self.__changed.clear()
        added = self.__list[length:]
        if added:
            values = []
            for item in added:
                values += self.__itemToList(item)

            fmt = '<' + self.__itemFormat * len(added)
            dossierCompDescrArray = dossierCompDescrArray[:offset + size] + array('c', struct.pack(fmt, *values)) + dossierCompDescrArray[offset + size:]
        return (dossierCompDescrArray, newSize)
Example #10
def blockListPacket(context, data):
	data = bytearray(data)
	print("[BlockList] Got block list! Updating local cache and rewriting packet...")
	# Jump to 0x28, 0x88 sep
	pos = 0x28
	while pos < len(data) and data[pos] != 0:
		name = data[pos:pos+0x40].decode('utf-16le')
		o1, o2, o3, o4, port = struct.unpack_from('BBBBH', buffer(data), pos+0x40)
		ipStr = "%i.%i.%i.%i" % (o1, o2, o3, o4)
		if port not in blocks.blockList:
			if verbose: print("[BlockList] Discovered new block %s at addr %s:%i! Recording..." % (name, ipStr, port))
			blocks.blockList[port] = (ipStr, name)
		if bNameMode == 0:
			blockstring = ("%s%s:%i" % (name[:6], ipStr, port)).encode('utf-16le')
			struct.pack_into('%is' % len(blockstring), data, pos, blockstring)
			if len(blockstring) < 0x40:
				struct.pack_into('%ix' % (0x40 - len(blockstring)), data, pos + len(blockstring))
		elif bNameMode == 1 and name[:5] in config.blockNames:
			blockstring = config.blockNames[name[:5]].encode('utf-16le')
			struct.pack_into('%is' % len(blockstring), data, pos, blockstring)
			if len(blockstring) < 0x40:
				struct.pack_into('%ix' % (0x40 - len(blockstring)), data, pos + len(blockstring))
		struct.pack_into('BBBB', data, pos+0x40, o1, o2, o3, o4)
		pos += 0x88

	return str(data)
Example #11
    def pack_into(self, buff, offset):
        """Serialize and write to ``buff`` starting at offset ``offset``.

        Intentionally follows the pattern of ``struct.pack_into``

        :param buff: The buffer to write into
        :param offset: The offset to start the write at
        """
        if self.partition_key is None:
            fmt = "!BBii%ds" % len(self.value)
            args = (self.MAGIC, self.compression_type, -1, len(self.value), self.value)
        else:
            fmt = "!BBi%dsi%ds" % (len(self.partition_key), len(self.value))
            args = (
                self.MAGIC,
                self.compression_type,
                len(self.partition_key),
                self.partition_key,
                len(self.value),
                self.value,
            )
        struct.pack_into(fmt, buff, offset + 4, *args)
        fmt_size = struct.calcsize(fmt)
        data = buffer(buff[(offset + 4) : (offset + 4 + fmt_size)])
        crc = crc32(data) & 0xFFFFFFFF
        struct.pack_into("!I", buff, offset, crc)
Example #12
 def changeTone(self, v_to, v_from='C'):
     folder_name = self.samples_folder+'/'+self.instrument.lower()
     to_filename = v_to.lower().replace('#', 's')+'.raw'
     from_filename = v_from.lower().replace('#', 's')+'.raw'
     f = open(folder_name+'/'+from_filename, 'rb')
     spam = f.read()
     f.close()
     i = 0
     ratio = float(self.notes[v_to])/self.notes[v_from]
     out = ctypes.create_string_buffer(int(float(len(spam))/ratio))
     while i<int(float(len(spam))/ratio):
         j = int(float(ratio)*i)
         j += i%4 - j%4
         if j+6<=len(spam):
             tmp1 = struct.unpack('h', spam[j:j+2])[0]
             tmp2 = struct.unpack('h', spam[j+4:j+6])[0]
             tmp = int(tmp1+(tmp2-tmp1)*(ratio-1))
             if tmp>32767:
                 tmp = 32767
             elif tmp<-32768:
                 tmp = -32768
             struct.pack_into('h', out, i, tmp)
         i = i+2
     f = open(folder_name+'/'+to_filename, 'wb')
     f.write(out)
     f.close()
Example #13
def _pack_packet(hostname, service, state, output, timestamp):
    """This is more complicated than a call to struct.pack() because we want
    to pad our strings with random bytes, instead of with zeros."""
    requested_length = struct.calcsize(_data_packet_format)
    packet = array.array('c', '\0'*requested_length)
    # first, pack the version, initial crc32, timestamp, and state
    # (collectively:header)
    header_format = '!hxxLLh'
    offset = struct.calcsize(header_format)
    struct.pack_into('!hxxLLh', packet, 0, PACKET_VERSION, 0, timestamp, state)
    # next, pad & pack the hostname
    hostname = hostname + '\0'
    if len(hostname) < MAX_HOSTNAME_LENGTH:
        hostname += get_random_alphanumeric_bytes(MAX_HOSTNAME_LENGTH - len(hostname))
    struct.pack_into('!%ds' % (MAX_HOSTNAME_LENGTH,), packet, offset, hostname)
    offset += struct.calcsize('!%ds' % (MAX_HOSTNAME_LENGTH,))
    # next, pad & pack the service description
    service = service + '\0'
    if len(service) < MAX_DESCRIPTION_LENGTH:
        service += get_random_alphanumeric_bytes(MAX_DESCRIPTION_LENGTH - len(service))
    struct.pack_into('%ds' % (MAX_DESCRIPTION_LENGTH,), packet, offset, service)
    offset += struct.calcsize('!%ds' % (MAX_DESCRIPTION_LENGTH))
    # finally, pad & pack the plugin output
    output = output + '\0'
    if len(output) < MAX_PLUGINOUTPUT_LENGTH:
        output += get_random_alphanumeric_bytes(MAX_PLUGINOUTPUT_LENGTH - len(output))
    struct.pack_into('%ds' % (MAX_PLUGINOUTPUT_LENGTH,), packet, offset, output)
    # compute the CRC32 of what we have so far
    crc_val = binascii.crc32(packet) & 0xffffffffL
    struct.pack_into('!L', packet, 4, crc_val)
    return packet.tostring()
Example #14
    def serialize(self, payload=None, prev=None):
        present = 0
        hdr = bytearray()
        optional = bytearray()

        if self.checksum is not None:
            present |= GRE_CHECKSUM_FLG

            # For purposes of computing the checksum,
            # the value of the checksum field is zero.
            # Also, because Reserved1 is always 0x00 of 2 bytes,
            # Set in conjunction with checksum.
            optional += b'\x00' * self._CHECKSUM_LEN

        if self._key is not None:
            present |= GRE_KEY_FLG
            optional += struct.pack(self._KEY_PACK_STR, self._key)

        if self.seq_number is not None:
            present |= GRE_SEQUENCE_NUM_FLG
            optional += struct.pack(self._SEQNUM_PACK_STR, self.seq_number)

        msg_pack_into(self._PACK_STR, hdr, 0, present, self.version,
                      self.protocol)

        hdr += optional

        if self.checksum:
            self.checksum = packet_utils.checksum(hdr)
            struct.pack_into(self._CHECKSUM_PACK_STR, hdr, self._MIN_LEN,
                             self.checksum)

        return hdr
    def test_checksum(self):
        """
        This tests the checksum algorithm; if somebody changes the algorithm
        this test should catch it.  Had to jump through some hoops to do this;
        needed to add set_data_length and set_header because we're building our
        own header here (the one in PortAgentPacket includes the timestamp
        so the checksum is not consistent).
        """
        test_data = "This tests the checksum algorithm."
        test_length = len(test_data)
        self.pap.attach_data(test_data)

        # Now build a header
        variable_tuple = (0xa3, 0x9d, 0x7a, self.pap.DATA_FROM_DRIVER,
                          test_length + HEADER_SIZE, 0x0000,
                          0)
        self.pap.set_data_length(test_length)

        header_format = '>BBBBHHd'
        size = struct.calcsize(header_format)
        temp_header = ctypes.create_string_buffer(size)
        struct.pack_into(header_format, temp_header, 0, *variable_tuple)

        # Now set the header member in PortAgentPacket to the header
        # we built
        self.pap.set_header(temp_header.raw)

        # Now get the checksum and verify it is what we expect it to be.
        checksum = self.pap.calculate_checksum()
        self.assertEqual(checksum, 2)
Example #16
    def serialize(self, payload, prev):
        offset = self.offset << 4
        h = bytearray(struct.pack(
            tcp._PACK_STR, self.src_port, self.dst_port, self.seq,
            self.ack, offset, self.bits, self.window_size, self.csum,
            self.urgent))

        if self.option:
            if isinstance(self.option, (list, tuple)):
                option_buf = bytearray()
                for opt in self.option:
                    option_buf.extend(opt.serialize())
                h.extend(option_buf)
                mod = len(option_buf) % 4
            else:
                h.extend(self.option)
                mod = len(self.option) % 4
            if mod:
                h.extend(bytearray(4 - mod))
            if self.offset:
                offset = self.offset << 2
                if len(h) < offset:
                    h.extend(bytearray(offset - len(h)))

        if self.offset == 0:
            self.offset = len(h) >> 2
            offset = self.offset << 4
            struct.pack_into('!B', h, 12, offset)

        if self.csum == 0:
            total_length = len(h) + len(payload)
            self.csum = packet_utils.checksum_ip(prev, total_length,
                                                 h + payload)
            struct.pack_into('!H', h, 16, self.csum)
        return six.binary_type(h)
Example #17
def test3():
    class X(object):
        def __init__(self, a,b):
            self.a, self.b = a,b  # ubyte, float
        def __eq__(self, other):
            return  self.a == other.a  and  abs(self.b - other.b) < 1e-6

    x = [ X(1, .1), X(2, .2) ]
    fmt = "=Bf"

    x_bytes = struct.calcsize(fmt)
    assert( x_bytes == 5 )

    buf = bytearray( len(x) * x_bytes )

    for i,n in enumerate(x):
        struct.pack_into( fmt, buf, i*x_bytes,  n.a, n.b )

    back = []
    count = int( len(buf) / x_bytes )
    for i in range(count):
        ab = struct.unpack_from( fmt, buf, i*x_bytes )
        back += [ X(*ab) ]

    assert( back == x )
Example #18
        def __setitem__(self, idx, value):
            if not isinstance(idx, int):
                raise TypeError("Expected an integer index")

            ptr = ((idx % self.type_size) * self._ctype_size + idx // self.type_size * self.byte_stride) + self.byte_offset

            struct.pack_into(self._ctype, self._buffer_data, ptr, value)
Example #19
    def sendlistMovie(self,ack_num,seq_num):
        print "-----------------------"
        print seq_num
        typeMsg=3
        ack=0
        res=0
        Data_length = 0
        List_Movies=self.serverProxy.getMovieList()
        print List_Movies 
        Nb_Movies=len(List_Movies)
        print Nb_Movies
        for movie in List_Movies: 
            Data_length += len(movie.movieTitle)+7
        Msg_length=Data_length+8
        buf = ctypes.create_string_buffer(Msg_length) 
        header=(typeMsg<<28)|(ack<<27)|(res<<26)|(ack_num<<13)|(seq_num)
        struct.pack_into('>LHH',buf,0,header,Msg_length,Nb_Movies) # Header packing
        pos_buffer=8
        for movie in List_Movies:
            Tab=movie.movieIpAddress.split('.')
            print movie.movieTitle
            print("%s"%movie.movieIpAddress)
            FormatData='>BBBBHB' + str(len(movie.movieTitle)) + 's' 
            struct.pack_into(FormatData,buf,pos_buffer,int(Tab[0]),int(Tab[1]),int(Tab[2]),int(Tab[3]),movie.moviePort,len(movie.movieTitle),movie.movieTitle)# packing each movie
            pos_buffer+=len(movie.movieTitle)+7
        self.EnvoiMsg(buf)
        print "message a été bien envoyé"
Example #20
    def serialize(self):
        self.opt_para_len = 0
        # Use a 2-octet AS number placeholder here; the real (possibly
        # 4-octet) AS number is carried in the support_4_octets_as_num
        # capability.
        # Note that "23456" isn't just a random number: it is the AS_TRANS
        # value defined in RFC 4893.
        two_octet_as = 23456 if self.my_as > 65535 else self.my_as
        hdr = bytearray(struct.pack(bgp4_open._PACK_STR, self.version, 
                        two_octet_as, self.hold_time,
                        self.bgp_identifier.value, self.opt_para_len))

        if self.data != []:
            hdr += bytearray(struct.pack('!BB', self.type_, self.para_len))
            for para in self.data:

                #hdr += bytearray(struct.pack('!BB', para.code, para.length))
                cls_ = self._CAPABILITY_ADVERTISEMENT.get(para.code, None)
                if cls_:
                    hdr += para.serialize()
                    self.para_len += para._MIN_LEN

        self.opt_para_len = self.para_len + 2
        struct.pack_into('!B', hdr, 9, self.opt_para_len)
        struct.pack_into('!B', hdr, 11, self.para_len)
        return hdr
Example #21
    def serialize(self):
        #serialize wd_routes_len and path_attr_len first
        hdr = bytearray(struct.pack(self._PACK_STR + 'HB', self.flag,
                        self.code, self.length, self.addr_family,
                        self.sub_addr_family))

        self.length = 3
        
        # assume in NLRI serialize we only handle IPv6
        hdr += bytearray(struct.pack('!B', self.next_hop_len))
        self.length += 1
        for i in self.next_hop:
            hdr += bytearray(i.packed)
        self.length += self.next_hop_len

        hdr += bytearray(struct.pack('!B', self.reserved))
        self.length += 1

        #nlri
        for i in self.nlri:
            sub_hdr = i.serialize()
            self.length += len(sub_hdr)
            hdr += sub_hdr

        #print '## mp_reach_nlri serialize nlri success'
        
        if self._PACK_STR == '!BBH':
            struct.pack_into('!H', hdr, 2, self.length)
        else:
            struct.pack_into('!B', hdr, 2, self.length)

        return hdr
Example #22
    def toBytes(self, destBuf, newOffset=None, imgDataOffset=None, destOffset=0, order="="):
        if newOffset is None and self.isOffset:
            newOffset = self.val

        if self.isOffset:
            valFmt = 'L'
            val = newOffset
        elif self.isImageDataOffsetEntry() and imgDataOffset:
            # we are writing image data offsets within this entry; they are not themselves offset
            # for this to not be offset, there must be only one, since it's a long
            valFmt = 'L'
            val = imgDataOffset
        else:
            valFmt = lookupTagType(self.type).fmt * self.count
            val = self.val

        fmt = order+"HHI"+valFmt
        fmtSize = struct.calcsize(fmt)
        if fmtSize < 12:
            fmt += 'x'*(12-fmtSize)

        packing = [self.tag, self.type, self.count]
        if isinstance(val, tuple):
            packing += list(val)
        else:
            packing.append(val)

        struct.pack_into(fmt, destBuf, destOffset, *packing)
        return 12
Example #23
def comms_on():
    sock = Udpsocket([])
    data = ctypes.create_string_buffer(13)
    data[0:9] = 'RF SW CMD'
    struct.pack_into("<I", data, 9, settings.RF_SW_CMD_ON_INT)
    d = bytearray(data)
    sock.sendto(d, (packet_settings.FRAME_RECEIVER_IP, packet_settings.FRAME_RECEIVER_PORT))
Example #24
def scan_rays(rays, max_distance, ray_origins=False, keep_render_setup=False, do_shading=True):

    elementsPerRay = 3
    if ray_origins == True:
      elementsPerRay = 6
  
    numberOfRays = int(len(rays)/elementsPerRay)


    rays_buffer = (ctypes.c_float * numberOfRays*elementsPerRay)()
    struct.pack_into("%df"%(numberOfRays*elementsPerRay), rays_buffer, 0, *rays[:numberOfRays*elementsPerRay])

    returns_buffer = (ctypes.c_float * (numberOfRays * ELEMENTS_PER_RETURN))()
   
    print ("Raycount: ", numberOfRays)
    
    returns_buffer_uint = ctypes.cast(returns_buffer, ctypes.POINTER(ctypes.c_uint))

    array_of_returns = []

    try:
      bpy.ops.render.blensor(raycount = numberOfRays,maximum_distance = max_distance, vector_strptr="%016X"%(ctypes.addressof(rays_buffer)), return_vector_strptr="%016X"%(ctypes.addressof(returns_buffer)), elements_per_ray = elementsPerRay, keep_render_setup=keep_render_setup,
      shading = do_shading)
      


      for idx in range(numberOfRays):
          if returns_buffer[idx*ELEMENTS_PER_RETURN] < max_distance and returns_buffer[idx*ELEMENTS_PER_RETURN]>0.0 :
              #The ray may have been reflected and refracted. But the laser
              #does not know that, so we need to calculate the point which
              #is the measured distance away from the sensor but without
              #being reflected/refracted. We use the original ray direction.
              vec = [float(rays[idx*elementsPerRay]), 
                     float(rays[idx*elementsPerRay+1]),
                     float(rays[idx*elementsPerRay+2])]
              veclen = math.sqrt(vec[0]**2+vec[1]**2+vec[2]**2)
              raydistance = float(returns_buffer[idx*ELEMENTS_PER_RETURN])
              vec[0] = raydistance * vec[0]/veclen
              vec[1] = raydistance * vec[1]/veclen
              vec[2] = raydistance * vec[2]/veclen


              ret = [ float(returns_buffer[e + idx*ELEMENTS_PER_RETURN]) for e in range(4) ]            
              ret[1] = vec[0]
              ret[2] = vec[1]
              ret[3] = vec[2]
              ret.append(returns_buffer_uint[idx*ELEMENTS_PER_RETURN+4]) #objectid
              ret.append((returns_buffer[idx*ELEMENTS_PER_RETURN+5],
                          returns_buffer[idx*ELEMENTS_PER_RETURN+6],
                          returns_buffer[idx*ELEMENTS_PER_RETURN+7])) # RGB Value of the material
              ret.append(idx) # Store the index per return as the last element
              array_of_returns.append(ret)
    except TypeError as e:
      exc_type, exc_value, exc_traceback = sys.exc_info()
      traceback.print_tb(exc_traceback)
    finally:
      del rays_buffer
      del returns_buffer

    return array_of_returns
Example #25
    async def initial_connection(self, data):
        state = self._connection
        state.ssrc = data['ssrc']
        state.voice_port = data['port']

        packet = bytearray(70)
        struct.pack_into('>I', packet, 0, state.ssrc)
        state.socket.sendto(packet, (state.endpoint_ip, state.voice_port))
        recv = await self.loop.sock_recv(state.socket, 70)
        log.debug('received packet in initial_connection: %s', recv)

        # the ip is ascii starting at the 4th byte and ending at the first null
        ip_start = 4
        ip_end = recv.index(0, ip_start)
        state.ip = recv[ip_start:ip_end].decode('ascii')

        # the port is a little endian unsigned short in the last two bytes
        # yes, this is different endianness from everything else
        state.port = struct.unpack_from('<H', recv, len(recv) - 2)[0]
        log.debug('detected ip: %s port: %s', state.ip, state.port)

        # there *should* always be at least one supported mode (xsalsa20_poly1305)
        modes = [mode for mode in data['modes'] if mode in self._connection.supported_modes]
        log.debug('received supported encryption modes: %s', ", ".join(modes))

        mode = modes[0]
        await self.select_protocol(state.ip, state.port, mode)
        log.info('selected the voice protocol for use (%s)', mode)

        await self.client_connect()
Example #26
def icmpv6_csum(prev, buf):
    ph = struct.pack('!16s16sBBH', prev.src, prev.dst, 0, prev.nxt,
                     prev.payload_length)
    h = bytearray(buf)
    struct.pack_into('!H', h, 2, 0)

    return socket.htons(packet_utils.checksum(ph + h))
Example #27
 def throttle(self,speed,direction):
   '''
  This method controls the train's throttle.
  The two parameters are:
  speed 0 - 127  where 0 = stop
  direction 0 - FORWARD
            1 - REVERSE
   For information about the message sent by this method refer to the XpressNet protocol:
    'Locomotive speed and direction operations'
  example:
  t1.throttle(15,FORWARD)  # move train forward with a speed of 15 steps
  t1.throttle(0,FORWARD)   # stop train      
   '''
   message = bytearray('E400000000'.decode('hex'))
   message[1] = 0x13   #128 speed steps
   struct.pack_into(">H",message,2,self.address)
   # NB If speed is set to max, the command has to be done differently
   # else the elink bombs, so it is just easier to block it.
   if speed > 120:
     speed = 120
   message[4] = speed
   if direction == FORWARD : message[4] |= 0x80
   elif direction == REVERSE : message[4] &= 0x7F
   parity(message)
   send (message)
Example #28
    def pdpte_create_binary_file(self):
        # pae needs a pdpte at 32byte aligned address

        # Even though we have only 4 entries in the pdpte we need to move
        # the self.output_offset variable to the next page to start pushing
        # the pd contents
        #
        # FIXME: This wastes a ton of RAM!!
        if args.verbose:
            print("PDPTE at 0x%x" % self.pd_start_addr)

        for pdpte in range(self.total_pages + 1):
            if pdpte in self.get_pdpte_list():
                present = 1 << 0
                addr_of_pd = (((self.pd_start_addr + 4096) +
                               self.get_pdpte_list().index(pdpte) *
                               4096) >> 12) << 12
                binary_value = (present | addr_of_pd)
            else:
                binary_value = 0

            struct.pack_into(page_entry_format,
                             self.output_buffer,
                             self.output_offset,
                             binary_value)

            self.output_offset += struct.calcsize(page_entry_format)
Example #29
    def set(self, value):
        """
        If a primitive, assigns data.

        Complex objects should raise a ValueError.
        """
        struct.pack_into(self.pack, self.buf, self.offset, value)
Example #30
def pack_into_format(frg,ack,type_field,rt,seq_number, u_id, d_id, data_length):
    #
    buff = ctypes.create_string_buffer(HEADER_L+data_length)
    #
    first_byte = 0b00000000
    # in case the rt or type_field are higher than normal
    if( len(bin(rt))-2 > RT_L):
        sys.exit('ERROR :  RT = '+format(rt)+' - * RT (Room Type) field is too big * -')
    first_byte += rt
    if(len(bin(type_field))-2 > TYPE_FIELD_L):
        sys.exit('ERROR :  Type = '+format(type_field)+' - * Type field is too big * -')
    first_byte += type_field << 2
    # frg and ack fields can be either True (1) or False (0)
    if frg:
        first_byte += 0b10000000
    if ack:
        first_byte += 0b01000000

    #
    if(seq_number>255 or u_id >255 or d_id > 255):
        sys.exit('Error :  Sequence Number, User Id or Destination Id field is too big')
    struct.pack_into(HEADER_FORMAT, buff, 0, first_byte, seq_number, u_id, d_id, data_length)
    # data_length is the length of the data in bytes
    # struct.pack_into(format(data_length)+'B',buff, HEADER_L, data)
    
    return buff
 def offsets_accelerometer(self, offsets):
     data = bytearray(6)
     struct.pack_into("<hhh", data, 0, *offsets)
     self._write_register(_OFFSET_ACCEL_REGISTER, bytes(data))
 def pack(self, buffer, offset):
     """
     Constructs a packed C structure for the protocol header and writes it
     into ``buffer`` at ``offset`` (note that struct.pack_into returns None)
     """
     return struct.pack_into(self.struct_format, buffer, offset, 0, self.type, 0)
 def pack(self, buffer, offset):
     """
     Constructs a packed c structure from properties and the provided
     target, writing it into ``buffer`` at ``offset``
     """
     struct.pack_into(self.struct_format, buffer, offset, self.target, 0,0,0,0,0,0, self.binary_field, self.sequence)
 def radius_magnetometer(self, radius):
     data = bytearray(2)
     struct.pack_into("<h", data, 0, radius)
     self._write_register(_RADIUS_MAGNET_REGISTER, bytes(data))
 def radius_accelerometer(self, radius):
     data = bytearray(2)
     struct.pack_into("<h", data, 0, radius)
     self._write_register(_RADIUS_ACCEL_REGISTER, bytes(data))
 def offsets_gyroscope(self, offsets):
     data = bytearray(6)
     struct.pack_into("<hhh", data, 0, *offsets)
     self._write_register(_OFFSET_GYRO_REGISTER, bytes(data))
 def offsets_magnetometer(self, offsets):
     data = bytearray(6)
     struct.pack_into("<hhh", data, 0, *offsets)
     self._write_register(_OFFSET_MAGNET_REGISTER, bytes(data))
Example #38
def swap16(data, offs):
	struct.pack_into("<H", data, offs, struct.unpack_from(">H", data, offs)[0])
def main():
    """
    Multicast example, sender part.
    An example where data is read from a file, encoded, and then send
    via the network.
    """
    parser = argparse.ArgumentParser(description=main.__doc__)
    """The parser takes a path to a file as input."""
    parser.add_argument(
        "--file-path",
        type=str,
        help="Path to the file which should be sent.",
        default=os.path.realpath(__file__),
    )
    """The parser takes the target IP-address as input."""
    parser.add_argument("--ip",
                        type=str,
                        help="The IP address to send to.",
                        default=MCAST_GRP)
    """The parser takes the target port as input."""
    parser.add_argument("--port",
                        type=int,
                        help="The port to send to.",
                        default=MCAST_PORT)
    """One can tell the parser to run without using the network."""
    parser.add_argument("--dry-run",
                        action="store_true",
                        help="Run without network use.")

    args = parser.parse_args()

    # Check the file.
    if not os.path.isfile(args.file_path):
        print("{} is not a valid file.".format(args.file_path))
        sys.exit(1)

    file_stats = os.stat(args.file_path)
    block_bytes = file_stats.st_size

    symbol_bytes = 1400
    symbols = block_bytes // symbol_bytes + 1
    width = kodo.perpetual.Width._8

    # Create and configure the encoder, coefficient generator and offset generator.
    encoder = kodo.perpetual.Encoder(width)
    encoder.configure(block_bytes, symbol_bytes)

    generator = kodo.perpetual.generator.RandomUniform(width)

    offset_generator = kodo.perpetual.offset.RandomUniform()
    offset_generator.configure(symbols)

    sock = socket.socket(family=socket.AF_INET,
                         type=socket.SOCK_DGRAM,
                         proto=socket.IPPROTO_UDP)

    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)

    # Read the input data from the file, only the block_bytes first bytes
    # can fit into a single generation. No more data will be sent.
    # If the file is smaller than block_bytes then it will be zero-padded.
    f = open(os.path.expanduser(args.file_path), "rb")
    data_in = bytearray(f.read().ljust(encoder.block_bytes))
    f.close()

    # Assign the data_in buffer to the encoder symbol_storage.
    encoder.set_symbols_storage(data_in)

    if args.dry_run:
        sys.exit(0)

    address = (args.ip, args.port)

    header_data = bytearray(27)

    print("Processing...")
    while True and not args.dry_run:

        time.sleep(0.2)

        # Generate an encoded packet.
        seed = random.randint(0, 2**64 - 1)

        offset = offset_generator.offset()
        coefficients = generator.generate(seed)

        symbol = encoder.encode_symbol(coefficients, offset)

        struct.pack_into(
            "<QQBIIH",
            header_data,
            0,
            seed,
            offset,
            width.value,
            block_bytes,
            symbol_bytes,
            width,
        )

        # Send the encoded packet with seed and offset.
        packet = header_data + symbol
        sock.sendto(packet, address)
        print("Packet sent!")
Example #40
 def packData(self):
     struct.pack_into(self.DataPatern, self.data, self.DataOffset,
                      *list(self.ds1.values()))
Example #41
    def _build_loop_code(o_code, with_start, with_end, loop_count):
        code = type(o_code)

        # With statement:
        #
        # Header:
        #
        #              0 LOAD_GLOBAL              n (context_manager)
        #              3 SETUP_WITH              xx (to zz+4)
        #              6 STORE_FAST               m (context_manager variable (with ... as foobar))
        #
        # Body:
        #              9 <code_string>
        #
        # Footer:
        #
        #           zz+0 POP_BLOCK
        #           zz+1 LOAD_CONST               p (None)
        #       >>  zz+4 WITH_CLEANUP
        #           zz+5 END_FINALLY
        #           zz+6 LOAD_CONST               p (None)
        #           zz+9 RETURN_VALUE

        # move past the SETUP_WITH opcode (1 byte opcode itself + 2 bytes delta)
        code_start = with_start + 3
        # skip the next bytecode which can be one of POP_TOP, STORE_FAST, STORE_NAME, UNPACK_SEQUENCE (POP_TOP is 1 byte, the others are 3)
        if ord(o_code.co_code[code_start]) == dis.opmap['POP_TOP']:
            code_start += 1
        else:
            code_start += 3
        # at the end there is a POP_BLOCK + LOAD_CONST (index) (1 + 3 = 4 bytes)
        code_end = with_end - 4

        code_bytes = bytearray(o_code.co_code[code_start:code_end])

        try:
            xrange
        except NameError:
            names = o_code.co_names + ('range', )
        else:
            names = o_code.co_names + ('xrange', )

        code_constants = o_code.co_consts + (loop_count, )

        # Loop header:
        #
        #              0 SETUP_LOOP              xx (to zz+4)
        #              3 LOAD_NAME                n (xrange)
        #              6 LOAD_CONST               m (1000)
        #              9 CALL_FUNCTION            1
        #             12 GET_ITER
        #       >>    13 FOR_ITER                yy (to zz+3)
        #             16 POP_TOP
        #
        # Code body:
        #             17 <code_bytes>
        #
        # Loop footer:
        #
        #           zz+0 JUMP_ABSOLUTE           13
        #       >>  zz+3 POP_BLOCK
        #       >>  zz+4 LOAD_CONST               l (None)
        #           zz+7 RETURN_VALUE
        #
        # zz = len(loop_header) + len(code_bytes)
        # xx +  3 == zz + 4  ->  xx = len(loop_header) + len(code_bytes) + 4 -  3 = len(loop_header) + len(code_bytes) +  1
        # yy + 16 == zz + 3  ->  yy = len(loop_header) + len(code_bytes) + 3 - 16 = len(loop_header) + len(code_bytes) - 13  (13 is the FOR_ITER bytecode offset)

        loop_header = bytearray(
            '\x78\x00\x00\x65\x00\x00\x64\x00\x00\x83\x01\x00\x44\x5d\x00\x00\x01'
        )
        loop_footer = bytearray('\x71\x0d\x00\x57\x64\x00\x00\x53')

        struct.pack_into('<H', loop_header, 1,
                         len(loop_header) + len(code_bytes) +
                         1)  # SETUP_LOOP delta (xx)
        # LOAD_NAME index for range function
        struct.pack_into('<H', loop_header, 4, len(names) - 1)
        # LOAD_CONST index for loop count
        struct.pack_into('<H', loop_header, 7, len(code_constants) - 1)
        struct.pack_into('<H', loop_header, 14,
                         len(loop_header) + len(code_bytes) -
                         13)  # FOR_ITER delta (yy)

        struct.pack_into(
            '<H', loop_footer, 5,
            code_constants.index(None))  # LOAD_CONST index for None

        # adjust the jump addresses within the original code block to match the new bytecode offset they will have within the for loop
        index = 0
        code_length = len(code_bytes)
        offset = len(loop_header) - code_start
        while index < code_length:
            opcode = code_bytes[index]
            index += 1
            if opcode >= dis.HAVE_ARGUMENT:
                if opcode in dis.hasjabs:
                    jump_address = struct.unpack_from('<H', code_bytes,
                                                      index)[0]
                    struct.pack_into('<H', code_bytes, index,
                                     jump_address + offset)
                index += 2

        new_code_bytes = bytes(loop_header + code_bytes + loop_footer)

        # adjust the line numbers table
        class WithinCodeRange(object):
            def __init__(self, size):
                self.limit = size
                self.bytes = 0

            def __call__(self, increment_pair):
                byte_increment, line_increment = increment_pair
                self.bytes += byte_increment
                return self.bytes < self.limit

        byte_increments = deque(bytearray(o_code.co_lnotab[0::2]))
        line_increments = deque(bytearray(o_code.co_lnotab[1::2]))
        byte_offset = line_offset = 0
        while byte_offset < code_start:
            byte_offset += byte_increments.popleft()
            line_offset += line_increments.popleft()
        byte_increments.appendleft(len(loop_header))
        line_increments.appendleft(1)

        line_numbers_table = bytes(
            bytearray(
                chain.from_iterable(
                    takewhile(WithinCodeRange(len(loop_header + code_bytes)),
                              zip(byte_increments, line_increments)))))

        return code(o_code.co_argcount, o_code.co_nlocals, o_code.co_stacksize,
                    o_code.co_flags, new_code_bytes, code_constants, names,
                    o_code.co_varnames, o_code.co_filename, o_code.co_name,
                    o_code.co_firstlineno + line_offset - 1,
                    line_numbers_table, o_code.co_freevars, o_code.co_cellvars)
Example #42
def swap32(data, offs):
	struct.pack_into("<I", data, offs, struct.unpack_from(">I", data, offs)[0])
Example #43
# Read in the original ROM
if romFileName != "":
    print("File '" + romFileName + "' found.")
    with open(romFileName, mode="rb") as file:
        fileContent = bytearray(file.read())

        # Check if ROM needs to be byte swapped
        if fileContent[0] == 0x40:
            # Byte Swap ROM
            # TODO: This is pretty slow at the moment. Look into optimizing it later...
            print("ROM needs to be byte swapped...")
            i = 0
            while i < len(fileContent):
                tmp = struct.unpack_from("BBBB", fileContent, i)
                struct.pack_into("BBBB", fileContent, i + 0, tmp[3], tmp[2],
                                 tmp[1], tmp[0])
                i += 4

                perc = float(i) / float(len(fileContent))

                if i % (1024 * 1024 * 4) == 0:
                    print(str(perc * 100) + "%")

            print("Byte swapping done.")
else:
    print("Error: Could not find baserom_original.z64/baserom_original.n64.")
    sys.exit(1)

# Strip the overdump
print("Stripping overdump...")
strippedContent = list(fileContent[0:0x3600000])
Example #44
def _put_sample(cp, size, i, val, signed=True):
    fmt = _struct_format(size, signed)
    struct.pack_into(fmt, cp, i * size, val)
Example #45
 def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket):
     '''Read partial messages in receive call'''
     ifname = "eth0"
     bytes = ifname.encode("utf-8")
     data1 = bytearray(112)
     data2 = bytearray(32)
     struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
     struct.pack_into("HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3, bytes, 5,
                      16, int_to_bytes(OPER_DOWN))
     struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
     struct.pack_into("HH4sHHc", data1, 80, 8, 3, bytes, 5, 16,
                      int_to_bytes(OPER_DOWN))
     struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0)
     struct.pack_into("HH4sHHc", data2, 16, 8, 3, bytes, 5, 16,
                      int_to_bytes(OPER_UP))
     m_read_netlink_socket.side_effect = [data1, data2]
     wait_for_media_disconnect_connect(m_socket, ifname)
     self.assertEqual(m_read_netlink_socket.call_count, 2)
Example #46
def do_relocation_text(env, text_addr, r):
    # Extract relevant info about symbol that's being relocated
    s = r.sym
    s_bind = s.entry["st_info"]["bind"]
    s_shndx = s.entry["st_shndx"]
    s_type = s.entry["st_info"]["type"]
    r_offset = r["r_offset"] + text_addr
    r_info_type = r["r_info_type"]
    try:
        # only for RELA sections
        r_addend = r["r_addend"]
    except KeyError:
        r_addend = 0

    # Default relocation type and name for logging
    reloc_type = "le32"
    log_name = None

    if (env.arch.name == "EM_386" and r_info_type in (R_386_PC32, R_386_PLT32)
            or env.arch.name == "EM_X86_64"
            and r_info_type in (R_X86_64_PC32, R_X86_64_PLT32)
            or env.arch.name == "EM_ARM"
            and r_info_type in (R_ARM_REL32, R_ARM_THM_CALL, R_ARM_THM_JUMP24)
            or s_bind == "STB_LOCAL" and env.arch.name == "EM_XTENSA"
            and r_info_type == R_XTENSA_32  # not GOT
        ):
        # Standard relocation to fixed location within text/rodata
        if hasattr(s, "resolved"):
            s = s.resolved

        sec = s.section

        if env.arch.separate_rodata and sec.name.startswith(".rodata"):
            raise LinkError(
                "fixed relocation to rodata with rodata referenced via GOT")

        if sec.name.startswith(".bss"):
            raise LinkError(
                "{}: fixed relocation to bss (bss variables can't be static)".
                format(s.filename))

        if sec.name.startswith(".external"):
            raise LinkError(
                "{}: fixed relocation to external symbol: {}".format(
                    s.filename, s.name))

        addr = sec.addr + s["st_value"]
        reloc = addr - r_offset + r_addend

        if r_info_type in (R_ARM_THM_CALL, R_ARM_THM_JUMP24):
            # Both relocations have the same bit pattern to rewrite:
            #   R_ARM_THM_CALL: bl
            #   R_ARM_THM_JUMP24: b.w
            reloc_type = "thumb_b"

    elif (env.arch.name == "EM_386" and r_info_type == R_386_GOTPC
          or env.arch.name == "EM_ARM" and r_info_type == R_ARM_BASE_PREL):
        # Relocation to GOT address itself
        assert s.name == "_GLOBAL_OFFSET_TABLE_"
        addr = env.got_section.addr
        reloc = addr - r_offset + r_addend

    elif (env.arch.name == "EM_386"
          and r_info_type in (R_386_GOT32, R_386_GOT32X)
          or env.arch.name == "EM_ARM" and r_info_type == R_ARM_GOT_BREL):
        # Relocation pointing to GOT
        reloc = addr = env.got_entries[s.name].offset

    elif env.arch.name == "EM_X86_64" and r_info_type in (
            R_X86_64_GOTPCREL,
            R_X86_64_REX_GOTPCRELX,
    ):
        # Relocation pointing to GOT
        got_entry = env.got_entries[s.name]
        addr = env.got_section.addr + got_entry.offset
        reloc = addr - r_offset + r_addend

    elif env.arch.name == "EM_386" and r_info_type == R_386_GOTOFF:
        # Relocation relative to GOT
        addr = s.section.addr + s["st_value"]
        reloc = addr - env.got_section.addr + r_addend

    elif env.arch.name == "EM_XTENSA" and r_info_type == R_XTENSA_SLOT0_OP:
        # Relocation pointing to GOT, xtensa specific
        sec = s.section
        if sec.name.startswith(".text"):
            # it looks like R_XTENSA_SLOT0_OP into .text is already correctly relocated
            return
        assert sec.name.startswith(".literal"), sec.name
        lit_idx = "{}+0x{:x}".format(sec.filename, r_addend)
        lit_ptr = env.xt_literals[lit_idx]
        if isinstance(lit_ptr, str):
            addr = env.got_section.addr + env.got_entries[lit_ptr].offset
            log_name = "GOT {}".format(lit_ptr)
        else:
            addr = env.lit_section.addr + env.lit_entries[lit_ptr].offset
            log_name = "LIT"
        reloc = addr - r_offset
        reloc_type = "xtensa_l32r"

    elif env.arch.name == "EM_XTENSA" and r_info_type == R_XTENSA_DIFF32:
        if s.section.name.startswith(".text"):
            # it looks like R_XTENSA_DIFF32 into .text is already correctly relocated
            return
        assert 0

    else:
        # Unknown/unsupported relocation
        assert 0, r_info_type

    # Write relocation
    if reloc_type == "le32":
        (existing, ) = struct.unpack_from("<I", env.full_text, r_offset)
        struct.pack_into("<I", env.full_text, r_offset,
                         (existing + reloc) & 0xFFFFFFFF)
    elif reloc_type == "thumb_b":
        b_h, b_l = struct.unpack_from("<HH", env.full_text, r_offset)
        existing = (b_h & 0x7FF) << 12 | (b_l & 0x7FF) << 1
        if existing >= 0x400000:  # 2's complement
            existing -= 0x800000
        new = existing + reloc
        b_h = (b_h & 0xF800) | (new >> 12) & 0x7FF
        b_l = (b_l & 0xF800) | (new >> 1) & 0x7FF
        struct.pack_into("<HH", env.full_text, r_offset, b_h, b_l)
    elif reloc_type == "xtensa_l32r":
        l32r = unpack_u24le(env.full_text, r_offset)
        assert l32r & 0xF == 1  # RI16 encoded l32r
        l32r_imm16 = l32r >> 8
        l32r_imm16 = (l32r_imm16 + reloc >> 2) & 0xFFFF
        l32r = l32r & 0xFF | l32r_imm16 << 8
        pack_u24le(env.full_text, r_offset, l32r)
    else:
        assert 0, reloc_type

    # Log information about relocation
    if log_name is None:
        if s_type == "STT_SECTION":
            log_name = s.section.name
        else:
            log_name = s.name
    log(LOG_LEVEL_3, "  {:08x} {} -> {:08x}".format(r_offset, log_name, addr))
Example #47
 def f64_store(self, addr: int, value: float) -> None:
     self._check_addr(addr)
     struct.pack_into("d", self.memory_view, addr, float(value))
Example #48
def do_relocation_text(env, text_addr, r):
    # Extract relevant info about symbol that's being relocated
    s = r.sym
    s_bind = s.entry['st_info']['bind']
    s_shndx = s.entry['st_shndx']
    s_type = s.entry['st_info']['type']
    r_offset = r['r_offset'] + text_addr
    r_info_type = r['r_info_type']
    try:
        # only for RELA sections
        r_addend = r['r_addend']
    except KeyError:
        r_addend = 0

    # Default relocation type and name for logging
    reloc_type = 'le32'
    log_name = None

    if (env.arch.name == 'EM_386' and r_info_type in (R_386_PC32, R_386_PLT32)
        or env.arch.name == 'EM_X86_64' and r_info_type in (R_X86_64_PC32, R_X86_64_PLT32)
        or env.arch.name == 'EM_ARM' and r_info_type in (R_ARM_REL32, R_ARM_THM_CALL, R_ARM_THM_JUMP24)
        or s_bind == 'STB_LOCAL' and env.arch.name == 'EM_XTENSA' and r_info_type == R_XTENSA_32 # not GOT
        ):
        # Standard relocation to fixed location within text/rodata
        if hasattr(s, 'resolved'):
            s = s.resolved

        sec = s.section

        if env.arch.separate_rodata and sec.name.startswith('.rodata'):
            raise LinkError('fixed relocation to rodata with rodata referenced via GOT')

        if sec.name.startswith('.bss'):
            raise LinkError('{}: fixed relocation to bss (bss variables can\'t be static)'.format(s.filename))

        if sec.name.startswith('.external'):
            raise LinkError('{}: fixed relocation to external symbol: {}'.format(s.filename, s.name))

        addr = sec.addr + s['st_value']
        reloc = addr - r_offset + r_addend

        if r_info_type in (R_ARM_THM_CALL, R_ARM_THM_JUMP24):
            # Both relocations have the same bit pattern to rewrite:
            #   R_ARM_THM_CALL: bl
            #   R_ARM_THM_JUMP24: b.w
            reloc_type = 'thumb_b'

    elif (env.arch.name == 'EM_386' and r_info_type == R_386_GOTPC
        or env.arch.name == 'EM_ARM' and r_info_type == R_ARM_BASE_PREL
        ):
        # Relocation to GOT address itself
        assert s.name == '_GLOBAL_OFFSET_TABLE_'
        addr = env.got_section.addr
        reloc = addr - r_offset + r_addend

    elif (env.arch.name == 'EM_386' and r_info_type in (R_386_GOT32, R_386_GOT32X)
        or env.arch.name == 'EM_ARM' and r_info_type == R_ARM_GOT_BREL
        ):
        # Relocation pointing to GOT
        reloc = addr = env.got_entries[s.name].offset

    elif env.arch.name == 'EM_X86_64' and r_info_type == R_X86_64_REX_GOTPCRELX:
        # Relocation pointing to GOT
        got_entry = env.got_entries[s.name]
        addr = env.got_section.addr + got_entry.offset
        reloc = addr - r_offset + r_addend

    elif env.arch.name == 'EM_386' and r_info_type == R_386_GOTOFF:
        # Relocation relative to GOT
        addr = s.section.addr + s['st_value']
        reloc = addr - env.got_section.addr + r_addend

    elif env.arch.name == 'EM_XTENSA' and r_info_type == R_XTENSA_SLOT0_OP:
        # Relocation pointing to GOT, xtensa specific
        sec = s.section
        if sec.name.startswith('.text'):
            # it looks like R_XTENSA_SLOT0_OP into .text is already correctly relocated
            return
        assert sec.name.startswith('.literal'), sec.name
        lit_idx = '{}+0x{:x}'.format(sec.filename, r_addend)
        lit_ptr = env.xt_literals[lit_idx]
        if isinstance(lit_ptr, str):
            addr = env.got_section.addr + env.got_entries[lit_ptr].offset
            log_name = 'GOT {}'.format(lit_ptr)
        else:
            addr = env.lit_section.addr + env.lit_entries[lit_ptr].offset
            log_name = 'LIT'
        reloc = addr - r_offset
        reloc_type = 'xtensa_l32r'

    elif env.arch.name == 'EM_XTENSA' and r_info_type == R_XTENSA_DIFF32:
        if s.section.name.startswith('.text'):
            # it looks like R_XTENSA_DIFF32 into .text is already correctly relocated
            return
        assert 0

    else:
        # Unknown/unsupported relocation
        assert 0, r_info_type

    # Write relocation
    if reloc_type == 'le32':
        existing, = struct.unpack_from('<I', env.full_text, r_offset)
        struct.pack_into('<I', env.full_text, r_offset, (existing + reloc) & 0xffffffff)
    elif reloc_type == 'thumb_b':
        b_h, b_l = struct.unpack_from('<HH', env.full_text, r_offset)
        existing = (b_h & 0x7ff) << 12 | (b_l & 0x7ff) << 1
        if existing >= 0x400000: # 2's complement
            existing -= 0x800000
        new = existing + reloc
        b_h = (b_h & 0xf800) | (new >> 12) & 0x7ff
        b_l = (b_l & 0xf800) | (new >> 1) & 0x7ff
        struct.pack_into('<HH', env.full_text, r_offset, b_h, b_l)
    elif reloc_type == 'xtensa_l32r':
        l32r = unpack_u24le(env.full_text, r_offset)
        assert l32r & 0xf == 1 # RI16 encoded l32r
        l32r_imm16 = l32r >> 8
        l32r_imm16 = (l32r_imm16 + reloc >> 2) & 0xffff
        l32r = l32r & 0xff | l32r_imm16 << 8
        pack_u24le(env.full_text, r_offset, l32r)
    else:
        assert 0, reloc_type

    # Log information about relocation
    if log_name is None:
        if s_type == 'STT_SECTION':
            log_name = s.section.name
        else:
            log_name = s.name
    log(LOG_LEVEL_3, '  {:08x} {} -> {:08x}'.format(r_offset, log_name, addr))
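The xtensa_l32r branch above calls unpack_u24le/pack_u24le, which are not shown in this excerpt. A minimal sketch of what they are assumed to do (read and write a 3-byte little-endian field in place):

def unpack_u24le(data, offset):
    # assemble bits 0..23 from three consecutive bytes, least-significant byte first
    return data[offset] | data[offset + 1] << 8 | data[offset + 2] << 16


def pack_u24le(data, offset, value):
    # write the low 24 bits back in the same byte order
    data[offset] = value & 0xFF
    data[offset + 1] = (value >> 8) & 0xFF
    data[offset + 2] = (value >> 16) & 0xFF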
Example #49
    iv=VALUES["frame_base"].iv,
    ciphertext=VALUES["frame_base"].ciphertext,
    tag=VALUES["frame_base"].tag,
    sequence_number=2,
    final_frame=False,
)
VALUES["deserialized_body_final_frame_3"] = MessageFrameBody(
    iv=VALUES["final_frame_base"].iv,
    ciphertext=VALUES["final_frame_base"].ciphertext,
    tag=VALUES["final_frame_base"].tag,
    sequence_number=3,
    final_frame=True,
)
VALUES["serialized_header_invalid_object_type"] = bytearray(
    VALUES["serialized_header"])
struct.pack_into(">B", VALUES["serialized_header_invalid_object_type"], 1, 0)
VALUES["serialized_header_invalid_object_type"] = array_byte(
    VALUES["serialized_header_invalid_object_type"])
VALUES["serialized_header_invalid_version"] = bytearray(
    VALUES["serialized_header"])
struct.pack_into(">B", VALUES["serialized_header_invalid_version"], 0, 0)
VALUES["serialized_header_invalid_version"] = array_byte(
    VALUES["serialized_header_invalid_version"])
VALUES["serialized_header_invalid_algorithm"] = VALUES["serialized_header"]
VALUES["serialized_header_disallowed_algorithm"] = VALUES["serialized_header"]
header_value_position = 22
header_value_position += len(VALUES["serialized_encryption_context"])
header_value_position += 2
header_value_position += len(VALUES["serialized_encrypted_data_key"])
VALUES["serialized_header_unknown_content_type"] = bytearray(
    VALUES["serialized_header"])
Example #50
    def write_frame(type_, channel, method_sig, args, content):
        chunk_size = connection.frame_max - 8
        offset = 0
        properties = None
        args = str_to_bytes(args)
        if content:
            properties = content._serialize_properties()
            body = content.body
            bodylen = len(body)
            framelen = (
                len(args) +
                (len(properties) or 0) +
                bodylen +
                FRAME_OVERHEAD
            )
            bigbody = framelen > chunk_size
        else:
            body, bodylen, bigbody = None, 0, 0

        if bigbody:
            # ## SLOW: string copy and write for every frame
            frame = (b''.join([pack('>HH', *method_sig), args])
                     if type_ == 1 else b'')  # encode method frame
            framelen = len(frame)
            write(pack('>BHI%dsB' % framelen,
                       type_, channel, framelen, frame, 0xce))
            if body:
                frame = b''.join([
                    pack('>HHQ', method_sig[0], 0, len(body)),
                    properties,
                ])
                framelen = len(frame)
                write(pack('>BHI%dsB' % framelen,
                           2, channel, framelen, frame, 0xce))

                for i in range(0, bodylen, chunk_size):
                    frame = body[i:i + chunk_size]
                    framelen = len(frame)
                    write(pack('>BHI%dsB' % framelen,
                               3, channel, framelen,
                               str_to_bytes(frame), 0xce))

        else:
            # ## FAST: pack into buffer and single write
            frame = (b''.join([pack('>HH', *method_sig), args])
                     if type_ == 1 else b'')
            framelen = len(frame)
            pack_into('>BHI%dsB' % framelen, buf, offset,
                      type_, channel, framelen, frame, 0xce)
            offset += 8 + framelen
            if body:
                frame = b''.join([
                    pack('>HHQ', method_sig[0], 0, len(body)),
                    properties,
                ])
                framelen = len(frame)

                pack_into('>BHI%dsB' % framelen, buf, offset,
                          2, channel, framelen, frame, 0xce)
                offset += 8 + framelen

                framelen = len(body)
                pack_into('>BHI%dsB' % framelen, buf, offset,
                          3, channel, framelen, str_to_bytes(body), 0xce)
                offset += 8 + framelen

            write(view[:offset])

        connection.bytes_sent += 1
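For readers unfamiliar with the framing used above: each AMQP 0-9-1 frame is a 7-byte '>BHI' header (type, channel, payload size), the payload, and a 0xce frame-end octet, which is where the 'offset += 8 + framelen' bookkeeping comes from. A small self-contained check (the payload bytes are invented):

import struct

payload = b'\x00\x3c\x00\x28hello'   # hypothetical method payload
frame = struct.pack('>BHI%dsB' % len(payload), 1, 1, len(payload), payload, 0xce)
ftype, channel, size = struct.unpack_from('>BHI', frame, 0)
assert (ftype, channel, size) == (1, 1, len(payload))
assert frame[-1] == 0xce and len(frame) == 8 + len(payload)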
Example #51
    def perform(self, cmd, msg, tail=None):
        t0 = ticks()  # times the entire command
        rcv_at = ticks()  # last received message for timeout purposes
        got_error = None  # flag to signal the end, False->OK; True->abort with raise
        sz = 0
        next_seq = 0  # next expected sequence number
        ack = -1  # ack for flow-control
        subscribed = False  # flag to pop out of loop waiting for subscription
        output = b""  # output ultimately returned from perform

        # generate an ID we can use for the MQTT topics to match replies
        self._topic_id = MQTT._gen_id(6)

        def on_reply(cli, ud, msg):
            nonlocal sz, next_seq, ack, rcv_at, output, got_error
            self.debug(
                f"Received reply on topic '{msg.topic}' with QoS {msg.qos}")
            # parse message header
            if len(msg.payload) < 2:
                return
            seq = ((msg.payload[0] & 0x7F) << 8) | msg.payload[1]
            last = (msg.payload[0] & 0x80) != 0
            # check sequence number
            if seq < next_seq:
                self.debug(
                    f"Duplicate message, expected seq={next_seq}, got {seq}")
                return
            if seq > next_seq:
                raise click.ClickException(
                    f"Missing message(s), expected seq={next_seq}, got {seq}")
            # handle ACK for long streams (a bit of a hack!)
            if len(msg.payload) - 2 < 10 and msg.payload[2:].startswith(
                    b"SEQ "):
                try:
                    s = int(msg.payload[6:])
                    if s > ack:
                        ack = s
                        print(".", end="")
                        return
                except ValueError:
                    raise click.ClickException("Bad ACK received")
            sz += len(msg.payload) - 2
            output += msg.payload[2:]
            rcv_at = ticks()
            if last:
                dt = ticks() - t0
                self.debug("{:.3f}kB in {:.3f}s -> {:.3f}kB/s".format(
                    sz / 1024, dt, sz / 1024 / dt))
                got_error = False

        def on_error(cli, ud, message):
            nonlocal got_error
            click.echo(message.payload.strip(), err=True)
            got_error = True

        def on_sub(client, userdata, mid, granted_qos):
            nonlocal subscribed
            subscribed = True

        def loop():
            if ticks() - rcv_at > self._timeout:
                raise click.ClickException("Timeout!")
            self._mqclient.loop(0.1)

        # first connect
        self.connect()

        # subscribe to the response topics
        reply_topic = self._mktopic("reply/out")
        err_topic = self._mktopic("reply/err")
        self._mqclient.message_callback_add(reply_topic, on_reply)
        self._mqclient.message_callback_add(err_topic, on_error)
        self._mqclient.on_subscribe = on_sub
        (res, sub_mid) = self._mqclient.subscribe([(reply_topic, 1),
                                                   (err_topic, 1)])
        self.debug(f"Subscribing to {reply_topic} and {err_topic}")
        if res != paho.MQTT_ERR_SUCCESS:
            raise click.ClickException("Subscribe failed")
        while not subscribed:
            loop()

        # iterate through content and send one buffer at a time
        seq = 0
        if isinstance(msg, str):
            msg = msg.encode()
        buf = bytearray(BUFLEN + 2)
        cmd_topic = self._mktopic(cmd, tail=tail)
        flowctrl = len(msg) > 100 * 1024  # hack
        while len(msg) > 0 and got_error is None:
            # make sure we're not more than 16 messages ahead of flow-control ACKs
            while flowctrl and seq - ack > 16 and got_error is None:
                loop()
            # construct outgoing message with 2-byte header (last flag and seq number)
            buf[2:] = msg[:BUFLEN]
            msg = msg[BUFLEN:]
            last = len(msg) == 0
            struct.pack_into("!H", buf, 0, last << 15 | seq)
            # publish
            sz += len(buf)
            self.debug(f"Pub {cmd_topic} #{seq} last={last} len={len(buf)}")
            self._mqclient.publish(cmd_topic, buf, qos=1)
            seq += 1
            loop()
        # self.debug("done publishing")

        # wait for replies
        while got_error is None:
            loop()

        # wrap up
        self._mqclient.unsubscribe(reply_topic)
        self._mqclient.unsubscribe(err_topic)
        if got_error:
            raise click.Abort()
        return output
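The 2-byte header packed with struct.pack_into("!H", buf, 0, last << 15 | seq) above mirrors the parsing in on_reply: the top bit is the 'last' flag and the low 15 bits are the sequence number. A quick round-trip sketch:

import struct

buf = bytearray(2)
last, seq = True, 0x1234
struct.pack_into("!H", buf, 0, (last << 15) | seq)
assert (buf[0] & 0x80) != 0                      # 'last' flag set
assert ((buf[0] & 0x7F) << 8) | buf[1] == seq    # sequence number recovered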
Example #52
 def fill_buffer(self, buffer: bytearray, start_pos: int) -> int:
     if not self.total_size:
         raise Exception('calc_real_size is not invoked')
     if len(buffer) - start_pos < 0:
         raise Exception('start_pos > buffer size')
     if start_pos + self.total_size > len(buffer):
         raise Exception('start_pos + total_size > buffer size')
     pos = start_pos
     vehicleId_var = self.m_vehicleId
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + '8s', buffer,
                      pos, vehicleId_var)
     VehicleToCloudRun.__logger.debug(f'fill vehicleId_var:{vehicleId_var}')
     pos += struct.calcsize('8s')
     timestampGNSS_var = self.m_timestampGNSS
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'H', buffer,
                      pos, timestampGNSS_var)
     VehicleToCloudRun.__logger.debug(
         f'fill timestampGNSS_var:{timestampGNSS_var}')
     pos += struct.calcsize('H')
     velocityGNSS_var = self.m_velocityGNSS
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'H', buffer,
                      pos, velocityGNSS_var)
     VehicleToCloudRun.__logger.debug(
         f'fill velocityGNSS_var:{velocityGNSS_var}')
     pos += struct.calcsize('H')
     longitude_var = self.m_longitude
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'I', buffer,
                      pos, longitude_var)
     VehicleToCloudRun.__logger.debug(f'fill longitude_var:{longitude_var}')
     pos += struct.calcsize('I')
     latitude_var = self.m_latitude
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'I', buffer,
                      pos, latitude_var)
     VehicleToCloudRun.__logger.debug(f'fill latitude_var:{latitude_var}')
     pos += struct.calcsize('I')
     elevation_var = self.m_elevation
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'i', buffer,
                      pos, elevation_var)
     VehicleToCloudRun.__logger.debug(f'fill elevation_var:{elevation_var}')
     pos += struct.calcsize('i')
     heading_var = self.m_heading
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'I', buffer,
                      pos, heading_var)
     VehicleToCloudRun.__logger.debug(f'fill heading_var:{heading_var}')
     pos += struct.calcsize('I')
     hdop_var = self.m_hdop
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'B', buffer,
                      pos, hdop_var)
     VehicleToCloudRun.__logger.debug(f'fill hdop_var:{hdop_var}')
     pos += struct.calcsize('B')
     vdop_var = self.m_vdop
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'B', buffer,
                      pos, vdop_var)
     VehicleToCloudRun.__logger.debug(f'fill vdop_var:{vdop_var}')
     pos += struct.calcsize('B')
     tapPos_var = self.m_tapPos
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'B', buffer,
                      pos, tapPos_var)
     VehicleToCloudRun.__logger.debug(f'fill tapPos_var:{tapPos_var}')
     pos += struct.calcsize('B')
     steeringAngle_var = self.m_steeringAngle
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'i', buffer,
                      pos, steeringAngle_var)
     VehicleToCloudRun.__logger.debug(
         f'fill steeringAngle_var:{steeringAngle_var}')
     pos += struct.calcsize('i')
     lights_var = self.m_lights
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'H', buffer,
                      pos, lights_var)
     VehicleToCloudRun.__logger.debug(f'fill lights_var:{lights_var}')
     pos += struct.calcsize('H')
     velocityCAN_var = self.m_velocityCAN
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'H', buffer,
                      pos, velocityCAN_var)
     VehicleToCloudRun.__logger.debug(
         f'fill velocityCAN_var:{velocityCAN_var}')
     pos += struct.calcsize('H')
     acceleration_V_var = self.m_acceleration_V
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'H', buffer,
                      pos, acceleration_V_var)
     VehicleToCloudRun.__logger.debug(
         f'fill acceleration_V_var:{acceleration_V_var}')
     pos += struct.calcsize('H')
     acceleration_H_var = self.m_acceleration_H
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'H', buffer,
                      pos, acceleration_H_var)
     VehicleToCloudRun.__logger.debug(
         f'fill acceleration_H_var:{acceleration_H_var}')
     pos += struct.calcsize('H')
     accelPos_var = self.m_accelPos
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'B', buffer,
                      pos, accelPos_var)
     VehicleToCloudRun.__logger.debug(f'fill accelPos_var:{accelPos_var}')
     pos += struct.calcsize('B')
     engineSpeed_var = self.m_engineSpeed
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'i', buffer,
                      pos, engineSpeed_var)
     VehicleToCloudRun.__logger.debug(
         f'fill engineSpeed_var:{engineSpeed_var}')
     pos += struct.calcsize('i')
     engineTorque_var = self.m_engineTorque
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'i', buffer,
                      pos, engineTorque_var)
     VehicleToCloudRun.__logger.debug(
         f'fill engineTorque_var:{engineTorque_var}')
     pos += struct.calcsize('i')
     brakeFlag_var = self.m_brakeFlag
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'B', buffer,
                      pos, brakeFlag_var)
     VehicleToCloudRun.__logger.debug(f'fill brakeFlag_var:{brakeFlag_var}')
     pos += struct.calcsize('B')
     brakePos_var = self.m_brakePos
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'B', buffer,
                      pos, brakePos_var)
     VehicleToCloudRun.__logger.debug(f'fill brakePos_var:{brakePos_var}')
     pos += struct.calcsize('B')
     brakePressure_var = self.m_brakePressure
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'H', buffer,
                      pos, brakePressure_var)
     VehicleToCloudRun.__logger.debug(
         f'fill brakePressure_var:{brakePressure_var}')
     pos += struct.calcsize('H')
     yawRate_var = self.m_yawRate
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'H', buffer,
                      pos, yawRate_var)
     VehicleToCloudRun.__logger.debug(f'fill yawRate_var:{yawRate_var}')
     pos += struct.calcsize('H')
     wheelVelocity_FL_var = self.m_wheelVelocity_FL
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'H', buffer,
                      pos, wheelVelocity_FL_var)
     VehicleToCloudRun.__logger.debug(
         f'fill wheelVelocity_FL_var:{wheelVelocity_FL_var}')
     pos += struct.calcsize('H')
     wheelVelocity_RL_var = self.m_wheelVelocity_RL
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'H', buffer,
                      pos, wheelVelocity_RL_var)
     VehicleToCloudRun.__logger.debug(
         f'fill wheelVelocity_RL_var:{wheelVelocity_RL_var}')
     pos += struct.calcsize('H')
     wheelVelocity_RR_var = self.m_wheelVelocity_RR
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'H', buffer,
                      pos, wheelVelocity_RR_var)
     VehicleToCloudRun.__logger.debug(
         f'fill wheelVelocity_RR_var:{wheelVelocity_RR_var}')
     pos += struct.calcsize('H')
     absFlag_var = self.m_absFlag
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'B', buffer,
                      pos, absFlag_var)
     VehicleToCloudRun.__logger.debug(f'fill absFlag_var:{absFlag_var}')
     pos += struct.calcsize('B')
     tcsFlag_var = self.m_tcsFlag
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'B', buffer,
                      pos, tcsFlag_var)
     VehicleToCloudRun.__logger.debug(f'fill tcsFlag_var:{tcsFlag_var}')
     pos += struct.calcsize('B')
     espFlag_var = self.m_espFlag
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'B', buffer,
                      pos, espFlag_var)
     VehicleToCloudRun.__logger.debug(f'fill espFlag_var:{espFlag_var}')
     pos += struct.calcsize('B')
     lkaFlag_var = self.m_lkaFlag
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'B', buffer,
                      pos, lkaFlag_var)
     VehicleToCloudRun.__logger.debug(f'fill lkaFlag_var:{lkaFlag_var}')
     pos += struct.calcsize('B')
     accMode_var = self.m_accMode
     struct.pack_into(Constant.CLOUD_PROTOCOL_ENDIAN_SIGN + 'B', buffer,
                      pos, accMode_var)
     VehicleToCloudRun.__logger.debug(f'fill accMode_var:{accMode_var}')
     pos += struct.calcsize('B')
     return pos
Example #53
 def move_next_from_pgthb_to_pgtha(self):
     pid, thid, process, thread = struct.unpack_from('=IQBB', self.pgthb, 4)
     self.pgtha_pid = pid
     self.pgtha_thread = thid
     struct.pack_into('=IQBB', self.pgtha, 0, pid, thid, process, thread)
Example #54
 def pack(self, structure, offset, *values):
     struct.pack_into("%s%s" % (self._endian, structure), self.data, offset,
                      *values)
Example #55
def adsSumWrite(
    port: int,
    address: AmsAddr,
    data_names_and_values: Dict[str, Any],
    data_symbols: Dict[str, SAdsSymbolEntry],
) -> Dict[str, ADSError]:
    """Perform a sum write to write the value of multiple ADS variables

    :param int port: local AMS port as returned by adsPortOpenEx()
    :param pyads.structs.AmsAddr address: local or remote AmsAddr
    :param data_names_and_values: dict of variable names and values to be written
    :type data_names_and_values: dict[str, Any]
    :param data_symbols: dict of variable names and ADS symbol info entries
    :type data_symbols: dict[str, SAdsSymbolEntry]
    :return: result: dict of variable names and error codes
    :rtype: dict[str, ADSError]
    """
    offset = 0
    num_requests = len(data_names_and_values)
    total_request_size = num_requests * 3 * 4  # iGroup, iOffset & size

    for data_name in data_names_and_values.keys():
        total_request_size += data_symbols[data_name].size

    buf = bytearray(total_request_size)

    for data_name in data_names_and_values.keys():
        struct.pack_into("<I", buf, offset, data_symbols[data_name].iGroup)
        struct.pack_into("<I", buf, offset + 4, data_symbols[data_name].iOffs)
        struct.pack_into("<I", buf, offset + 8, data_symbols[data_name].size)
        offset += 12

    for data_name, value in data_names_and_values.items():
        if (data_symbols[data_name].dataType != ADST_STRING
                and data_symbols[data_name].dataType != ADST_WSTRING):
            struct.pack_into(
                DATATYPE_MAP[ads_type_to_ctype[
                    data_symbols[data_name].dataType]],
                buf,
                offset,
                value,
            )
        else:
            buf[offset:offset + len(value)] = value.encode("utf-8")
        offset += data_symbols[data_name].size

    sum_response = adsSyncReadWriteReqEx2(
        port,
        address,
        ADSIGRP_SUMUP_WRITE,
        num_requests,
        None,
        buf,
        None,
        return_ctypes=False,
        check_length=False,
    )

    errors = list(struct.iter_unpack("<I", sum_response))
    error_descriptions = [ERROR_CODES[i[0]] for i in errors]

    return dict(zip(data_names_and_values.keys(), error_descriptions))
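The request buffer built above is N 12-byte (iGroup, iOffset, size) triples followed by the raw values in the same order; the response is one 32-bit error code per variable. A layout sketch with two hypothetical symbols (the group, offset and size values are invented):

import struct

requests = [(0x4020, 0x0, 4, "<i", 42), (0x4020, 0x10, 2, "<h", -1)]
buf = bytearray(len(requests) * 12 + sum(r[2] for r in requests))
head, tail = 0, len(requests) * 12
for igroup, ioffs, size, fmt, value in requests:
    struct.pack_into("<III", buf, head, igroup, ioffs, size)  # request header
    struct.pack_into(fmt, buf, tail, value)                   # value payload
    head, tail = head + 12, tail + size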
Example #56
 def write_value(self, key, value):
     if key not in self._positions:
         self._init_value(key)
     pos = self._positions[key]
     # We assume that writing to an 8 byte aligned value is atomic
     struct.pack_into(b'd', self._m, pos, value)
Example #57
    def convert_from_older_version(
        self, msg: AbstractInternalMessage
    ) -> AbstractInternalMessage:
        msg_type = msg.MESSAGE_TYPE

        if msg_type not in self._MSG_TYPE_TO_NEW_MSG_CLASS_MAPPING:
            raise ValueError(
                f"Tried to convert unexpected old message type from v10: {msg_type}"
            )

        new_msg_class = self._MSG_TYPE_TO_NEW_MSG_CLASS_MAPPING[msg_type]
        new_payload_len = msg.payload_len() + self._LENGTH_DIFFERENCE

        default_new_stats = 0
        new_msg_bytes = bytearray(self._NEW_MESSAGE_LEN)
        new_msg_bytes[:self._INTERVAL_TIMES_BREAKPOINT] = msg.rawbytes()[:self._INTERVAL_TIMES_BREAKPOINT]
        off = self._BASE_LENGTH + self._INTERVAL_TIMES_LENGTH

        # memory
        struct.pack_into("<H", new_msg_bytes, off, default_new_stats)
        off += constants.UL_SHORT_SIZE_IN_BYTES

        # single blockchain peer
        struct.pack_into("<H", new_msg_bytes, off, 1)
        off += constants.UL_SHORT_SIZE_IN_BYTES

        # placeholder ip/port
        message_utils.pack_ip_port(new_msg_bytes, off, "0.0.0.0", 0)
        off += constants.IP_ADDR_SIZE_IN_BYTES + constants.UL_SHORT_SIZE_IN_BYTES

        new_msg_bytes[off:off + self._FIRST_STATS_SETS_LENGTH] = \
            msg.rawbytes()[
            self._INTERVAL_TIMES_BREAKPOINT:self._INTERVAL_TIMES_BREAKPOINT + self._FIRST_STATS_SETS_LENGTH
            ]
        off += self._FIRST_STATS_SETS_LENGTH

        struct.pack_into("<I", new_msg_bytes, off, default_new_stats)
        off += constants.UL_INT_SIZE_IN_BYTES
        struct.pack_into("<I", new_msg_bytes, off, default_new_stats)
        off += constants.UL_INT_SIZE_IN_BYTES
        struct.pack_into("<I", new_msg_bytes, off, default_new_stats)
        off += constants.UL_INT_SIZE_IN_BYTES
        struct.pack_into("<I", new_msg_bytes, off, default_new_stats)
        off += constants.UL_INT_SIZE_IN_BYTES
        struct.pack_into("<I", new_msg_bytes, off, default_new_stats)
        off += constants.UL_INT_SIZE_IN_BYTES

        new_msg_bytes[off:] = msg.rawbytes()[self._FIRST_STATS_SETS_BREAKPOINT:]

        return AbstractBloxrouteMessage.initialize_class(
            new_msg_class,
            new_msg_bytes,
            (msg_type, new_payload_len)
        )
Example #58
    def _choose_level(self,
                      environment_id,
                      level_data=None,
                      include_semantic_data=False,
                      pixel_observations=True,
                      tile_observations=False):

        environment_id_bytes = environment_id.encode()
        environment_id_bytes_length = len(environment_id_bytes)

        level_data_bytes_length = 0
        # If we have a custom environment we should also pass the data
        if 'custom' in environment_id:
            if level_data is None:
                level_data = 'wwww\nwA.w\nw0ww\nwwww\n'
            level_data_bytes = level_data.encode()
            level_data_bytes_length = len(level_data_bytes)

        choose_level_data = bytearray(4 + environment_id_bytes_length + 7 +
                                      level_data_bytes_length)

        # Environment Id
        pack_into('>i', choose_level_data, 0, environment_id_bytes_length)
        pack_into('%ds' % environment_id_bytes_length, choose_level_data, 4,
                  environment_id_bytes)

        # Environment Data Options
        pack_into('?', choose_level_data, environment_id_bytes_length + 4,
                  include_semantic_data)
        pack_into('?', choose_level_data, environment_id_bytes_length + 5,
                  tile_observations)
        pack_into('?', choose_level_data, environment_id_bytes_length + 6,
                  pixel_observations)

        # Level Data
        pack_into('>i', choose_level_data, environment_id_bytes_length + 7,
                  level_data_bytes_length)
        if level_data_bytes_length > 0:
            data_start = environment_id_bytes_length + 11
            pack_into('%ds' % level_data_bytes_length, choose_level_data,
                      data_start, level_data_bytes)

        self.io.writeToServer(AgentPhase.CHOOSE_LEVEL_STATE, choose_level_data)
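Assuming the buffer built above is the complete payload, it can be read back as: a big-endian int32 environment-id length, the id bytes, three bool bytes (semantic, tile and pixel flags), a big-endian int32 level-data length, and the level data. A round-trip sketch (this helper is hypothetical, not part of the API above):

import struct

def parse_choose_level(data: bytes):
    (id_len,) = struct.unpack_from('>i', data, 0)
    env_id = data[4:4 + id_len].decode()
    semantic, tiles, pixels = struct.unpack_from('???', data, 4 + id_len)
    (lvl_len,) = struct.unpack_from('>i', data, 4 + id_len + 3)
    level = data[4 + id_len + 7:4 + id_len + 7 + lvl_len].decode()
    return env_id, semantic, tiles, pixels, level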
Example #59
 def pack_into(self, fmt, offset, *args):
     struct.pack_into(fmt, self._buf, offset, *args)
Example #60
    def _transmit_missing_seq_nums(self, seq_nums, transceiver, placement):
        # locate missing seq nums from pile

        missing_seq_nums = self._calculate_missing_seq_nums(seq_nums)
        self._lost_seq_nums.append(len(missing_seq_nums))
        # self._print_missing(seq_nums)
        if len(missing_seq_nums) == 0:
            return True

        # print("doing retransmission")
        # figure out the number of packets given the 2 formats
        n_packets = 1
        length_via_format2 = \
            len(missing_seq_nums) - (self.DATA_PER_FULL_PACKET - 2)
        if length_via_format2 > 0:
            n_packets += int(
                math.ceil(
                    float(length_via_format2) /
                    float(self.DATA_PER_FULL_PACKET - 1)))

        # transmit missing seq as a new sdp packet
        first = True
        seq_num_offset = 0
        for _packet_count in range(0, n_packets):
            length_left_in_packet = self.DATA_PER_FULL_PACKET
            offset = 0
            data = None
            size_of_data_left_to_transmit = None

            # if first, add n packets to list
            if first:

                # get left over space / data size
                size_of_data_left_to_transmit = min(
                    length_left_in_packet - 2,
                    len(missing_seq_nums) - seq_num_offset)

                # build data holder accordingly
                data = bytearray((size_of_data_left_to_transmit + 2) *
                                 self.WORD_TO_BYTE_CONVERTER)

                # pack flag and n packets
                struct.pack_into("<I", data, offset,
                                 self.SDP_PACKET_START_MISSING_SEQ_COMMAND_ID)
                struct.pack_into("<I", data, self.WORD_TO_BYTE_CONVERTER,
                                 n_packets)

                # update state
                offset += 2 * self.WORD_TO_BYTE_CONVERTER
                length_left_in_packet -= 2
                first = False

            else:  # just add data
                # get left over space / data size
                size_of_data_left_to_transmit = min(
                    self.DATA_PER_FULL_PACKET_WITH_SEQUENCE_NUM,
                    len(missing_seq_nums) - seq_num_offset)

                # build data holder accordingly
                data = bytearray((size_of_data_left_to_transmit + 1) *
                                 self.WORD_TO_BYTE_CONVERTER)

                # pack flag
                struct.pack_into("<I", data, offset,
                                 self.SDP_PACKET_MISSING_SEQ_COMMAND_ID)
                offset += 1 * self.WORD_TO_BYTE_CONVERTER
                length_left_in_packet -= 1

            # fill data field
            struct.pack_into(
                "<{}I".format(size_of_data_left_to_transmit), data, offset,
                *missing_seq_nums[seq_num_offset:seq_num_offset +
                                  size_of_data_left_to_transmit])
            seq_num_offset += length_left_in_packet

            # build sdp message
            message = SDPMessage(sdp_header=SDPHeader(
                destination_chip_x=placement.x,
                destination_chip_y=placement.y,
                destination_cpu=placement.p,
                destination_port=self.SDP_PACKET_PORT,
                flags=SDPFlag.REPLY_NOT_EXPECTED),
                                 data=data)

            # debug
            # self._print_out_packet_data(data)

            # send message to core
            transceiver.send_sdp_message(message=message)

            # sleep to ensure the core doesn't lose packets
            time.sleep(self.TIME_OUT_FOR_SENDING_IN_SECONDS)
            # self._print_packet_num_being_sent(packet_count, n_packets)
        return False
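The retransmission request above uses two word-aligned formats: the first packet carries (start command id, n_packets, seq...), and later packets carry only (continue command id, seq...). A layout sketch with placeholder command IDs (the real values come from the class constants referenced above):

import struct

START_CMD, MORE_CMD = 1000, 1001   # hypothetical command IDs
seqs = [3, 7, 8]
first = bytearray((2 + len(seqs)) * 4)
struct.pack_into("<II", first, 0, START_CMD, 2)        # flag + n_packets
struct.pack_into("<%dI" % len(seqs), first, 8, *seqs)  # missing seq nums
more = bytearray((1 + len(seqs)) * 4)
struct.pack_into("<I", more, 0, MORE_CMD)              # flag only
struct.pack_into("<%dI" % len(seqs), more, 4, *seqs)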