Exemplo n.º 1
0
    def unpack_data(data_string, bit_assign):
        """Decode a hex data string into a physical signal value.

        NOTE(review): takes no ``self`` — presumably a ``@staticmethod``
        whose decorator was lost in extraction; confirm against the class.

        Args:
            data_string (str): Hex string of the raw payload (spaces are
                stripped before parsing).
            bit_assign (BitAssign): Bit assignment describing the signal's
                position, length, signedness and scaling.

        Returns:
            (float): Physical value, i.e. raw * resolution + offset.

        """
        raw_bits = bitstring.ConstBitArray(hex=data_string.replace(" ", ""))

        position = bit_assign.data_position
        if "." in position:
            byte_part, _, bit_part = position.partition(".")
            byte_pos = int(byte_part)
            bit_pos = int(bit_part)
        else:
            byte_pos = int(position)
            bit_pos = 0

        end_bit = byte_pos * 8 - bit_pos
        width = int(bit_assign.data_length)  # signal width in bits

        field = bitstring.ConstBitArray(bin=raw_bits.bin[end_bit - width:end_bit])

        # Interpret the extracted field per the assignment's signedness flag.
        raw_value = field.int if bit_assign.signed == '1' else field.uint

        # Scale the raw integer into a physical quantity.
        return raw_value * float(bit_assign.resolution) + float(
            bit_assign.offset_physical)
Exemplo n.º 2
0
    def analyze_csv(self, path_to_csv="", start_time=None, end_time=None):
        """Returns list of CANData.

        Args:
            path_to_csv (str): Path to a csv file containing CAN data.
                Falls back to ``self.path_to_csv`` when empty.
            start_time (datetime): Start-time to extract data
                (NOTE(review): currently unused by the loop body)
            end_time (datetime): End-time to extract data

        Returns:
            (list): A list of CANData, or None when the file is missing

        """
        if path_to_csv == "":
            path_to_csv = self.path_to_csv
        else:
            self.path_to_csv = path_to_csv

        # check file existence
        if not os.path.exists(path_to_csv):
            self.logger.warning("file does not exist: {}".format(path_to_csv))
            return None

        # prepare
        data = []
        line_index = 1
        line_datetime = None

        # start analyzing
        self.logger.debug("started loading csv file")
        # BUG FIX: the 'rU' open mode was deprecated since Python 3.4 and
        # removed in Python 3.11; csv.reader wants newline='' instead.
        with open(path_to_csv, newline='') as f:
            reader = csv.reader(f)
            _ = next(reader)  # Header

            for row in reader:
                timestamp = row[2]
                # 32-bit id rendered as lowercase hex without leading zeros.
                can_id = bitstring.ConstBitArray(uint=int(
                    row[5]), length=32).hex.lstrip("0").lower()
                _ = int(row[6])  # can_length
                # Payload: up to 8 data bytes, columns 7..14, as one hex string.
                can_data = "".join([
                    bitstring.ConstBitArray(uint=int(b), length=8).hex
                    for b in row[7:15]
                ])
                for bit_assign in self.bit_assign_info.bit_assigns_from_can_id(
                        can_id):
                    data.append(
                        CANData(timestamp, bit_assign.data_label,
                                self.unpack_data(can_data, bit_assign)))
                if line_index % 500000 == 0:
                    self.logger.debug("{} lines decoded".format(line_index))
                line_index += 1

        # check
        # NOTE(review): line_datetime is never assigned inside the loop, so
        # this guard can never fire; timestamps stay raw strings. Kept as-is.
        if line_datetime is not None and line_datetime < end_time:
            self.logger.error(
                "This csv file ends {0} before end_time {1}".format(
                    line_datetime, end_time))
            return []

        # return
        return data
Exemplo n.º 3
0
 def testReverseDict(self):
     """Check BYTE_REVERSAL_DICT maps every byte value to its bit-reversed byte."""
     d = bitstring.constbitarray.BYTE_REVERSAL_DICT
     for i in range(256):
         a = bitstring.ConstBitArray(uint=i, length=8)
         b = d[i]
         # .bin is sliced with [2:], so in this bitstring version it appears
         # to carry a '0b' prefix; strip it, reverse, and compare with the
         # table's precomputed reversed byte.
         self.assertEqual(a.bin[2:][::-1],
                          bitstring.ConstBitArray(bytes=b).bin[2:])
Exemplo n.º 4
0
def get_special_payload_bits(payload):
    """Build a UART-like serial bit stream for *payload*.

    Each byte is emitted bit-reversed (LSB first) and framed by a 0 start
    bit and a 1 stop bit, i.e. 10 output bits per input byte.

    Args:
        payload: bytes-like object; assumed to already include the
            checksum byte.

    Returns:
        bitstring.BitArray: the framed bit stream.
    """
    payload_bits = bitstring.BitArray()

    for b_int in bytes(payload):  # assumes the checksum byte is _in_ `payload`
        # BUG FIX: iterating bytes yields ints; the original passed the int
        # to `bytes=`, which does not encode the byte's value. Build the
        # 8-bit field from the integer instead.
        b_bits = bitstring.BitArray(uint=b_int, length=8)
        b_bits.reverse()  # LSB-first on the wire

        payload_bits.append(bitstring.ConstBitArray(bin='0'))  # start bit
        payload_bits.append(b_bits)  # bit-reversed byte
        payload_bits.append(bitstring.ConstBitArray(bin='1'))  # stop bit

    return payload_bits
Exemplo n.º 5
0
    def __init__(self, dict1: int, dict2: int, dict_data_length_in_bytes: int,
                 rank: int, huffman_code):
        """One entry of the Huffman code table.

        Both dict(ionary) values arrive as raw ints and are widened to
        exactly ``dict_data_length_in_bytes`` bytes, big-endian, so that
        e.g. a dict1 value of 2 with a length of 3 becomes
        [0x00, 0x00, 0x02]. ``int.to_bytes(..., "big")`` yields the bytes
        "literally", i.e. with no byte-swapping relative to how they are
        stored in the table.

        :param dict1: dict1 value from the Huffman table, as a raw int
        :param dict2: dict2 value from the Huffman table, as a raw int
        :param dict_data_length_in_bytes: how many bytes the dictX values expand to
        :param rank: the size, in bits, of the Huffman code
        :param huffman_code: the Huffman code as an ASCII string of bits, e.g. '00011001'
        """
        width = dict_data_length_in_bytes
        self.dict1_value = bytearray(dict1.to_bytes(width, "big"))
        self.dict2_value = bytearray(dict2.to_bytes(width, "big"))
        self.dictionary_data_length = width
        self.huffman_code_rank = rank
        self.huffman_code = huffman_code
        # Parsed bit form of the code; used as the lookup key while decoding.
        self.huffman_code_bits = bitstring.ConstBitArray("0b" + str(huffman_code))
Exemplo n.º 6
0
    def _check_huffman_table_random_entries(self, truth_csv_file_fqpn, assert_message_tag: str):
        """Verify huffman_table entries against a truth CSV file.

        Each CSV row is (dict1, dict2, length, depth, huffman_code); the
        table entry keyed by the code's bits must exist and match
        field-for-field. ``assert_message_tag`` (may be None) prefixes
        every assertion message.
        """
        with open(truth_csv_file_fqpn, newline='') as cvsfile:
            reader = csv.reader(cvsfile)
            huffman_table = get_huffman_table()
            for dict1_str, dict2_str, length_str, depth_str, huffman_code in reader:
                huffman_code_bits = bitstring.ConstBitArray("0b" + str(huffman_code))

                # Expected dictionary words, widened to `length` bytes,
                # big-endian — the same construction the table entries use.
                dict1_bytes = bytearray((int(dict1_str)).to_bytes(int(length_str), "big"))
                dict2_bytes = bytearray(int(dict2_str).to_bytes(int(length_str), "big"))
                # check_hte = HuffmanTableEntry(int(dict1), int(dict2), int(length), int(depth), huffman_code)
                hte: HuffmanTableEntry = huffman_table.get(huffman_code_bits)
                self.assertIsNotNone(hte, ("" if assert_message_tag is None else assert_message_tag) +
                                     ": Huffman table should contain entry with huffman code " + str(huffman_code_bits))
                self.assertEqual(dict1_bytes, hte.dict1_value,
                                 ("" if assert_message_tag is None else assert_message_tag) +
                                 ": entry {0} has incorrect 'dictionary word' #1"
                                 .format(str(huffman_code_bits)))
                self.assertEqual(dict2_bytes, hte.dict2_value,
                                 ("" if assert_message_tag is None else assert_message_tag) +
                                 ": entry {0} has incorrect 'dictionary word' #2"
                                 .format(str(huffman_code_bits)))
                self.assertEqual(hte.huffman_code_rank, int(depth_str),
                                 ("" if assert_message_tag is None else assert_message_tag) +
                                 ": entry {0} is of incorrect 'depth'/'rank'".format(str(huffman_code_bits)))
                self.assertEqual(hte.dictionary_data_length, int(length_str),
                                 ("" if assert_message_tag is None else assert_message_tag) +
                                 ": entry {0} is of incorrect 'length' (number of bytes of the 'dictionary word')"
                                 .format(str(huffman_code_bits)))
Exemplo n.º 7
0
def process_lines(candump_file):
    """Decode and print a description of every frame in a candump log.

    Args:
        candump_file: open text file of candump-format lines, i.e.
            ``(timestamp) iface ID#DATA``. Malformed lines are reported on
            stderr and skipped.

    Relies on the module-level ``describe`` callable and ``args``
    (``format``, ``candata``) for output formatting.
    """
    for candump_line in candump_file:  # stream lazily; no readlines() copy
        if candump_line == '\n':
            continue

        try:
            message = candump_line.split()[2]
            # Split "ID#DATA" once instead of twice.
            frame_parts = message.split('#')
            message_id = bitstring.ConstBitArray(hex=frame_parts[0])
            message_data = bitstring.ConstBitArray(hex=frame_parts[1])
        except (IndexError, ValueError):
            print("Warning: error in line '%s'" % candump_line,
                  file=sys.stderr)
            continue

        desc_line = ''

        description = describe(message_data.bytes, message_id.uint)
        if args.format:
            json_description = str(json.dumps(description, indent=4))
        else:
            json_description = str(
                json.dumps(description, separators=(',', ':')))
        if description:
            desc_line = desc_line + json_description

        if args.candata:
            can_line = candump_line.rstrip() + " ; "
            if not args.format:
                desc_line = can_line + desc_line
            else:
                formatted_lines = desc_line.splitlines()
                if not formatted_lines:
                    desc_line = can_line
                else:
                    # First JSON line shares the line with the CAN data;
                    # continuation lines are indented underneath it.
                    desc_line = can_line + formatted_lines[0]
                    pad = ' ' * len(candump_line)
                    for line in formatted_lines[1:]:
                        desc_line = desc_line + '\n' + pad + "; " + line

        if desc_line:
            print(desc_line)
Exemplo n.º 8
0
def position_data_index(fn):
    """Given the filename of a position file, return the bit index of the first
    data record past the header.

    Args:
        fn (str): path to the position file.

    Returns:
        int: bit position immediately after the "%%ENDHEADER" marker.
    """
    hdr_end = "%%ENDHEADER\r\n"
    # BUG FIX: bytearray(str) raises TypeError on Python 3 (an encoding is
    # required); encode the marker explicitly before building the token.
    token = bstr.ConstBitArray(bytes=hdr_end.encode('ascii'))
    bits = bstr.ConstBitStream(filename=fn)
    bits.find(token)  # positions the stream at the marker
    bits.read('bytes:%d' % len(hdr_end))  # consume the marker itself
    return bits.pos
Exemplo n.º 9
0
    def msg_to_data(self, msg, **kwargs):
        """Convert a message to data.

        With a CAN decoder configured, returns one decoded value per known
        signal (None for signals whose CAN id does not match the message);
        without one, returns the raw message fields.
        """
        if self.can_decoder is None:
            return [msg.count, msg.id, msg.len, msg.dat, msg.flag, msg.time]

        # 32-bit id as lowercase hex without leading zeros, to match the
        # can_id strings stored in the bit-assignment info.
        can_id = bitstring.ConstBitArray(uint=int(
            msg.id), length=32).hex.lstrip("0").lower()
        can_data = bitstring.ConstBitArray(bytes=msg.dat).hex

        decoded = []
        signal_list = self.can_decoder.bit_assign_info.signal_list
        for bit_assign in signal_list.values():
            if can_id.replace(" ", "") == bit_assign.can_id:
                decoded.append(
                    self.can_decoder.unpack_data(can_data, bit_assign))
            else:
                decoded.append(None)

        return decoded
Exemplo n.º 10
0
 def testBaselineMemory(self):
     """Sanity-check baseline object sizes (skips when pympler is absent)."""
     try:
         # BUG FIX: ``import pympler.asizeof.asizeof as size`` tries to
         # import a *module* named asizeof.asizeof and always raises
         # ImportError (asizeof is a function), silently skipping this
         # test. Import the function properly instead.
         from pympler.asizeof import asizeof as size
     except ImportError:
         return
     # These values might be platform dependent, so don't fret too much.
     self.assertEqual(size(bitstring.ConstBitStream([0])), 64)
     self.assertEqual(size(bitstring.ConstBitArray([0])), 64)
     self.assertEqual(size(bitstring.BitStream([0])), 64)
     self.assertEqual(size(bitstring.BitArray([0])), 64)
     from bitstring.bitstore import ByteArray
     self.assertEqual(size(ByteArray(bytearray())), 100)
def read_ts(bs):
    """Read a 33-bit timestamp from the next 40 bits of *bs* and return it
    as an unsigned int.

    Advances *bs* by 40 bits. The layout — a 4-bit prefix, then 3 + 15 + 15
    value bits each followed by a 1-bit marker — appears to be the MPEG
    PES PTS/DTS encoding (TODO confirm against the caller).
    """
    ts_bits = bs.read(40)
    ts_bits.pos += 4  # skip the 4-bit prefix
    part1 = ts_bits.read(3)
    ts_bits.pos += 1  # skip marker bit
    part2 = ts_bits.read(15)
    ts_bits.pos += 1  # skip marker bit
    part3 = ts_bits.read(15)
    ts_bits.pos += 1
    # Concatenate the three groups into one 33-bit value.
    parts = [part1.bin, part2.bin, part3.bin]
    ts = bitstring.ConstBitArray().join(['0b' + part for part in parts])
    decoded = ts.uint
    return decoded
Exemplo n.º 12
0
#
# Copyright (c) 2019 National Motor Freight Traffic Association Inc. All Rights Reserved.
# See the file "LICENSE" for the full license governing this code.
#

import json
import bitstring
import sys
import math
from collections import OrderedDict

# Key under which the parameter-group number is reported.
PGN_LABEL = 'PGN'

NA_NAN = float('nan')  # placeholder value for not-available signals
EMPTY_BITS = bitstring.ConstBitArray(bytes=b'')  # zero-length bit field

# Masks over the 29-bit CAN identifier. DA/SA/PF isolate the destination
# address, source address, and PDU-format bytes; the remaining three appear
# to select specific PF values (TM/CM presumably J1939 TP.DT/TP.CM and ACK
# — TODO confirm against the describer code that uses them).
DA_MASK = 0x0000FF00
SA_MASK = 0x000000FF
PF_MASK = 0x00FF0000
TM_MASK = 0x00EB0000
CM_MASK = 0x00EC0000
ACK_MASK = 0x0E80000  # NOTE(review): 7-digit literal; equals 0x00E80000


class DADescriber:
    pgn_objects = dict()
    spn_objects = dict()
    address_names = dict()
    bit_encodings = dict()

    def __init__(self, da_json, describe_pgns, describe_spns,
Exemplo n.º 13
0
def decode_page_from_input_file_i(inp,
                                  lut_entry_for_page: LUTentry,
                                  outp,
                                  offset_into_output,
                                  use_relative_seek=True):
    """
    Decode one Huffman-coded page from the input stream into the output stream.

    :param inp:    input stream (e.g. result of open()) for reading. Position in the input file doesn't matter.
    :param lut_entry_for_page: the LUT table entry giving information about the page from the input that's to be decoded
    :param outp:   output stream, opened for writing binary data. Position doesn't matter
    :param offset_into_output: the position to seek() into the outp stream prior to writing any data
    :param use_relative_seek: debug reserved (caller: leave at default value for normal operation)

    :return: if error, then < 0
        on success:  the total number of bytes written to the output file during this function run
            (NOTE: this is NOT an absolute position in the output file; rather, relative to the 'offset_into_output'
            parameter that was passed in to the function)
    """
    # typehint this for easier debugging
    found: HuffmanTableEntry

    # NOTE: huffman_table, longest/shortest_huffman_code_in_bits and
    # HUFFMAN_PAGE_DECODED_SIZE_MAX are module-level globals built elsewhere
    # in this script (from the code-table input file).

    # relative position trackers for input and output stream, to keep track where I am in the input page and output
    #   "page"
    read_position = 0
    write_position = 0
    # cue up to start writing at the correct position in the output file; This is passed in by the caller explicitly,
    #   as this is expected to be tracked externally. The caller may just decide to write the pages to different files,
    #   or any number of conceivable schemes...
    outp.seek(offset_into_output)

    # how much to refill the bitbuffer (from the input stream) when it runs low on bits (i.e. won't be able to match
    #   the longest_huffman_code_in_bits -sized codes in the table)
    #   This is purely heuristically decided; it may or may not have much effect on decoding speed; I have not done
    #      much testing or benchmarking on it. In any case, don't set it to be LESS THAN longest_huffman_code_in_bits
    #       (or else the bitbuffer won't fill enough in the worst case, and the decoding will end prematurely)
    bitbuffer_topoff_level = longest_huffman_code_in_bits * 10

    if lut_entry_for_page.size == 0:
        # this is the final page, which ends at the end of the file
        # find the end of the file; not strictly needed but helps with debugging (instead could set end as practical
        # infinity, like MAX_LONGINT)
        # inp.seek(0, os.SEEK_END)
        # lut_entry_for_page.size = inp.tell() - lut_entry_for_page.offset
        lut_entry_for_page.size = HUFFMAN_PAGE_DECODED_SIZE_MAX

    # seek to the correct offset for the start of the page
    if use_relative_seek:
        inp.seek(lut_entry_for_page.offset, os.SEEK_CUR)
    else:
        inp.seek(lut_entry_for_page.offset, os.SEEK_SET)

    end_page_position = lut_entry_for_page.offset + lut_entry_for_page.size

    # initial read of the input data:
    # read enough bits to represent the longest huffman code (largest 'rank') in the table
    # (i.e. ceil(longest_huffman_code_in_bits / 8) bytes)
    bytes_for_longest_code = (int(longest_huffman_code_in_bits / 8) +
                              (1 if
                               (longest_huffman_code_in_bits % 8 > 0) else 0))
    # don't read past the end of the file (very unlikely here since I am just starting!)
    bytes_to_read_next: int = int(
        min(bytes_for_longest_code, end_page_position - read_position))
    in_data = inp.read(bytes_to_read_next)
    read_position += len(in_data)

    # initialize the bitbuffer; initially it has 0 valid bits in it
    valid_bits_in_bitbuffer = 0
    bitbuffer = bitstring.BitArray()

    # ...and now go into a loop, reading and matching codes...
    # The essence of the routine is that the bitbuffer gets bits from the input "added in" (much like a shift-in)
    #   "from the right side" (if you imagine the bitbuffer like a pipe with a left and right opening). There are always
    #   enough bits added in so as to have *at least* enough bits in the bitbuffer to match the Huffman codes in the
    #   table that are the "longest" (i.e. consist of the most bits; I call it "rank" in the code table entries).
    #   (the longest and shortest code in bits is pre-calculated and stored in longest_huffman_code_in_bits, and
    #       shortest_huffman_code_in_bits)
    #   Then the bitbuffer is "slice-copied" "from the left", in a loop, with the slices getting consecutively smaller
    #       each time through the loop, from the longest code possible to the shortest. So e.g. if the longest code was
    #       10 bits in size and the shortest was 8, there would be 3 iterations of the loop, the 1st iter. slicing off
    #       10 bits from [0:10], the 2nd slicing off [0:9] and the 3rd iter. slicing off 8 bits, from [0:8]...and each
    #       slice (which is just another bitbuffer) being checked against the hashtable(*) of all loaded Huffman codes
    #       which was done previously in the script, from the code table input file
    #       (*) (Python: hash table == "dictionary", but not to be confused with the Huffman Code table use of the word
    #           "dictionary" to mean 'decoded data')
    #   When a match is found, the corresponding translation to decoded data (in the code table entry retrieved from
    #       the hashtable), is written to the output, and the size in bits of the match is "invalidated" "from the left"
    #       of the bitbuffer; these bits are subsequently deleted/trimmed-off when the loop reiterates. Effectively,
    #       this reduces the size of the bitbuffer, and if it falls below the threshold of bits, more data is read in
    #       from the input....(and now start reading from the top of this comment block again).
    #   There are TWO termination conditions of the loop (which are considered non-error/valid):
    #   1.  The output reaches HUFFMAN_PAGE_DECODED_SIZE_MAX bytes in size.
    #       By convention/design of this particular variant of Huffman table
    #       encoding, no output page is ever larger than this. As soon as this limit is reached, decoding stops
    #       (the function returns immediately in this case)
    #   2.  The entire input page is read (by the read_position reaching the end_position, as determined by the "size"
    #       of the LUT table entry for the page that was passed in as a parameter).
    #       In this case, the bitbuffer is checked to see if it contains any bits, and if so, if it contains AT LEAST
    #       shortest_huffman_code_in_bits number of bits. If so, then the matching loop is repeated as described above,
    #       except this time, the bitbuffer is not "re-filled" from input data; it simply keeps matching until empty or
    #       no more valid matches can be made. At that point, decoding is considered complete
    #       (Note: even in case #2, the output size is checked, and if it reaches HUFFMAN_PAGE_DECODED_SIZE_MAX,
    #       decoding stops immediately regardless of bits left over in the bitbuffer, if any
    #       (these are considered extraneous/pad/junk bits)
    #
    while read_position < end_page_position:

        new_data_size_in_bits = 8 * len(in_data)
        # prune the bitbuffer to the number of valid bits
        del bitbuffer[0:len(bitbuffer) - valid_bits_in_bitbuffer]
        if len(in_data) > 0:
            # it is safe that this doesn't have an 'else' clause; In case of no new data, the bitbuffer just
            # continues on in its present state, with its existing bits. In fact, this is exactly what happens
            # when the while-loop containing this if-clause exits. Admittedly, it's pretty strange it would happen here
            # as the read_position is still within the page (thus there should be more data!); the caller really
            # shouldn't have called this function if there was no new data. However, it's not *necessarily* a
            # fatal error...if enough bits exist in the bitbuffer to make a match.
            # (and if not, then this function will error out safely anyways; see below, "if not found:" )
            # ...so might as well let it try...
            bitbuffer.append(
                bitstring.BitArray(bytes=in_data,
                                   length=new_data_size_in_bits))
            # print("bitbuffer is up to "+str(len(bitbuffer))+" bits size")
            in_data = []

        valid_bits_in_bitbuffer += new_data_size_in_bits
        longest_code_possible = int(
            min(longest_huffman_code_in_bits, valid_bits_in_bitbuffer))
        found = None
        nbits_of_hcode = 0
        # try the longest prefix first, shrinking by one bit each iteration
        for nbits_of_hcode in range(longest_code_possible,
                                    shortest_huffman_code_in_bits - 1, -1):
            candidate_code = bitstring.ConstBitArray(
                bitbuffer[0:nbits_of_hcode])
            found = huffman_table.get(candidate_code)
            if found:
                break
        # (end matching loop)
        if not found:
            # error...no code found
            print("error: no matching code for current bitbuffer: " +
                  str(bitbuffer.bin) + " ; aborting.")
            return -1
        # else...
        # a code was found.
        dcode_to_write = found.dict2_value if lut_entry_for_page.dictionary_selector == 1 else found.dict1_value
        outp.write(dcode_to_write)
        write_position += len(dcode_to_write)
        if write_position >= HUFFMAN_PAGE_DECODED_SIZE_MAX:
            # this is the page limit...exit (not an error)
            return write_position
        # it's nbits_of_hcode in size, so this consumes nbits_of_hcode of bits in the bitbuffer
        # ===> denote that fact by decreasing the valid_bits_in_bitbuffer var
        valid_bits_in_bitbuffer -= nbits_of_hcode

        if valid_bits_in_bitbuffer < longest_huffman_code_in_bits:
            # check to see if the bitbuffer is "running low" (i.e. won't have enough to match the longest known code)
            # If so, decide how much more to read from the input stream to "top off" the bitbuffer, and read it
            # (the read-in data will be added to the bitbuffer once the loop re-loops)
            bytes_to_read_next = int(bitbuffer_topoff_level / 8) + (
                1 if bitbuffer_topoff_level % 8 > 0 else 0)
            # ...but fix-up this value so that the read doesn't go past the end of the page from the input...
            bytes_to_read_next = int(
                min(bytes_to_read_next, end_page_position - read_position))
            # now that that's figured out, actually read in the data and then update the read_position tracking var
            in_data = inp.read(bytes_to_read_next)
            read_position += len(in_data)
        else:
            # still O.k. in the bitbuffer...plenty of bits left to do matching; no need to go read more just yet
            # ((re)set these variables for safety and consistency for the next loop iteration)
            bytes_to_read_next = 0
            in_data = []
    # (end while loop ; while more bytes to read in the current page of coded data)

    # reached here because there are no more bytes to read from the input...check to see if there is anything left
    # in the bitbuffer. If there is, then attempt matches until either: 1. nothing matches or
    # 2. there are < shortest_huffman_code_in_bits left in the bitbuffer (in which case 1. will apply as well)

    # prune the bitbuffer to the number of valid bits (if already pruned, nothing bad will happen)
    del bitbuffer[0:len(bitbuffer) - valid_bits_in_bitbuffer]
    # start another loop like the file reading one above, but this time the loop just matches the bits already in the
    #   bitbuffer, and it doesn't refill them from an input stream. As before, every match "takes out" bits, and the
    #   bitbuffer gets smaller. Once a match can no longer be made with any of the known Huffman codes in the table,
    #   the loop is complete and the function is done
    while len(bitbuffer) >= shortest_huffman_code_in_bits:
        # prune the bitbuffer to the number of valid bits
        del bitbuffer[0:len(bitbuffer) - valid_bits_in_bitbuffer]
        longest_code_possible = int(
            min(longest_huffman_code_in_bits, valid_bits_in_bitbuffer))
        found = None
        nbits_of_hcode = 0
        for nbits_of_hcode in range(longest_code_possible,
                                    shortest_huffman_code_in_bits - 1, -1):
            candidate_code = bitstring.ConstBitArray(
                bitbuffer[0:nbits_of_hcode])
            found = huffman_table.get(candidate_code)
            if found:
                break
        # (end matching loop)
        if found:
            dcode_to_write = found.dict2_value if lut_entry_for_page.dictionary_selector == 1 else found.dict1_value
            outp.write(dcode_to_write)
            write_position += len(dcode_to_write)
            if write_position >= HUFFMAN_PAGE_DECODED_SIZE_MAX:
                # this is the page limit...exit (not an error)
                return write_position
            # it's nbits_of_hcode in size, so this consumes nbits_of_hcode of bits in the bitbuffer
            # ===> denote that fact by decreasing the valid_bits_in_bitbuffer var
            valid_bits_in_bitbuffer -= nbits_of_hcode
        else:
            # error...no code found, so this is the end. clear the bitbuffer
            bitbuffer.clear()
            valid_bits_in_bitbuffer = 0
    # (end while depleting all the bits left in the bitbuffer through matching)

    # return the total number of bytes written to the output file during this function run
    # (NOTE: this is NOT an absolute position in the output file; rather, relative to the 'offset_into_output'
    #   parameter that was passed in to the function)
    return write_position
Exemplo n.º 14
0
        describe_spns=args.spn,
        describe_link_layer=args.link,
        describe_transport_layer=args.transport,
        real_time=args.real_time,
        include_transport_rawdata=args.candata,
        include_na=args.include_na)
    with open(args.candump, 'r') as f:
        for candump_line in f.readlines():
            if candump_line == '\n':
                continue

            try:
                timestamp = float(
                    candump_line.split()[0].lstrip('(').rstrip(')'))
                message = candump_line.split()[2]
                message_id = bitstring.ConstBitArray(hex=message.split('#')[0])
                message_data = bitstring.ConstBitArray(
                    hex=message.split('#')[1])
            except (IndexError, ValueError):
                print("Warning: error in line '%s'" % candump_line,
                      file=sys.stderr)
                continue

            desc_line = ''

            description = describe(message_data.bytes, message_id.uint)
            if args.format:
                json_description = str(json.dumps(description, indent=4))
            else:
                json_description = str(
                    json.dumps(description, separators=(',', ':')))
Exemplo n.º 15
0
 def representation(self, fingerprint):
     """Return a 'mean(<hex bits>,<value>)' string for *fingerprint*."""
     hex_bits = bitstring.ConstBitArray(fingerprint.bits.flat).hex
     return "mean(%s,%s)" % (hex_bits, fingerprint.value)
Exemplo n.º 16
0
 def bitstring(self, fingerprint):
     """Pack the fingerprint's flat bit iterable into a ConstBitArray."""
     flat_bits = fingerprint.bits.flat
     return bitstring.ConstBitArray(flat_bits)
Exemplo n.º 17
0
 def representation(self, fingerprint):
     """Return a '2bits(<bits>,<value>)' string, two bits per element."""
     two_bit_fields = (bitstring.BitArray(uint=v, length=2)
                       for v in fingerprint.bits.flat)
     joined = bitstring.ConstBitArray().join(two_bit_fields)
     return "2bits(%s,%s)" % (joined, fingerprint.value)
Exemplo n.º 18
0
 def representation(self, fingerprint):
     """Return a 'meany(<hex bits>,<scaled hex value>)' string."""
     hex_bits = bitstring.ConstBitArray(fingerprint.bits.flat).hex
     scaled = hex(self.scaled_value(fingerprint))
     return "meany(%s,%s)" % (hex_bits, scaled)
Exemplo n.º 19
0
 def testCBA(self):
     """Equal hex initialisers yield the very same cached ConstBitArray."""
     first = bitstring.ConstBitArray('0xf')
     second = bitstring.ConstBitArray('0xf')
     self.assertTrue(first is second)
     # A different (binary) initialiser string is not the cached object,
     # even though it denotes the same bits.
     third = bitstring.ConstBitArray('0b1111')
     self.assertFalse(first is third)
Exemplo n.º 20
0
 def testConstBitArrayCopy(self):
     """copy.copy() of an immutable ConstBitArray returns the same object."""
     from copy import copy
     original = bitstring.ConstBitArray(100)
     duplicate = copy(original)
     self.assertTrue(original is duplicate)
Exemplo n.º 21
0
                    help='format each structure (otherwise single-line)')

args = parser.parse_args()

describer = pretty_j1939.parse.get_describer(describe_pgns=args.pgn, describe_spns=args.spn,
                                             describe_link_layer=args.link, describe_transport_layer=args.transport,
                                             include_transport_rawdata=args.candata,
                                             include_na=args.include_na)
if __name__ == '__main__':
    with open(args.candump, 'r') as f:
        for candump_line in f.readlines():
            if candump_line == '\n':
                continue
            try:
                timestamp = float(candump_line.split(' ')[0].replace('(', '').replace(')', ''))
                message_id = bitstring.ConstBitArray(hex=candump_line.split(' ')[2].split('#')[0])
                message_data = bitstring.ConstBitArray(hex=candump_line.split(' ')[2].split('#')[1])

            except IndexError:
                print("Warning: error in line '%s'" % candump_line, file=sys.stderr)
                continue
            except ValueError:
                print("Warning: error in line '%s'" % candump_line, file=sys.stderr)
                continue

            desc_line = ''

            description = describer(message_data.bytes, message_id.uint)
            if args.format:
                json_description = str(json.dumps(description, indent=4))
            else:
Exemplo n.º 22
0
    def decodets(self):
        """Decode one 188-byte MPEG transport-stream packet from ``self.bits``.

        Reads the 4-byte TS header, the adaptation field when one is
        signalled, and — when a PES header follows — extracts the PTS/DTS
        timestamps.

        Returns:
            TSPacket: the decoded packet fields, or None when the reader is
            not positioned on a 0x47 sync byte.
        """
        # All optional fields default to False so TSPacket always receives a
        # value, even when the corresponding structure is absent.
        adaptation_size = False
        av = False
        adapt = False
        pestype = False
        ptspresent = False
        dtspresent = False
        decodedpts = False
        decodeddts = False
        pcr = False
        opcr = False
        discontinuity = False
        random_access = False  # renamed from 'random' to avoid shadowing the stdlib module
        espriority = False
        pcrpresent = False
        opcrpresent = False
        splicingpoint = False
        transportprivate = False
        adaptation_ext = False
        packsize = 188  # fixed MPEG-TS packet size in bytes
        sync = self.bits.read(8).hex
        # NOTE(review): comparing against '0x47' relies on the legacy
        # bitstring API (ConstBitArray era) where .hex/.bin carry their
        # '0x'/'0b' prefixes — confirm the pinned bitstring version.
        if sync == '0x47':
            tei = self.bits.read(1).uint            # transport error indicator
            pusi = self.bits.read(1).uint           # payload unit start indicator
            transportpri = self.bits.read(1).uint   # transport priority
            pid = self.bits.read(13).uint
            # Rest of the packet: 188 bytes minus the sync byte and the two
            # bytes holding tei/pusi/priority/pid read above.
            packet = self.bits.read((packsize-3)*8)
            scramblecontrol = packet.read(2).uint
            adapt = packet.read(2).uint             # adaptation_field_control
            concounter = packet.read(4).uint        # continuity counter
            if adapt == 3:
                # Adaptation field followed by payload.
                adaptation_size = packet.read(8).uint
                discontinuity = packet.read(1).uint
                random_access = packet.read(1).uint
                espriority = packet.read(1).uint
                pcrpresent = packet.read(1).uint
                opcrpresent = packet.read(1).uint
                splicingpoint = packet.read(1).uint
                transportprivate = packet.read(1).uint
                adaptation_ext = packet.read(1).uint
                restofadapt = (adaptation_size+3) - 1
                if pcrpresent == 1:
                    pcr = packet.read(48)
                    restofadapt -=  6
                if opcrpresent == 1:
                    opcr = packet.read(48)
                    restofadapt -=  6
                # Skip the remainder of the adaptation field to reach the payload.
                packet.pos += (restofadapt-3) * 8
                if ((packet.len - packet.pos)/8) > 5:
                    pesync = packet.read(24).hex
                    if pesync == ('0x000001'):
                        pestype = packet.read(8).uint
                        # PES stream_id ranges: 0xE0-0xEF video, 0xC0-0xDF audio.
                        if pestype > 223 and pestype < 240:
                            av = 'video'
                        if pestype < 223 and pestype > 191:
                            av = 'audio'
                        packet.pos += (3*8)  # skip up to the PTS_DTS_flags bits
                        ptspresent = packet.read(1).uint
                        dtspresent = packet.read(1).uint
                        if ptspresent:
                            packet.pos += (14)
                            # PTS: a 33-bit value split 3/15/15 across 5 bytes,
                            # with a marker bit after each piece.
                            pts = packet.read(40)
                            pts.pos = 4
                            firstpartpts = pts.read(3)
                            pts.pos += 1
                            secondpartpts = pts.read(15)
                            pts.pos += 1
                            thirdpartpts = pts.read(15)
                            decodedpts = bitstring.ConstBitArray().join([firstpartpts.bin, secondpartpts.bin, thirdpartpts.bin]).uint
                        if dtspresent:
                            # DTS has the same 3/15/15 layout as PTS.
                            dts = packet.read(40)
                            dts.pos = 4
                            firstpartdts = dts.read(3)
                            dts.pos += 1
                            secondpartdts = dts.read(15)
                            dts.pos += 1
                            thirdpartdts = dts.read(15)
                            decodeddts = bitstring.ConstBitArray().join([firstpartdts.bin, secondpartdts.bin, thirdpartdts.bin]).uint
            elif adapt == 2:
                # If adapt is 2 the packet is only an adaptation field (no payload).
                adaptation_size = packet.read(8).uint
                discontinuity = packet.read(1).uint
                random_access = packet.read(1).uint
                espriority = packet.read(1).uint
                pcrpresent = packet.read(1).uint
                opcrpresent = packet.read(1).uint
                splicingpoint = packet.read(1).uint
                transportprivate = packet.read(1).uint
                adaptation_ext = packet.read(1).uint
                restofadapt = (adaptation_size+3) - 1
                if pcrpresent == 1:
                    pcr = packet.read(48)
                    restofadapt -=  6
                if opcrpresent == 1:
                    opcr = packet.read(48)
                    restofadapt -=  6
            elif adapt == 1:
                # Payload only — the PES header may start immediately.
                pesync = packet.read(24).hex
                if pesync == ('0x000001'):
                    pestype = packet.read(8).uint
                    if pestype > 223 and pestype < 240:
                        av = 'video'
                    if pestype < 223 and pestype > 191:
                        av = 'audio'
                    packet.pos += 24
                    ptspresent = packet.read(1).uint
                    dtspresent = packet.read(1).uint
                    if ptspresent:
                        # NOTE(review): this branch reads only 30 of the 33
                        # PTS bits (15/15), unlike the 3/15/15 read in the
                        # adapt == 3 path — confirm this is intentional.
                        packet.pos += 22
                        pts = packet.read(32)
                        firstpart = pts.read(15)
                        pts.pos += 1
                        secondpart = pts.read(15)
                        decodedpts = bitstring.ConstBitArray().join([firstpart.bin, secondpart.bin]).uint
                    if dtspresent:
                            dts = packet.read(40)
                            dts.pos = 4
                            firstpartdts = dts.read(3)
                            dts.pos += 1
                            secondpartdts = dts.read(15)
                            dts.pos += 1
                            thirdpartdts = dts.read(15)
                            decodeddts = bitstring.ConstBitArray().join([firstpartdts.bin, secondpartdts.bin, thirdpartdts.bin]).uint

        else:
            # Fixed: was a tab-indented Python 2 print statement — a
            # SyntaxError under Python 3, which this file otherwise targets.
            print("No sync byte")
            return

        tsobj = TSPacket(sync, tei, transportpri, pusi, pid, scramblecontrol, adapt, concounter, adaptation_size, discontinuity, \
        random_access, espriority, pcrpresent, opcrpresent, splicingpoint, transportprivate, adaptation_ext, pcr, opcr, pestype, \
        ptspresent, dtspresent, decodedpts, decodeddts, av)

        return tsobj
# Exemplo n.º 23
# 0
    describe_spns=args.spn,
    describe_link_layer=args.link,
    describe_transport_layer=args.transport,
    include_transport_rawdata=args.candata,
    include_na=args.include_na)
if __name__ == '__main__':
    with open(args.candump, 'r') as f:
        for candump_line in f.readlines():
            if candump_line == '\n':
                continue
            try:
                #  (0000000029.695353)  can1  0CF00400   [8]  51 81 8A 44 14 00 F3 8A
                timestamp = float(
                    candump_line.split(' ')[1].replace('(',
                                                       '').replace(')', ''))
                message_id = bitstring.ConstBitArray(
                    hex=candump_line.split(' ')[5])
                message_data = bitstring.ConstBitArray(
                    hex=''.join(candump_line.split(' ')[10:]))
                # print(timestamp)
                # print(message_id)
                # print(message_data)
                if message_id in id_dict:
                    id_dict[message_id] += 1
                else:
                    id_dict[message_id] = 1

            except IndexError:
                print("Warning: error in line '%s'" % candump_line,
                      file=sys.stderr)
                continue
            except ValueError: