예제 #1
0
import posixpath
import codecs
from struct import Struct
from random import SystemRandom
from operator import xor
from itertools import starmap

from werkzeug._compat import range_type, PY2, text_type, izip, to_bytes, \
     string_types, to_native


# Alphabet used when generating random salts (alphanumeric ASCII only).
SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
# Default PBKDF2 round count.  NOTE(review): 1000 is low by modern standards.
DEFAULT_PBKDF2_ITERATIONS = 1000


# Packs an int as 4 big-endian bytes (the PBKDF2 block index).
_pack_int = Struct('>I').pack
# Constant-time comparison from the stdlib, when the interpreter provides it.
# NOTE(review): ``hmac`` is not imported in this view -- presumably imported
# at the top of the full file; confirm.
_builtin_safe_str_cmp = getattr(hmac, 'compare_digest', None)
# Cryptographically strong RNG for salt generation (not ``random.random``).
_sys_rng = SystemRandom()
# Path separators other than '/' (e.g. '\\' on Windows), used when
# sanitizing user-supplied file names.
# NOTE(review): ``os`` is also not imported in this view -- confirm.
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
                    if sep not in (None, '/'))


def _find_hashlib_algorithms():
    algos = getattr(hashlib, 'algorithms', None)
    if algos is None:
        algos = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
    rv = {}
    for algo in algos:
        func = getattr(hashlib, algo, None)
        if func is not None:
            rv[algo] = func
예제 #2
0
class Block1(BlockBase):
    """A single on-disk posting block: ids plus optional weights and values.

    NOTE(review): Python 2 era code (``xrange``, ``array.fromstring``); it
    relies on module-level helpers (``byte_to_length``, ``utf8decode``,
    ``decompress``, ``IS_LITTLE``, ``_INT_SIZE``, ``_FLOAT_SIZE``) defined
    elsewhere in the file.
    """

    # On-disk header format
    #
    # Offset  Type  Desc
    # ------  ----  -------
    # 0       B     Flags
    # 1       B     (Unused)
    # 2       H     (Unused)
    # 4       i     Delta to start of next block
    # ------------- If byte 0 == 0, the first 8 bytes are an absolute pointer
    #               to the next block (backwards compatibility)
    #
    # 8       H     Length of the compressed IDs, or 0 if IDs are not
    #               compressed
    # 10      H     Length of the compressed weights, or 0 if the weights are
    #               not compressed, or 1 if the weights are all 1.0.
    # 12      B     Number of posts in this block
    # 13      f     Maximum weight in this block (used for quality)
    # 17      f     Maximum (weight/fieldlength) in this block (for quality)
    # 21      f     (Unused)
    # 25      B     Minimum length in this block, encoded as byte (for quality)
    #
    # Followed by either an unsigned int or string indicating the last ID in
    # this block

    _struct = Struct("!BBHiHHBfffB")
    magic = -48626

    @classmethod
    def from_file(cls, postfile, stringids=False):
        """Read a block header from ``postfile`` and return a ``Block1``.

        Leaves the file positioned at the start of the block's payload
        (recorded in ``block.dataoffset``); the id/weight/value payloads
        are read lazily by the ``read_*`` methods below.
        """
        pos = postfile.tell()
        block = cls(postfile, stringids=stringids)

        encoded_header = postfile.read(cls._struct.size)
        header = cls._struct.unpack(encoded_header)
        (flags, _, _, nextoffset, block.idslen, block.weightslen,
         block.postcount, block.maxweight, block.maxwol, _, minlength) = header

        # nextoffset is stored as a delta from the block start (see the
        # header comment above for the legacy absolute-pointer form).
        block.nextoffset = pos + nextoffset
        block.minlength = byte_to_length(minlength)

        assert block.postcount > 0, "postcount=%r" % block.postcount

        # The last ID in the block follows the fixed header: a UTF-8 string
        # for string ids, otherwise an unsigned int.
        if stringids:
            block.maxid = utf8decode(postfile.read_string())[0]
        else:
            block.maxid = postfile.read_uint()

        block.dataoffset = postfile.tell()

        return block

    def read_ids(self):
        """Read (and cache in ``self.ids``) this block's posting IDs.

        Also records ``self.weights_offset`` -- the file offset where the
        weights payload begins.
        """
        postfile = self.postfile
        offset = self.dataoffset
        postcount = self.postcount
        postfile.seek(offset)

        if self.stringids:
            rs = postfile.read_string
            ids = [utf8decode(rs())[0] for _ in xrange(postcount)]
            newoffset = postfile.tell()
        elif self.idslen:
            # idslen != 0 means the IDs are stored compressed as big-endian
            # uints (hence the byteswap on little-endian hosts).
            ids = array("I")
            ids.fromstring(decompress(postfile.read(self.idslen)))
            if IS_LITTLE:
                ids.byteswap()
            newoffset = offset + self.idslen
        else:
            ids = postfile.read_array("I", postcount)
            newoffset = offset + _INT_SIZE * postcount

        self.ids = ids
        self.weights_offset = newoffset
        return ids

    def read_weights(self):
        """Read (and cache in ``self.weights``) this block's weights.

        Requires :meth:`read_ids` to have set ``self.weights_offset``.
        Returns ``None`` when ``weightslen == 1`` (header sentinel: all
        weights are 1.0).  Records ``self.values_offset``.
        """
        postfile = self.postfile
        offset = self.weights_offset
        postfile.seek(offset)
        weightslen = self.weightslen
        postcount = self.postcount

        if weightslen == 1:
            # Sentinel: every weight is 1.0, nothing stored on disk.
            weights = None
            newoffset = offset
        elif weightslen:
            # Compressed floats, stored big-endian on disk.
            weights = array("f")
            weights.fromstring(decompress(postfile.read(weightslen)))
            if IS_LITTLE:
                weights.byteswap()
            newoffset = offset + weightslen
        else:
            weights = postfile.get_array(offset, "f", postcount)
            newoffset = offset + _FLOAT_SIZE * postcount

        self.weights = weights
        self.values_offset = newoffset
        return weights

    def read_values(self):
        """Read this block's value strings into ``self.values``.

        Requires :meth:`read_weights` to have set ``self.values_offset``.
        NOTE(review): unlike the sibling readers this method does not return
        the result; callers must use ``self.values``.
        """
        postfile = self.postfile
        startoffset = self.values_offset
        endoffset = self.nextoffset
        postcount = self.postcount

        # postingsize semantics: > 0 fixed-size values, < 0 variable-size
        # values (lengths prefixed), == 0 no stored values at all.
        postingsize = self.postingsize
        if postingsize != 0:
            values_string = postfile.map[startoffset:endoffset]

            if self.weightslen:
                # Values string is compressed
                # NOTE(review): compression here is keyed on ``weightslen``
                # (the weights flag) rather than a dedicated values flag --
                # confirm this is intended.
                values_string = decompress(values_string)

            if postingsize < 0:
                # Pull the array of value lengths off the front of the string
                lengths = array("i")
                lengths.fromstring(values_string[:_INT_SIZE * postcount])
                values_string = values_string[_INT_SIZE * postcount:]

            # Chop up the block string into individual valuestrings
            if postingsize > 0:
                # Format has a fixed posting size, just chop up the values
                # equally
                values = [
                    values_string[i * postingsize:i * postingsize +
                                  postingsize] for i in xrange(postcount)
                ]
            else:
                # Format has a variable posting size, use the array of lengths
                # to chop up the values.
                pos = 0
                values = []
                for length in lengths:
                    values.append(values_string[pos:pos + length])
                    pos += length
        else:
            # Format does not store values (i.e. Existence), just create fake
            # values
            values = (None, ) * postcount

        self.values = values
        line = f.read(record_struct.size)
        if line == b'':
            break
        yield decode_record(record_struct, line)


def _decode_record(record_struct, line):
    return tuple(s.decode() for s in record_struct.unpack_from(line))


def decode_record(rec):
    """Decode each bytes field of an already-unpacked record tuple to str."""
    decoded = []
    for field in rec:
        decoded.append(field.decode())
    return tuple(decoded)


if __name__ == '__main__':
    # Will throw an AssertionError if the Length variable within the control file is wrong
    check_ctl('/some/dir/to/keep.csv')

    # The control file drives the fixed-width layout: per-field widths plus
    # which fields to keep in the CSV output.
    # NOTE(review): check_ctl/import_ctl/create_fmt and ``csv`` are not
    # defined/imported in this view -- presumably imported at file top.
    field_widths, keep_fields = import_ctl('/some/dir/to/keep.csv')
    fmt_string = create_fmt(field_widths, keep_fields)
    record_struct = Struct(fmt_string)

    # NOTE(review): only the first 10 records are converted here
    # (record_struct.size * 10); the commented-out read_records() call below
    # suggests a streaming alternative for whole files.
    with open('/some/dir/to/fixedfield/split1_sample', 'rb') as infile:
        with open('/some/dir/to/fixedfield/split1_sample.csv', 'w',
                  newline='') as outfile:
            csv_writer = csv.writer(outfile, delimiter=',')
            for rec in record_struct.iter_unpack(
                    infile.read(record_struct.size * 10)):
                # for rec in read_records(record_struct, infile):
                csv_writer.writerow(decode_record(rec))
예제 #4
0
 def str_struct(char_count):
     format_ = '%ds' % char_count
     return Struct(format_)
예제 #5
0
def data_element_generator(fp,
                           is_implicit_VR,
                           is_little_endian,
                           stop_when=None,
                           defer_size=None,
                           encoding=default_encoding,
                           specific_tags=None):
    """Create a generator to efficiently return the raw data elements.

    .. note::

        This function is used internally - usually there is no need to call it
        from user code. To read data from a DICOM file, :func:`dcmread`
        shall be used instead.

    Parameters
    ----------
    fp : file-like
        The file-like to read from.
    is_implicit_VR : bool
        ``True`` if the data is encoded as implicit VR, ``False`` otherwise.
    is_little_endian : bool
        ``True`` if the data is encoded as little endian, ``False`` otherwise.
    stop_when : None, callable, optional
        If ``None`` (default), then the whole file is read. A callable which
        takes tag, VR, length, and returns ``True`` or ``False``. If it
        returns ``True``, ``read_data_element`` will just return.
    defer_size : int, str, None, optional
        See :func:`dcmread` for parameter info.
    encoding :
        Encoding scheme
    specific_tags : list or None
        See :func:`dcmread` for parameter info.

    Returns
    -------
    VR : str or None
        ``None`` if implicit VR, otherwise the VR read from the file.
    length : int
        The length of the DICOM data element (could be DICOM "undefined
        length" ``0xFFFFFFFFL``)
    value_bytes : bytes or str
        The raw bytes from the DICOM file (not parsed into Python types)
    is_little_endian : bool
        ``True`` if transfer syntax is little endian; else ``False``.

    Yields
    ------
    RawDataElement or DataElement
        NOTE(review): despite the "Returns" section above this function is a
        generator; defined-length elements and undefined-length non-SQ
        elements are yielded as ``RawDataElement``, while undefined-length
        SQ elements are yielded as ``DataElement``.
    """
    # Summary of DICOM standard PS3.5-2008 chapter 7:
    # If Implicit VR, data element is:
    #    tag, 4-byte length, value.
    #        The 4-byte length can be FFFFFFFF (undefined length)*
    #
    # If Explicit VR:
    #    if OB, OW, OF, SQ, UN, or UT:
    #       tag, VR, 2-bytes reserved (both zero), 4-byte length, value
    #           For all but UT, the length can be FFFFFFFF (undefined length)*
    #   else: (any other VR)
    #       tag, VR, (2 byte length), value
    # * for undefined length, a Sequence Delimitation Item marks the end
    #        of the Value Field.
    # Note, except for the special_VRs, both impl and expl VR use 8 bytes;
    #    the special VRs follow the 8 bytes with a 4-byte length

    # With a generator, state is stored, so we can break down
    #    into the individual cases, and not have to check them again for each
    #    data element

    if is_little_endian:
        endian_chr = "<"
    else:
        endian_chr = ">"

    # assign implicit VR struct to variable as use later if VR assumed missing
    implicit_VR_struct = Struct(endian_chr + "HHL")
    if is_implicit_VR:
        element_struct = implicit_VR_struct
    else:  # Explicit VR
        # tag, VR, 2-byte length (or 0 if special VRs)
        element_struct = Struct(endian_chr + "HH2sH")
        extra_length_struct = Struct(endian_chr + "L")  # for special VRs
        extra_length_unpack = extra_length_struct.unpack  # for lookup speed

    # Make local variables so have faster lookup
    fp_read = fp.read
    fp_tell = fp.tell
    logger_debug = logger.debug
    debugging = config.debugging
    element_struct_unpack = element_struct.unpack
    defer_size = size_in_bytes(defer_size)

    tag_set = {Tag(tag) for tag in specific_tags} if specific_tags else set()
    has_tag_set = bool(tag_set)
    if has_tag_set:
        tag_set.add(Tag(0x00080005))  # Specific Character Set

    # Main element loop: one iteration per data element until EOF.
    while True:
        # Read tag, VR, length, get ready to read value
        bytes_read = fp_read(8)
        if len(bytes_read) < 8:
            return  # at end of file
        if debugging:
            debug_msg = "{0:08x}: {1}".format(fp.tell() - 8,
                                              bytes2hex(bytes_read))

        if is_implicit_VR:
            # must reset VR each time; could have set last iteration (e.g. SQ)
            VR = None
            group, elem, length = element_struct_unpack(bytes_read)
        else:  # explicit VR
            group, elem, VR, length = element_struct_unpack(bytes_read)
            # defend against switching to implicit VR, some writer do in SQ's
            # issue 1067, issue 1035

            if config.assume_implicit_vr_switch and not (b'AA' <= VR <= b'ZZ'):
                # invalid VR, must be two cap chrs
                message = ("Explicit VR character(s) invalid. "
                           "Assuming data element is implicit VR "
                           "and attempting to continue")
                warnings.warn(message, UserWarning)
                VR = None
                group, elem, length = implicit_VR_struct.unpack(bytes_read)
            else:
                VR = VR.decode(default_encoding)
                if VR in extra_length_VRs:
                    # Special VRs carry the real length in the next 4 bytes.
                    bytes_read = fp_read(4)
                    length = extra_length_unpack(bytes_read)[0]
                    if debugging:
                        debug_msg += " " + bytes2hex(bytes_read)

        if debugging:
            debug_msg = "%-47s  (%04x, %04x)" % (debug_msg, group, elem)
            if not is_implicit_VR:
                debug_msg += " %s " % VR
            if length != 0xFFFFFFFF:
                debug_msg += "Length: %d" % length
            else:
                debug_msg += "Length: Undefined length (FFFFFFFF)"
            logger_debug(debug_msg)

        # Positioned to read the value, but may not want to -- check stop_when
        value_tell = fp_tell()
        tag = TupleTag((group, elem))
        if stop_when is not None:
            # XXX VR may be None here!! Should stop_when just take tag?
            if stop_when(tag, VR, length):
                if debugging:
                    logger_debug("Reading ended by stop_when callback. "
                                 "Rewinding to start of data element.")
                rewind_length = 8
                if not is_implicit_VR and VR in extra_length_VRs:
                    rewind_length += 4
                fp.seek(value_tell - rewind_length)
                return

        # Reading the value
        # First case (most common): reading a value with a defined length
        if length != 0xFFFFFFFF:
            # don't defer loading of Specific Character Set value as it is
            # needed immediately to get the character encoding for other tags
            if has_tag_set and tag not in tag_set:
                # skip the tag if not in specific tags
                fp.seek(fp_tell() + length)
                continue

            if (defer_size is not None and length > defer_size
                    and tag != BaseTag(0x00080005)):
                # Flag as deferred by setting value to None, and skip bytes
                value = None
                logger_debug("Defer size exceeded. "
                             "Skipping forward to next data element.")
                fp.seek(fp_tell() + length)
            else:
                value = (fp_read(length)
                         if length > 0 else empty_value_for_VR(VR, raw=True))
                if debugging:
                    dotdot = "..." if length > 20 else "   "
                    displayed_value = value[:20] if value else b''
                    logger_debug("%08x: %-34s %s %r %s" %
                                 (value_tell, bytes2hex(displayed_value),
                                  dotdot, displayed_value, dotdot))

            # If the tag is (0008,0005) Specific Character Set, then store it
            if tag == BaseTag(0x00080005):
                from pydicom.values import convert_string
                encoding = convert_string(value or b'', is_little_endian)
                # Store the encoding value in the generator
                # for use with future elements (SQs)
                encoding = convert_encodings(encoding)

            yield RawDataElement(tag, VR, length, value, value_tell,
                                 is_implicit_VR, is_little_endian)

        # Second case: undefined length - must seek to delimiter,
        # unless is SQ type, in which case is easier to parse it, because
        # undefined length SQs and items of undefined lengths can be nested
        # and it would be error-prone to read to the correct outer delimiter
        else:
            # Try to look up type to see if is a SQ
            # if private tag, won't be able to look it up in dictionary,
            #   in which case just ignore it and read the bytes unless it is
            #   identified as a Sequence
            if VR is None:
                try:
                    VR = dictionary_VR(tag)
                except KeyError:
                    # Look ahead to see if it consists of items
                    # and is thus a SQ
                    next_tag = TupleTag(unpack(endian_chr + "HH", fp_read(4)))
                    # Rewind the file
                    fp.seek(fp_tell() - 4)
                    if next_tag == ItemTag:
                        VR = 'SQ'

            if VR == 'SQ':
                if debugging:
                    msg = "{0:08x}: Reading/parsing undefined length sequence"
                    logger_debug(msg.format(fp_tell()))
                seq = read_sequence(fp, is_implicit_VR, is_little_endian,
                                    length, encoding)
                if has_tag_set and tag not in tag_set:
                    continue
                yield DataElement(tag,
                                  VR,
                                  seq,
                                  value_tell,
                                  is_undefined_length=True)
            else:
                delimiter = SequenceDelimiterTag
                if debugging:
                    logger_debug("Reading undefined length data element")
                value = read_undefined_length_value(fp, is_little_endian,
                                                    delimiter, defer_size)

                # tags with undefined length are skipped after read
                if has_tag_set and tag not in tag_set:
                    continue
                yield RawDataElement(tag, VR, length, value, value_tell,
                                     is_implicit_VR, is_little_endian)
예제 #6
0
# Import common utilites and base classes
from cgsn_parsers.parsers.common import ParserCommon
from cgsn_parsers.parsers.common import dcl_to_epoch, inputs, DCL_TIMESTAMP, FLOAT, NEWLINE

# Regex pattern for a line with a DCL time stamp and the OCR-507 data sample
# NOTE(review): this concatenation mixes str regex pieces (r'...') with a
# bytes literal (b'...'); on Python 3 that raises TypeError.  Presumably
# Python 2 code, or the named constants are bytes -- confirm before porting.
PATTERN = (
    DCL_TIMESTAMP + r'\s+' +     # DCL Time-Stamp
    r'SATDI7' + r'([\d]{4})' +   # Serial number
    FLOAT +                      # Timer (seconds)
    b'([\x00-\xFF]{38})' +       # binary data packet
    NEWLINE
)
# DOTALL so the binary payload may contain newline bytes.
# NOTE(review): ``re`` is not imported in this view -- confirm.
REGEX = re.compile(PATTERN, re.DOTALL)

# Set the format for the binary packet for later unpacking:
# little-endian short + 7 uint32 + 3 ushort + 2 bytes = 38 bytes,
# matching the 38-byte capture group in PATTERN above.
SPKIR = Struct('<h7I3HBB')

# Ordered field names for a parsed SPKIR (OCR-507) sample.
_parameter_names_spkir = [
        'date_time_string',
        'serial_number',
        'timer',
        'sample_delay',
        'raw_channels',
        'input_voltage',
        'analog_rail_voltage',
        'frame_counter',
        'internal_temperature'
    ]


class Parser(ParserCommon):
예제 #7
0
    def _read_gmcurv(self, data: bytes, n: int) -> int:
        """
        Reads GMCURV (geometry curve) records from the table data.

        Word Name Type Description
        1 CURVID       I Curve identification number
        2 GROUP(2) CHAR4 Group of curves/surfaces to which this curve belongs
        4 CIDIN        I Coordinate system identification number for the geometry
        5 CIDBC        I Coordinate system identification number for the constraints
        6 DATA     CHAR4 Geometry evaluator specific data

        Parameters
        ----------
        data : bytes
            the raw record payload
        n : int
            offset into ``data`` where parsing starts

        Returns
        -------
        n : int
            the offset after the last parsed record
        """
        size = self.size
        # size == 4 -> 32-bit table; size == 8 -> 64-bit table where the
        # CHAR4 group field is stored as a doubled byte block (hence
        # reshape_bytes_block below).
        if size == 4:
            struct_i = self.struct_i
            structi = Struct(b'i 8s ii')
        else:
            struct_i = self.struct_q
            structi = Struct(b'q 16s qq')

        # Bytes per fixed header / per text chunk.
        # NOTE(review): assumes ``self.factor`` is 2 when size == 8 so that
        # ntotal1 matches structi.size -- confirm.
        ntotal1 = 20 * self.factor
        ntotal2 = 64 * self.factor
        while n < len(data):
            datab = data[n:n+ntotal1]
            curve_id, group_bytes, cid_in, cid_bc = structi.unpack(datab)
            if size == 8:
                group_bytes = reshape_bytes_block(group_bytes)
            group = group_bytes.decode('latin1').rstrip()
            #print(curve_id, group, cid_in, cid_bc)
            assert group in ['MSCGRP0', 'MSCGRP1', 'MSCGRP2'], f'GMCURV: curve_id={curve_id} group={group!r} cid_in={cid_in} cid_bc={cid_bc}'
            n += ntotal1

            # The DATA field is a stream of words terminated by a -1
            # sentinel; accumulate the raw bytes until the sentinel word.
            databi_bytes = data[n:n+size]
            n += size
            databi = data[n:n+size]
            datab_int, = struct_i.unpack(databi)
            n += size
            while datab_int != -1:
                databi_bytes += databi
                databi = data[n:n+size]
                datab_int, = struct_i.unpack(databi)
                n += size
            datai = databi_bytes.decode('latin1').rstrip()

            # Re-wrap the evaluator data into indented lines of at most
            # ntotal2 characters for the card entry.
            data_split = ['        %s\n' % datai[i:i+ntotal2].strip() for i in range(0, len(datai), ntotal2)]
            self.add_gmcurv(curve_id, group, data_split, cid_in=cid_in, cid_bc=cid_bc)
            #print(datai)

        #ints = np.frombuffer(data[n:], dtype=self.idtype).copy()
        #iminus1 = np.where(ints == -1)[0].tolist()
        #i0 = 0
        #for iminus1i in iminus1:
            #curve_id = ints[i0]
            #cid_in, cid_bc = ints[i0+3:i0+5]
            #s0 = n + 4
            #s1 = s0 + 8
            #group = data[s0:s1].decode('latin1').rstrip()
            #print(curve_id, group, cid_in, cid_bc)
            #assert group in ['MSCGRP1', 'MSCGRP2'], f'GMCURV: curve_id={curve_id} group={group!r} cid_in={cid_in} cid_bc={cid_bc}'

            #s2 = s1 + 8
            #s3 = 12 + iminus1i * 4
            #datai = data[s2:s3].decode('latin1').rstrip()
            #print('datai = %r' % datai)
            #i0 = iminus1i + 1
            ## n = s3 + 4
            #n = 12+(iminus1i + 1)*4
            #print('-----------------')
        #return len(data)
        return n
예제 #8
0
#!/usr/bin/env python3

import sys, os, zlib, gzip
import common
from struct import Struct
from io import BytesIO
from nbt_codec import NBTCodec
from raw_codec import RawCodec

# Region-file header: one 4-byte entry per chunk.
# NOTE(review): "!bhb" is 1+2+1 big-endian bytes; presumably a 3-byte sector
# offset split across the b/h fields plus a 1-byte sector count -- confirm.
OFFSET_STRUCT = Struct("!bhb")
# 32 x 32 chunks per region file.
CHUNK_COUNT = 32 * 32
# The offsets table occupies the first OFFSETS_SIZE bytes of the file...
OFFSETS_SIZE = OFFSET_STRUCT.size * CHUNK_COUNT
# ...immediately followed by a 4-byte-per-chunk timestamps table.
TIMESTAMPS_OFFSET = OFFSETS_SIZE
TIMESTAMPS_SIZE = 4 * CHUNK_COUNT
# Per-chunk payload header: 4-byte length + 1-byte compression scheme.
PACKING_STRUCT = Struct("!ib")


class MCRCodec(object):
    def __init__(self, encapsulated):
        self._encapsulated = encapsulated

    def process(self, mcr_file, out_file):
        out_file.write(b"MCR\n")
        offsets = mcr_file.read(OFFSETS_SIZE)
        if len(offsets) < OFFSETS_SIZE:
            raise Exception("Region file ends inside offsets")

        mcr_file.seek(TIMESTAMPS_OFFSET)
        common.handle_large_tag(mcr_file, out_file, TIMESTAMPS_SIZE)

        for index in range(CHUNK_COUNT):
예제 #9
0
import sys
from struct import Struct

# BSE feed input ports, live vs. test, per market segment.
# NOTE(review): CM/FO/CD presumably cash / F&O / currency derivatives.
BSE_INP_PORT_LIVE_CM = 2001
BSE_INP_PORT_LIVE_FO = 2002
BSE_INP_PORT_LIVE_CD = 2003
BSE_INP_PORT_TEST_CM = 2001  #2008
BSE_INP_PORT_TEST_FO = 2009
BSE_INP_PORT_TEST_CD = 2010

## *********************** Packet struct ***********************
BSE_ENDIAN = '<'  # all packet structs below use little-endian byte order

## *********************** Header ***********************
# 4-byte leader + two shorts: 4 + 2 + 2 = 8 bytes (= BSE_CONST_HEADER_SIZE).
BSE_CONST_HEADER_FMT = "%s4s h h" % (BSE_ENDIAN)
BSE_CONST_HEADER_STRUCT = Struct(BSE_CONST_HEADER_FMT)
BSE_CONST_HEADER_SIZE = 8
## These are the common fields in all packet format. So we will consider this as header.
## (msgType,) ## This will contain the transaction code.
BCAST_HEADER_MSG_TYPE_FMT = "%sL" % (BSE_ENDIAN)
BCAST_HEADER_MSG_TYPE_STRUCT = Struct(BCAST_HEADER_MSG_TYPE_FMT)
BCAST_HEADER_MSG_TYPE_SIZE = 4
## (res1, res2, res3, hour, minute, second, milliSec)
# 2 ulongs + 5 ushorts = 8 + 2 + 8 = 18 bytes (= BSE_BCAST_HEADER_LEN).
BSE_BCAST_HEADER_FMT = "%s2L H 4H" % (BSE_ENDIAN)
BSE_BCAST_HEADER_STRUCT = Struct(BSE_BCAST_HEADER_FMT)
BSE_BCAST_HEADER_LEN = 18
BSE_TOTAL_BCAST_HEADER_LEN = BCAST_HEADER_MSG_TYPE_SIZE + BSE_BCAST_HEADER_LEN
## *********************** END : Header ***********************

## 1.1.112 : Time Broadcast [2001]
BSE_TIME_BROADCAST_CODE_2001 = 2001
예제 #10
0
    def write_op2(self,
                  op2,
                  op2_ascii,
                  itable,
                  new_result,
                  date,
                  is_mag_phase=False,
                  endian='>'):
        """writes an OP2

        Parameters
        ----------
        op2 : file-like
            binary OP2 output file
        op2_ascii : file-like
            text mirror of what gets written (debug trace)
        itable : int
            current table marker; decremented as records are written
        new_result : bool
            whether a new results table must be started (cleared after the
            first time step)
        date :
            passed through to ``_write_table_3``; format not visible here
        is_mag_phase : bool; default=False
            write magnitude/phase instead of real/imaginary
        endian : str; default='>'
            struct byte-order prefix

        Returns
        -------
        itable : int
            the updated table marker
        """
        import inspect
        from struct import Struct, pack
        frame = inspect.currentframe()
        call_frame = inspect.getouterframes(frame, 2)
        op2_ascii.write('%s.write_op2: %s\n' %
                        (self.__class__.__name__, call_frame[1][3]))

        if itable == -1:
            self._write_table_header(op2, op2_ascii, date)
            itable = -3

        #if isinstance(self.nonlinear_factor, float):
        #op2_format = '%sif' % (7 * self.ntimes)
        #raise NotImplementedError()
        #else:
        #op2_format = 'i21f'
        #s = Struct(op2_format)

        eids = self.element

        # table 4 info
        #ntimes = self.data.shape[0]
        #nnodes = self.data.shape[1]
        nelements = self.data.shape[1]

        # 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
        #ntotal = ((nnodes * 21) + 1) + (nelements * 4)

        # words per element * elements = words per time step
        ntotali = self.num_wide
        ntotal = ntotali * nelements

        #print('shape = %s' % str(self.data.shape))
        #assert self.ntimes == 1, self.ntimes

        device_code = self.device_code
        op2_ascii.write('  ntimes = %s\n' % self.ntimes)

        eids_device = self.element * 10 + self.device_code

        #fmt = '%2i %6f'
        #print('ntotal=%s' % (ntotal))
        #assert ntotal == 193, ntotal

        # One int (element id) + 12 floats (6 real + 6 imaginary components).
        if self.is_sort1:
            struct1 = Struct(endian + b'i12f')
        else:
            raise NotImplementedError('SORT2')

        op2_ascii.write('nelements=%i\n' % nelements)

        for itime in range(self.ntimes):
            #print('3, %s' % itable)
            self._write_table_3(op2, op2_ascii, new_result, itable, itime)

            # record 4
            #print('stress itable = %s' % itable)
            itable -= 1
            #print('4, %s' % itable)
            header = [4, itable, 4, 4, 1, 4, 4, 0, 4, 4, ntotal, 4, 4 * ntotal]
            op2.write(pack('%ii' % len(header), *header))
            op2_ascii.write('r4 [4, 0, 4]\n')
            op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
            op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))

            tx = self.data[itime, :, 0]
            ty = self.data[itime, :, 1]
            tz = self.data[itime, :, 2]
            rx = self.data[itime, :, 3]
            ry = self.data[itime, :, 4]
            rz = self.data[itime, :, 5]
            for eid, itx, ity, itz, irx, iry, irz in zip(
                    eids, tx, ty, tz, rx, ry, rz):
                # NOTE(review): the formatted strings returned here are never
                # used below; the raw .real/.imag values are written instead.
                [txr, tyr, tzr, rxr, ryr, rzr, txi, tyi, tzi, rxi, ryi,
                 rzi] = write_imag_floats_13e([itx, ity, itz, irx, iry, irz],
                                              is_mag_phase)
                data = [
                    eid, itx.real, ity.real, itz.real, irx.real, iry.real,
                    irz.real, itx.imag, ity.imag, itz.imag, irx.imag, iry.imag,
                    irz.imag
                ]
                # NOTE(review): this logs the whole ``eids_device`` array on
                # every row; the per-row id was probably intended -- confirm.
                op2_ascii.write('  eid=%s data=%s\n' %
                                (eids_device, str(data)))
                op2.write(struct1.pack(*data))

            itable -= 1
            header = [
                4 * ntotal,
            ]
            op2.write(pack('i', *header))
            op2_ascii.write('footer = %s\n' % header)
            new_result = False
        return itable
예제 #11
0
            self.CH4 = round(rec[11], 3)
            self.H2 = round(rec[12], 3)
            self.C2H5OH = round(rec[13], 3)

        else:
            print("Data Packets: Invalid size of received list")


# URL for HTTP GET requests (remote data-upload endpoint).
URL = "http://crohmi.seecs.nust.edu.pk/datauploadscript.php"

# FIFO queue for the incoming data packets (maxsize=0 means unbounded).
# NOTE(review): ``Queue`` is the Python 2 module name (``queue`` on Py3).
dataPacketStack = Queue.Queue(0)

# Layout of an incoming packet: one int followed by thirteen floats.
structure = Struct('ifffffffffffff')

# Defines the Serial Port to listen to and what baudrate
try:
    ser = serial.Serial(port='/dev/ttyACM0',
                        baudrate=9600,
                        parity=serial.PARITY_NONE,
                        stopbits=serial.STOPBITS_ONE,
                        bytesize=serial.EIGHTBITS,
                        timeout=None)
except:
    ser = serial.Serial(port='/dev/ttyACM1',
                        baudrate=9600,
                        parity=serial.PARITY_NONE,
                        stopbits=serial.STOPBITS_ONE,
                        bytesize=serial.EIGHTBITS,
예제 #12
0
# Pre-compiled struct for one native-endian signed 16-bit integer; hoisted to
# module level so encode() does not rebuild it on every call.
_SHORT_STRUCT = Struct('h')


def encode(x):
    """
    Encode float x between -1 and 1 as two bytes.

    The value is scaled by 16380 (so +/-1.0 stays within the signed 16-bit
    range) and packed as a native-endian signed short.

    :param x: float in [-1.0, 1.0]; values well outside this range may
        overflow the signed short and raise ``struct.error``.
    :return: 2-byte ``bytes`` object.
    """
    return _SHORT_STRUCT.pack(int(16380 * x))
예제 #13
0
from __future__ import print_function
"""Support functions and classes implementing the KinoSearch-like external sort
merging model. This module does not contain any user-level objects.
"""

import os, tempfile
from heapq import heapify, heapreplace, heappop
from struct import Struct

from alfanous.Support.whoosh.filedb.structfile import StructFile, pack_ushort, unpack_ushort
from alfanous.Support.whoosh.system import _INT_SIZE, _USHORT_SIZE
from alfanous.Support.whoosh.util import utf8encode, utf8decode

# Utility functions

# Pre-compiled struct for a pair of big-endian unsigned 32-bit ints, used to
# encode (doc, freq) pairs inside postings.
_2int_struct = Struct("!II")
pack2ints = _2int_struct.pack
unpack2ints = _2int_struct.unpack


def encode_posting(fieldnum, text, doc, freq, datastring):
    """Encodes a posting as a string, for sorting.

    Layout: packed field number, UTF-8 encoded text, a NUL separator, the
    packed (doc, freq) pair, then the raw value string.
    """
    parts = [
        pack_ushort(fieldnum),
        utf8encode(text)[0],
        chr(0),
        pack2ints(doc, freq),
        datastring,
    ]
    return "".join(parts)
예제 #14
0
import os
import posixpath
from random import SystemRandom
from struct import Struct

from ._compat import izip
from ._compat import PY2
from ._compat import range_type
from ._compat import text_type
from ._compat import to_bytes
from ._compat import to_native

# Alphabet used when generating random salts (alphanumeric ASCII only).
SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
# Default PBKDF2 round count.
DEFAULT_PBKDF2_ITERATIONS = 150000

# Packs an int as 4 big-endian bytes (the PBKDF2 block index).
_pack_int = Struct(">I").pack
# Constant-time comparison from the stdlib, when available.
# NOTE(review): ``hmac`` is not imported in this view -- presumably imported
# at the top of the full file; confirm.
_builtin_safe_str_cmp = getattr(hmac, "compare_digest", None)
# Cryptographically strong RNG for salt generation.
_sys_rng = SystemRandom()
# Path separators other than '/' (e.g. '\\' on Windows), used when
# sanitizing user-supplied file names.
_os_alt_seps = list(
    sep for sep in [os.path.sep, os.path.altsep] if sep not in (None, "/")
)


def pbkdf2_hex(
    data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None
):
    """Like :func:`pbkdf2_bin`, but returns a hex-encoded string.

    .. versionadded:: 0.9

    :param data: the data to derive.
예제 #15
0
파일: fields.py 프로젝트: badock/libopenttd
 def _build_struct(cls, fmt):
     # We enforce the < prefix
     if fmt[0] in ['@', '=', '<', '>', '!']:
         fmt = fmt[1:]
     fmt = '<%s' % fmt
     return Struct(fmt)
예제 #16
0
__author__ = 'Matthew Witherwax (lemoneer)'

from struct import Struct, pack

from .constants import DAYS, MOTORS, LEDS, BUTTONS, DRIVE, WEEKDAY_LEDS, SCHEDULING_LEDS

# Pre-compiled packers for command packets.  Every command begins with a
# single op-code byte, so each packer below includes one leading 'B' for the
# op code in addition to its payload bytes (e.g. pack_signed_byte really
# packs two bytes: op code + one signed data byte).
pack_op_code = Struct('B').pack
pack_signed_byte = Struct('Bb').pack
pack_unsigned_byte = Struct('BB').pack
pack_2unsigned_bytes = Struct('BBB').pack
pack_3signed_bytes = Struct('Bbbb').pack
pack_3unsigned_bytes = Struct('BBBB').pack
pack_4unsigned_bytes = Struct('BBBBB').pack
pack_schedule = Struct('B' + 'b' * 15).pack
pack_drive = Struct('>Bhh').pack
pack_drive_special_cases = Struct('>BhH').pack


def start():
    """Return the Start command packet (op code 128)."""
    return pack_op_code(128)


def reset():
    """Return the Reset command packet (op code 7)."""
    return pack_op_code(7)


def stop():
    """Return the Stop command packet (op code 173)."""
    return pack_op_code(173)
예제 #17
0
파일: parser.py 프로젝트: kivy/oscpy

from struct import Struct, pack, unpack_from, calcsize
from time import time
import sys
from collections import Counter, namedtuple
from oscpy.stats import Stats

if sys.version_info.major > 2:  # pragma: no cover
    UNICODE = str
    izip = zip
else:  # pragma: no cover
    UNICODE = unicode
    from itertools import izip

# Pre-compiled packers for atomic argument types; all big-endian ('>').
INT = Struct('>i')
FLOAT = Struct('>f')
STRING = Struct('>s')
TIME_TAG = Struct('>II')

# NTP packet layout: twelve big-endian unsigned 32-bit words.
TP_PACKET_FORMAT = "!12I"
# 1970-01-01 00:00:00
# (seconds between the NTP epoch, 1900, and the Unix epoch, 1970)
NTP_DELTA = 2208988800

NULL = b'\0'  # string terminator / padding byte
EMPTY = tuple()
INF = float('inf')

# A decoded MIDI message: port id plus the three raw MIDI bytes.
MidiTuple = namedtuple('MidiTuple', 'port_id status_byte data1 data2')

def padded(l, n=4):
예제 #18
0
import queue
import threading
import hmac
from struct import Struct
import webbrowser

from .i18n import _


def inv_dict(d):
    """Return the inverse of *d*: values become keys and keys become values."""
    return dict((value, key) for key, value in d.items())

# Display units mapped to their decimal precision (presumably the number of
# decimal places shown for each denomination -- confirm against formatters).
base_units = {'QTUM':8, 'mQTUM':5, 'uQTUM':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]

# Pre-compiled little-endian unpackers for fixed-width integers.
unpack_int32_from = Struct('<i').unpack_from
unpack_int64_from = Struct('<q').unpack_from
unpack_uint16_from = Struct('<H').unpack_from
unpack_uint32_from = Struct('<I').unpack_from
unpack_uint64_from = Struct('<Q').unpack_from

def normalize_version(v):
    """Parse a dotted version string into a list of ints.

    Trailing ``.0`` components are dropped first, so '3.2.0' -> [3, 2].
    """
    trimmed = re.sub(r'(\.0+)*$', '', v)
    return [int(part) for part in trimmed.split('.')]


class NotEnoughFunds(Exception): pass  # presumably raised when the wallet balance cannot cover a spend -- confirm against callers


class InvalidPassword(Exception):
    """Raised when a supplied password fails verification."""
    def __str__(self):
        # _() comes from .i18n and returns the translated message.
        return _("Incorrect password")
예제 #19
0
    def _read_grid_maybe(self, data: bytes, n: int) -> int:  # pragma: no cover
        """(4501, 45, 1120001) - the marker for Record 17

        Work-in-progress reader: the record layout has not been decoded yet,
        so the method currently skips the record via the early ``return``
        below.  Everything after it is exploratory/dead code kept for the
        next decoding attempt.
        """
        # NOTE(review): early return consumes the whole record unparsed;
        # nothing below this line executes.
        return len(data)
        #nfields = (len(data) - n) // 4
        # nfields = 3 * 11 * 17 * 71

        # it's not 11, 17...
        #self.show_data(data[12:], types='if')

        # 2i: correct
        #   id, 0
        # i: ???
        # i/f: ???
        # f: correct
        # i: ???
        # f: correct
        # 5i: ???
        #                                    ? ?   ?
        # current best guess: 11 words per entry
        structi = Struct(self._endian + b'2i i f i f 5i') # 11...decent

        #structi = Struct(self._endian + b'17i') # 17...not a chance

        # i: id
        # i/f: ???
        # 3f
        #structi = Struct(self._endian + b'i i 3f 28i') # 33...better?...still not right

        #structi = Struct(self._endian + b'51i') # 17*3..nope
        #structi = Struct(self._endian + b'71i') # 71...nope
        #structi = Struct(self._endian + b'187i') # 11*17...

        # bytes per candidate entry (11 four-byte words)
        ntotal = 4 * 11

        nentries = (len(data) - n) // ntotal
        leftover = (len(data) - n) % ntotal
        assert leftover == 0, f'ndata={len(data)-n} leftover={leftover}'
        nfailed = 0
        for unused_i in range(nentries):
            edata = data[n:n + ntotal]
            out = structi.unpack(edata)
            self.log.debug(out)
            n += ntotal
            # the `continue` makes the GRID-building code below dead;
            # it is retained from the (working) GRID reader as a template.
            continue
            (nid, cp, x1, x2, x3, cd, ps, seid) = out
            if self.is_debug_file:
                self.binary_debug.write('  GRID=%s\n' % str(out))
            if nid < 10000000:
                # cd can be < 0
                if ps == 0:
                    ps = ''
                node = GRID(nid, np.array([x1, x2, x3]), cp, cd, ps, seid)
                self._type_to_id_map['GRID'].append(nid)
                self.nodes[nid] = node
                #if nid in self.nodes:
                    #self.reject_lines.append(str(node))
                #else:
                #self.nodes[nid] = node
                #self.add_node(node)
            else:
                #self.log.warning('*nid=%s cp=%s x1=%-5.2f x2=%-5.2f x3=%-5.2f cd=%-2s ps=%s '
                                 #'seid=%s' % (nid, cp, x1, x2, x3, cd, ps, seid))
                #node = GRID(nid, np.array([x1, x2, x3]), cp, cd, ps, seid)
                #self.rejects.append(str(node))
                nfailed += 1
            n += ntotal
        self.increase_card_count('GRID', nentries - nfailed)
        return n
예제 #20
0
파일: ept.py 프로젝트: umvarma/pynastran
    def _readPBARL(self, data, n):
        """
        PBARL(9102,91,52) - the marker for Record 12
        TODO: buggy

        Reads a fixed 28-byte leader (pid, mid, group, Type, value) and then
        a Type-dependent number of dimension floats.  NOTE(review): the
        unconditional ``break`` at the bottom means at most ONE PBARL is
        parsed per call; the rest of the record is skipped by returning
        len(data).
        """
        # number of trailing float fields to read for each cross-section type
        validTypes = {
            "ROD": 1,
            "TUBE": 2,
            "I": 6,
            "CHAN": 4,
            "T": 4,
            "BOX": 4,
            "BAR": 2,
            "CROSS": 4,
            "H": 4,
            "T1": 4,
            "I1": 4,
            "CHAN1": 4,
            "Z": 4,
            "CHAN2": 4,
            "T2": 4,
            "BOX1": 6,
            "HEXA": 3,
            "HAT": 4,
            "HAT1": 5,
            "DBOX": 10,  # was 12
            #'MLO TUBE' : 2,
        }  # for GROUP="MSCBML0"

        ntotal = 28  # 7*4 - ROD - shortest entry...could be buggy... # TODO fix this
        # leader: 2 ints, two 8-char strings, 1 float
        s = Struct(b'2i8s8sf')
        #nentries = (len(data) - n) // ntotal
        #print(self.show_ndata(80))
        ndata = len(data)
        while ndata - n > ntotal:
            eData = data[n:n + 28]
            n += 28

            out = s.unpack(eData)
            (pid, mid, group, Type, value) = out
            Type = Type.strip()
            dataIn = [pid, mid, group, Type, value]
            print("pid=%s mid=%s group=%r Type=%r value=%s" %
                  (pid, mid, group, Type, value))
            # sanity check -- a huge pid means the stream is misaligned
            if pid > 100000000:
                raise RuntimeError('bad parsing...')
            expectedLength = validTypes[Type]
            iFormat = b'%if' % expectedLength

            ndelta = expectedLength * 4
            dataIn += list(unpack(iFormat, data[n:n + ndelta]))
            # TODO why do i need the +4???
            #min_len =  expectedLength * 4 + 4
            #if len(data)
            #data = data[n + expectedLength * 4 + 4:]
            n += ndelta

            #prin( "len(out) = ",len(out)))
            #print("PBARL = %s" % dataIn)
            prop = PBARL(None, dataIn)  # last value is nsm
            self.addOp2Property(prop)
            #print(self.show_data(data[n-8:-100]))
            # NOTE(review): loop exits after the first entry (see docstring)
            break
        self._increase_card_count('PBARL')
        #assert len(data) == n
        return len(data)
예제 #21
0
        if payload_checksum != checksum:
            raise BadChecksumError(payload_checksum, checksum)
        return command, payload

    def _checksum(self, payload):
        """Compute the checksum of *payload*; must be overridden by subclasses."""
        raise NotImplementedError

    def _build_header(self, command, payload):
        """Build the wire header for *command*/*payload*; must be overridden."""
        raise NotImplementedError

    async def _receive_header(self):
        """Read and parse the next message header; must be overridden."""
        raise NotImplementedError


# Helpers
# Pre-compiled little-endian unsigned 32-bit integer struct and its packer.
struct_le_I = Struct('<I')
pack_le_uint32 = struct_le_I.pack


def sha256(x):
    """Simple wrapper of hashlib sha256: return the raw digest of *x*."""
    hasher = _sha256(x)
    return hasher.digest()


def double_sha256(x):
    """SHA-256 of SHA-256, as used extensively in bitcoin."""
    first_round = sha256(x)
    return sha256(first_round)


class BadChecksumError(Exception):
    """Raised when a message payload's checksum does not match its header."""
    pass
예제 #22
0
파일: stl.py 프로젝트: als0052/pyNastran
    def read_binary_stl(self, stl_filename: str) -> None:
        """
        Read an STL binary file

        Parameters
        ----------
        stl_filename : str
            the filename to read

        Populates ``self.header``, ``self.nodes`` and ``self.elements``.
        NOTE(review): if the file contains multiple solids, only the last
        solid's ``elements`` survive the outer while-loop -- confirm intent.
        """
        with open(stl_filename, 'rb') as infile:
            data = infile.read()

        ndata = len(data)
        j = 0
        while j < ndata:
            self.log.info(f'  read_binary_stl: j={j} ndata={ndata}')
            # 80-byte header followed by a 4-byte facet count
            self.header = data[j:j + 80]
            nelements, = unpack('i', data[j + 80:j + 84])
            j += 84

            inode = 0
            # maps a vertex coordinate tuple -> its node index (dedups verts)
            nodes_dict = {}
            assert nelements > 0, f'nelements={nelements}'
            elements = np.zeros((nelements, 3), 'int32')

            # 50-byte facet record: normal (3f), 3 vertices (9f), attribute (H)
            s = Struct('12fH')
            for ielement in range(nelements):
                (unused_nx, unused_ny, unused_nz, ax, ay, az, bx, by, bz, cx,
                 cy, cz, unused_i) = s.unpack(data[j:j + 50])

                t1 = (ax, ay, az)
                t2 = (bx, by, bz)
                t3 = (cx, cy, cz)
                if t1 in nodes_dict:
                    i1 = nodes_dict[t1]
                else:
                    i1 = inode
                    nodes_dict[t1] = inode
                    inode += 1

                if t2 in nodes_dict:
                    i2 = nodes_dict[t2]
                else:
                    i2 = inode
                    nodes_dict[t2] = inode
                    inode += 1

                if t3 in nodes_dict:
                    i3 = nodes_dict[t3]
                else:
                    i3 = inode
                    nodes_dict[t3] = inode
                    inode += 1
                elements[ielement] = [i1, i2, i3]
                j += 50
        assert inode > 0, inode
        # NOTE(review): node indices run 0..inode-1, so inode already equals
        # the node count; inode + 1 leaves one trailing all-zero row in
        # ``nodes`` -- confirm whether that extra node is intended.
        nnodes = inode + 1  # accounting for indexing
        self.elements = elements

        nodes = np.zeros((nnodes, 3), 'float64')
        for node, inode in nodes_dict.items():
            nodes[inode] = node
        self.nodes = nodes
예제 #23
0
 def _read_int(self, fmt, base, offset):
     struct = Struct(fmt)
     return self._unpack_item(struct, base, offset)
예제 #24
0
from __future__ import division, print_function
from struct import Struct
import os
# from pprint import pprint
from collections import namedtuple

# https://stackoverflow.com/questions/2646157/what-is-the-fastest-to-access-struct-like-object-in-python


def CtoF(x):
    # NOTE(review): despite the name (Celsius-to-Fahrenheit), this is a
    # deliberate identity passthrough -- no conversion is applied.
    # Confirm whether callers rely on getting Celsius back unchanged.
    return x


# use: unpack_bool_byte(data)[0] -> returns tuple, but grab 0 entry
# Pre-compiled single-field unpackers; multi-byte values are big-endian ('>').
unpack_bool_byte = Struct('?').unpack  # 1 byte bool
unpack_byte = Struct('b').unpack  # 1 signed byte
unpack_unsigned_byte = Struct('B').unpack  # 1 unsigned byte
unpack_short = Struct('>h').unpack  # 2 signed bytes (short)
unpack_unsigned_short = Struct('>H').unpack  # 2 unsigned bytes (ushort)


# could replace with:
# class LEDS(object): DEBRIS=0x01; SPOT=0x02; DOCK=0x04; CHECK_ROBOT=0x08
class Namespace(object):
    """Simple attribute bag: ``Namespace(a=1).a == 1``."""
    def __init__(self, **attrs):
        for name, value in attrs.items():
            setattr(self, name, value)


# BAUD_RATE           = Namespace(BAUD_300=0, BAUD_600=1, BAUD_1200=2, BAUD_2400=3, BAUD_4800=4, BAUD_9600=5, BAUD_14400=6,
#                                BAUD_19200=7, BAUD_28800=8, BAUD_38400=9, BAUD_57600=10, BAUD_115200=11, DEFAULT=11)
예제 #25
0
class Block2(BlockBase):
    """On-disk posting block, format 2 (magic ``Blk2``).

    The little-endian binary header layout is documented field-by-field
    below; after the header and the "maximum ID" field come three
    optionally zlib-compressed payload sections, in order: IDs, weights,
    values.
    """

    magic = 1114401586  # "Blk2"

    # Offset  Type  Desc
    # ------  ----  -------
    # 0       i     Delta to next block
    # 4       B     Flags (compression)
    # 5       B     Post count
    # 6       c     ID array typecode
    # 7       B     -Unused
    # 8       i     IDs length
    # 12      i     Weights length
    # 16      f     Maximum weight
    # 20      f     Max weight-over-length
    # 24      H     -Unused
    # 26      B     -Unused
    # 27      B     Maximum length, encoded as byte
    # 28      B     Minimum length, encoded as byte
    #
    # Followed by either an unsigned int or string indicating the last ID in
    # this block
    _struct = Struct("<iBBcBiiffHBBB")

    @classmethod
    def from_file(cls, postfile, postingsize, stringids=False):
        """Read a block header from *postfile* and return a Block2.

        Leaves the file positioned at ``block.dataoffset`` (the start of
        the payload); the IDs/weights/values are decoded lazily by the
        ``read_*`` methods.
        """
        start = postfile.tell()

        # Read the block header information from the posting file
        header = cls._struct.unpack(postfile.read(cls._struct.size))

        # Create the base block object
        # (header[4], header[9] and header[10] are the unused pad fields)
        block = cls(postfile,
                    postingsize,
                    stringids=stringids,
                    maxweight=header[7],
                    maxwol=header[8],
                    maxlength=header[11],
                    minlength=header[12])

        # Fill in the attributes needed by this block implementation
        block.nextoffset = start + header[0]
        block.compression = header[1]
        block.postcount = header[2]
        block.typecode = header[3]
        block.idslen = header[5]
        block.weightslen = header[6]

        if PY3:
            # the 'c' field unpacks as bytes on Python 3; array() needs str
            block.typecode = block.typecode.decode('latin-1')

        # Read the "maximum ID" part of the header, based on whether we're
        # using string IDs
        if stringids:
            block.maxid = load(postfile)
        else:
            block.maxid = postfile.read_uint()

        # The position after the header
        block.dataoffset = postfile.tell()
        return block

    def read_ids(self):
        """Decode, cache on ``self.ids`` and return this block's ID sequence."""
        dataoffset = self.dataoffset
        ids_string = self.postfile.map[dataoffset:dataoffset + self.idslen]
        if self.compression:
            ids_string = decompress(ids_string)

        if self.stringids:
            ids = loads(ids_string)
        else:
            ids = array(self.typecode)
            ids.fromstring(ids_string)
            if not IS_LITTLE:
                # on-disk data is little-endian; swap on big-endian hosts
                ids.byteswap()

        self.ids = ids
        return ids

    def read_weights(self):
        """Decode, cache on ``self.weights`` and return the weight array.

        A stored weights length of 0 means every weight is 1.0 (the
        weights section was omitted when written).
        """
        if self.weightslen == 0:
            weights = [1.0] * self.postcount
        else:
            offset = self.dataoffset + self.idslen
            weights_string = self.postfile.map[offset:offset + self.weightslen]
            if self.compression:
                weights_string = decompress(weights_string)
            weights = array("f")
            weights.fromstring(weights_string)
            if not IS_LITTLE:
                weights.byteswap()

        self.weights = weights
        return weights

    def read_values(self):
        """Decode, cache on ``self.values`` and return the per-posting values.

        ``postingsize == 0`` means no stored values; ``postingsize < 0``
        means variable-size values that were pickled as one object;
        otherwise values are fixed-size byte slices.
        """
        postingsize = self.postingsize
        if postingsize == 0:
            values = [None] * self.postcount
        else:
            offset = self.dataoffset + self.idslen + self.weightslen
            values_string = self.postfile.map[offset:self.nextoffset]
            if self.compression:
                values_string = decompress(values_string)
            if postingsize < 0:
                values = loads(values_string)
            else:
                values = [
                    values_string[i:i + postingsize]
                    for i in xrange(0, len(values_string), postingsize)
                ]

        self.values = values
        return values

    def write(self, compression=3):
        """Serialize this block (header, max ID, IDs, weights, values).

        :param compression: zlib level; compression is skipped for tiny
            blocks (4 or fewer postings) or when zlib is unavailable.
        """
        postfile = self.postfile
        stringids = self.stringids
        ids = self.ids
        weights = self.weights
        values = self.values
        postcount = len(ids)

        if postcount <= 4 or not can_compress:
            compression = 0

        # Max ID
        maxid = ids[-1]
        if stringids:
            # [2:] strips the pickle protocol header
            maxid_string = dumps(maxid, -1)[2:]
        else:
            maxid_string = pack_uint(maxid)

        # IDs
        typecode = "I"
        if stringids:
            ids_string = dumps(ids, -1)[2:]
            typecode = "s"
        else:
            # shrink the array typecode to the smallest that fits maxid
            if maxid <= 255:
                typecode = "B"
            elif maxid <= 65535:
                typecode = "H"
            if typecode != ids.typecode:
                ids = array(typecode, iter(ids))
            if not IS_LITTLE:
                ids.byteswap()
            ids_string = ids.tostring()
        if compression:
            ids_string = compress(ids_string, compression)

        # Weights
        # an empty weights section encodes "all weights are 1.0"
        if all(w == 1.0 for w in weights):
            weights_string = b('')
        else:
            if not IS_LITTLE:
                weights.byteswap()
            weights_string = weights.tostring()
        if weights_string and compression:
            weights_string = compress(weights_string, compression)

        # Values
        postingsize = self.postingsize
        if postingsize < 0:
            values_string = dumps(values, -1)[2:]
        elif postingsize == 0:
            values_string = b('')
        else:
            values_string = b("").join(values)
        if values_string and compression:
            values_string = compress(values_string, compression)

        # Header
        flags = 1 if compression else 0
        blocksize = sum((self._struct.size, len(maxid_string), len(ids_string),
                         len(weights_string), len(values_string)))
        header = self._struct.pack(blocksize, flags, postcount,
                                   typecode.encode('latin-1'), 0,
                                   len(ids_string), len(weights_string),
                                   self.max_weight(), self.max_wol(), 0, 0,
                                   self._maxlength, self._minlength or 0)

        postfile.write(header)
        postfile.write(maxid_string)
        postfile.write(ids_string)
        postfile.write(weights_string)
        postfile.write(values_string)
예제 #26
0
파일: table.py 프로젝트: Taejun/happybase
"""

import logging
from numbers import Integral
from operator import attrgetter
from struct import Struct

from .hbase.ttypes import TScan
from .util import thrift_type_to_dict, str_increment, OrderedDict
from .batch import Batch

logger = logging.getLogger(__name__)

# Extract just the value (or value + timestamp) attribute(s) from a cell.
make_cell = attrgetter('value')
make_cell_timestamp = attrgetter('value', 'timestamp')
# Big-endian signed 64-bit packer.
pack_i64 = Struct('>q').pack


def make_row(cell_map, include_timestamp):
    """Make a row dict for a cell mapping like ttypes.TRowResult.columns."""
    extract = make_cell_timestamp if include_timestamp else make_cell
    return dict((name, extract(cell)) for name, cell in cell_map.iteritems())


def make_ordered_row(sorted_columns, include_timestamp):
    """Make a row dict for sorted column results from scans."""
    extract = make_cell_timestamp if include_timestamp else make_cell
    pairs = ((column.columnName, extract(column.cell))
             for column in sorted_columns)
    return OrderedDict(pairs)
예제 #27
0
# Object types
OBJECT_GETPUBKEY = 0
OBJECT_PUBKEY = 1
OBJECT_MSG = 2
OBJECT_BROADCAST = 3
OBJECT_ONIONPEER = 0x746f72  # ASCII 'tor'
OBJECT_I2P = 0x493250  # ASCII 'I2P'
OBJECT_ADDR = 0x61646472  # ASCII 'addr'

# Random nonce used to recognize a connection looped back to ourselves.
# NOTE: randrange excludes its upper bound, so the maximum is 2**64 - 2.
eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack(
    '>Q', random.randrange(1, 18446744073709551615))

# Compiled struct for packing/unpacking headers
# New code should use CreatePacket instead of Header.pack
# Per '!L12sL4s': uint32, 12-byte string, uint32, 4 bytes -- presumably
# magic, command, payload length, checksum; confirm against CreatePacket.
Header = Struct('!L12sL4s')

VersionPacket = Struct('>LqQ20s4s36sH')

# Bitfield


def getBitfield(address):
    """Get a bitfield from an address"""
    # bitfield of features supported by me (see the wiki).
    bitfield = 0
    # advertise ack support unless the address opted out of sending acks
    sends_ack = not BMConfigParser().safeGetBoolean(address, 'dontsendack')
    if sends_ack:
        bitfield |= BITFIELD_DOESACK
    return pack('>I', bitfield)
예제 #28
0
파일: erlterms.py 프로젝트: ugufugu/erlport
        return (type(self) == type(other) and self.language == other.language
                and self.data == other.data)

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.__class__, self.language, self.data))

    def __repr__(self):
        return "OpaqueObject(%r, %r)" % (self.data, self.language)


# atom presumably used to tag Python-originated opaque terms -- see encode/decode
_python = Atom(b"python")

# Pre-compiled big-endian unpackers for the Erlang external term format.
_int4_unpack = Struct(b">I").unpack
_int2_unpack = Struct(b">H").unpack
_signed_int4_unpack = Struct(b">i").unpack
_float_unpack = Struct(b">d").unpack
_double_bytes_unpack = Struct(b"BB").unpack
_int4_byte_unpack = Struct(b">IB").unpack


def decode(string):
    """Decode Erlang external term."""
    if not string:
        raise IncompleteData(string)
    if string[0] != 131:
        raise ValueError("unknown protocol version: %r" % string[0])
    if string[1:2] == b'P':
        # compressed term
예제 #29
0
 def _field_short_uint(self,
                       unpacker=Struct('>H').unpack_from,
                       size=Struct('>H').size):
     rval = unpacker(self._input, self._pos)[0]
     self._pos += size
     return rval
예제 #30
0
파일: _tools.py 프로젝트: jinhuanz/MetPy
 def read_int(self, code):
     """Parse the current buffer offset as the specified integer code."""
     result = self.read_struct(Struct(code))
     return result[0]