Example 1
from struct import Struct

def unpack_records(format, data):
    """
    Yield the records contained in a binary string
    """
    record_struct = Struct(format)
    for offset in range(0, len(data), record_struct.size):
        yield record_struct.unpack_from(data, offset)
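
# A quick, hedged round-trip check of the generator above; the '<idd'
# format and the sample records are illustrative assumptions:
record_struct = Struct('<idd')
data = b''.join(record_struct.pack(i, i * 0.5, i * 0.25) for i in range(3))
assert list(unpack_records('<idd', data)) == [(0, 0.0, 0.0), (1, 0.5, 0.25), (2, 1.0, 0.5)]
# on Python 3.4+, Struct.iter_unpack performs the same iteration:
assert list(record_struct.iter_unpack(data)) == list(unpack_records('<idd', data))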
Example 2
	def parseLine( self, line ):
		"Parse a gcode line."
		binary16ByteRepository = self.binary16ByteRepository
		splitLine = line.split()
		if len( splitLine ) < 1:
			return
		firstWord = splitLine[ 0 ]
		if len( firstWord ) < 1:
			return
		firstLetter = firstWord[ 0 ]
		if firstLetter == '(':
			return
		feedRateInteger = getIntegerFromCharacterLengthLineOffset( 'F', 0.0, splitLine, binary16ByteRepository.feedRateStepLength.value )
		iInteger = getIntegerFromCharacterLengthLineOffset( 'I', 0.0, splitLine, binary16ByteRepository.xStepLength.value )
		jInteger = getIntegerFromCharacterLengthLineOffset( 'J', 0.0, splitLine, binary16ByteRepository.yStepLength.value )
		xInteger = getIntegerFromCharacterLengthLineOffset( 'X', binary16ByteRepository.xOffset.value, splitLine, binary16ByteRepository.xStepLength.value )
		yInteger = getIntegerFromCharacterLengthLineOffset( 'Y', binary16ByteRepository.yOffset.value, splitLine, binary16ByteRepository.yStepLength.value )
		zInteger = getIntegerFromCharacterLengthLineOffset( 'Z', binary16ByteRepository.zOffset.value, splitLine, binary16ByteRepository.zStepLength.value )
		sixteenByteStruct = Struct( 'cBhhhhhhBc' )
#		print( 'xInteger' )
#		print( xInteger )
		flagInteger = getIntegerFlagFromCharacterSplitLine( 'X', splitLine )
		flagInteger += 2 * getIntegerFlagFromCharacterSplitLine( 'Y', splitLine )
		flagInteger += 4 * getIntegerFlagFromCharacterSplitLine( 'Z', splitLine )
		flagInteger += 8 * getIntegerFlagFromCharacterSplitLine( 'I', splitLine )
		flagInteger += 16 * getIntegerFlagFromCharacterSplitLine( 'J', splitLine )
		flagInteger += 32 * getIntegerFlagFromCharacterSplitLine( 'F', splitLine )
		packedString = sixteenByteStruct.pack( firstLetter, int( firstWord[ 1 : ] ), xInteger, yInteger, zInteger, iInteger, jInteger, feedRateInteger, flagInteger, '#' )
		self.output.write( packedString )
Example 3
    def _read_radm(self, data, n):
        """
        RADM(8802,88,413) - record 25
        .. todo:: add object
        """
        struct_i = self.struct_i
        nmaterials = 0
        ndata = len(data)
        while n < ndata:  # 1*4
            packs = []
            edata = data[n:n+4]
            number, = struct_i.unpack(edata)
            n += 4

            iformat = 'i %if' % (number)
            struct_i_nf = Struct(b(self._endian + iformat))
            #mid, absorb, emiss1, emiss2, ...
            ndata_per_pack = 1 + number
            nstr_per_pack = ndata_per_pack * 4

            nfields = (ndata - n) // 4
            npacks = nfields // ndata_per_pack
            for ipack in range(npacks):
                edata = data[n:n+nstr_per_pack]
                pack = list(struct_i_nf.unpack(edata))
                packs.append(pack)
                n += nstr_per_pack

                mat = RADM.add_op2_data(pack)
                self.add_thermal_BC(mat, mat.radmid)
                nmaterials += 1

        self.card_count['RADM'] = nmaterials
        return n
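
# A hedged sketch of the runtime-format trick used above: 'i %if' % number
# means one int followed by `number` floats ('<' endianness assumed here):
from struct import Struct

number = 3
struct_i_nf = Struct('<i %if' % number)
assert struct_i_nf.size == 4 + 4 * number   # 4-byte int + three 4-byte floats
mid, e1, e2, e3 = struct_i_nf.unpack(struct_i_nf.pack(7, 0.5, 0.25, 0.125))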
Example 4
from struct import Struct

def write_records(records, format, f):
    """
    Write a sequence of iterables to a binary file
    """
    record_struct = Struct(format)
    for rec in records:
        f.write(record_struct.pack(*rec))
Example 5
    def __init__(self, info, prefmt=''):
        names, formats = zip(*info)

        # Remove empty names
        self._names = [n for n in names if n]

        Struct.__init__(self, prefmt + ''.join(f for f in formats if f))
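
# For context, a minimal self-contained subclass around this __init__; the
# class name NamedStruct and the sample field list are assumptions:
from struct import Struct

class NamedStruct(Struct):
    def __init__(self, info, prefmt=''):
        names, formats = zip(*info)
        self._names = [n for n in names if n]   # skip unnamed padding fields
        Struct.__init__(self, prefmt + ''.join(f for f in formats if f))

header = NamedStruct([('magic', '4s'), ('version', 'H'), ('', '2x'), ('count', 'I')],
                     prefmt='<')
assert header.size == 12                        # 4 + 2 + 2 (pad) + 4
assert header._names == ['magic', 'version', 'count']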
Example 6
    def read_elements_binary(self, nelements):
        self.nElementsRead = nelements
        self.nElementsSkip = 0
        #print "starting read_elements"
        #print self.infile.tell(), 'elements'
        #isBuffered = True
        size = nelements * 12  # 12=3*4 all the elements

        elements = zeros(self.nElements*3, 'int32')

        n = 0
        s = Struct(b'>3000i')
        while size > 12000:  # 12,000 bytes = 1000 elements (3 ints * 4 bytes each)
            data = self.infile.read(4 * 3000)
            nodes = s.unpack(data)
            elements[n : n + 3000] = nodes
            size -= 4 * 3000
            n += 3000

        assert size >= 0, 'size=%s' % size
        if size > 0:
            data = self.infile.read(size)
            Format = b'>%ii' % (size // 4)

            nodes = unpack(Format, data)
            elements[n:] = nodes

        #if isBuffered:
            #pass
        #else:
            #raise RuntimeError('unBuffered')
        elements2 = elements.reshape((nelements, 3))
        self.infile.read(8)  # end of third (element) block, start of regions (fourth) block
        #print "finished read_elements"
        return elements2
Example 7
    def _read_mathp(self, data, n):
        """MATHP(4506,45,374) - Record 11"""
        nmaterials = 0
        s1 = Struct(b(self._endian + 'i7f3i23fi'))
        s2 = Struct(b(self._endian + '8i'))
        ndata = len(data)
        while n < ndata:
            edata = data[n:n+140]
            n += 140
            out1 = s1.unpack(edata)
            (mid, a10, a01, d1, rho, alpha, tref, ge, sf, na, nd, kp,
             a20, a11, a02, d2,
             a30, a21, a12, a03, d3,
             a40, a31, a22, a13, a04, d4,
             a50, a41, a32, a23, a14, a05, d5,
             continue_flag) = out1
            data_in = [out1]

            if continue_flag:
                edata = data[n:n+32]  # 8*4
                n += 32
                out2 = s2.unpack(edata)
                (tab1, tab2, tab3, tab4, x1, x2, x3, tab5) = out2
                data_in.append(out2)
            mat = MATHP.add_op2_data(data_in)
            self.add_op2_material(mat)
            nmaterials += 1
        self.card_count['MATHP'] = nmaterials
        return n
Example 8
def leapseconds(tzfiles=['/usr/share/zoneinfo/right/UTC',
                         '/usr/lib/zoneinfo/right/UTC'],
                use_fallback=True):
    """Extract leap seconds from *tzfiles*."""
    for filename in tzfiles:
        try:
            file = open(filename, 'rb')
        except IOError:
            continue
        else:
            break
    else:  # no break
        if not use_fallback:
            raise ValueError('Unable to open any tzfile: %s' % (tzfiles,))
        else:
            return _fallback()

    with file:
        header = Struct('>4s c 15x 6i')  # see struct tzhead above
        (magic, version, _, _, leapcnt, timecnt, typecnt,
         charcnt) = header.unpack_from(file.read(header.size))
        if magic != "TZif".encode():
            raise ValueError('Wrong magic %r in tzfile: %s' % (
                magic, file.name))
        if version not in '\x0023'.encode():
            warn('Unsupported version %r in tzfile: %s' % (
                version, file.name), RuntimeWarning)
        if leapcnt == 0:
            raise ValueError("No leap seconds in tzfile: %s" % (
                file.name))

        """# from tzfile.h[2] (the file is in public domain)

         . . .header followed by. . .

         tzh_timecnt (char [4])s  coded transition times a la time(2)
         tzh_timecnt (unsigned char)s types of local time starting at above
         tzh_typecnt repetitions of
           one (char [4])  coded UT offset in seconds
           one (unsigned char) used to set tm_isdst
           one (unsigned char) that's an abbreviation list index
         tzh_charcnt (char)s  '\0'-terminated zone abbreviations
         tzh_leapcnt repetitions of
           one (char [4])  coded leap second transition times
           one (char [4])  total correction after above
        """
        file.read(timecnt * 5 + typecnt * 6 + charcnt)  # skip

        result = [LeapSecond(datetime(1972, 1, 1), timedelta(seconds=10))]
        nleap_seconds = 10
        tai_epoch_as_tai = datetime(1970, 1, 1, 0, 0, 10)
        buf = Struct(">2i")
        for _ in range(leapcnt):  # read leap seconds
            t, cnt = buf.unpack_from(file.read(buf.size))
            dTAI_UTC = nleap_seconds + cnt
            utc = tai_epoch_as_tai + timedelta(seconds=t - dTAI_UTC + 1)
            assert utc - datetime(utc.year, utc.month, utc.day) == timedelta(0)
            result.append(LeapSecond(utc, timedelta(seconds=dTAI_UTC)))
        result.append(sentinel)
        return result
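
# Sanity check on the 44-byte tzfile header parsed above; a sketch with
# synthetic bytes, not a real tzfile:
from struct import Struct

header = Struct('>4s c 15x 6i')
assert header.size == 44
fake = header.pack(b'TZif', b'2', 0, 0, 27, 0, 0, 0)
magic, version, _, _, leapcnt, timecnt, typecnt, charcnt = header.unpack(fake)
assert (magic, version, leapcnt) == (b'TZif', b'2', 27)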
Example 9
class StreamSerializer(object):
    """Helper to pass python objects over streams."""

    length_format = '!i'

    def __init__(self):
        self.length_struct = Struct(self.length_format)
        self.length = calcsize(self.length_format)

    @staticmethod
    def encode(obj):
        return pickle.dumps(obj)

    @staticmethod
    def decode(message):
        return pickle.loads(message)

    def encode_with_length(self, obj):
        """Encode object and prepend length to message."""
        message = self.encode(obj)
        return self.length_struct.pack(len(message)) + message

    def decode_from_stream(self, fd, timeout=5):
        """Read object from given stream and return it."""
        rlist, _, _ = select([fd], [], [], timeout)
        if not rlist:
            raise RuntimeError("Can't read object from {0!r}.".format(fd))
        message_length = self.length_struct.unpack(os.read(fd, self.length))[0]
        assert message_length > 0, 'wrong message length provided'
        return self.decode(os.read(fd, message_length))
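
# Hedged usage sketch over an os.pipe() (POSIX); assumes the imports the
# class relies on (os, pickle, select.select, struct.Struct, calcsize):
import os

r, w = os.pipe()
serializer = StreamSerializer()
os.write(w, serializer.encode_with_length({'answer': 42}))
assert serializer.decode_from_stream(r) == {'answer': 42}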
Example 10
 def binrepr(cls, buffer):
   lenStruct = Struct("HHHH")
   (headerLen, _, pageClassLen, schemaDescLen) = lenStruct.unpack_from(buffer)
   if headerLen > 0 and pageClassLen > 0 and schemaDescLen > 0:
     return Struct("HHHH"+str(pageClassLen)+"s"+str(schemaDescLen)+"s")
   else:
     raise ValueError("Invalid header length read from storage file header")
Example 11
from struct import Struct

def write_records(records, format, f):
    '''
    Write a sequence of tuples to a binary file of structures.
    '''
    record_struct = Struct(format)
    for r in records:
        f.write(record_struct.pack(*r))
Example 12
 def putPacket(self, payload, flags=0):
     if flags & 0b00000001:
         payload = compress(payload)
     # length calculations
     blocklength = self.cipherscheme.block_size
     payloadlength = len(payload)
     paddinglength = 4 + blocklength - (10 + payloadlength) % blocklength
     packetlength = 6 + payloadlength + paddinglength
     # create packet
     fields = (
         packetlength,
         paddinglength,
         flags,
         payload,
         self.prng.read(paddinglength)
     )
     packetformat = Struct('!LBB%ds%ds' % (payloadlength, paddinglength))
     encpacket = bytearray(self.encipher.encrypt(packetformat.pack(*fields)))
     # update message authentication
     self.ehmac.update(self.longformat.pack(self.enseqno))
     self.ehmac.update(buffer(encpacket))
     self.enseqno += 1
     # append the most recent digest
     encpacket.extend(self.ehmac.digest())
     # put packet on the wire
     packet = buffer(encpacket)
     while packet:
         packet = packet[self.sock.send(packet):]
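
# The padding arithmetic above keeps the packed packet block-aligned before
# encryption; a hedged check (a 16-byte cipher block is an assumption):
from struct import calcsize

blocklength, payloadlength = 16, 5
paddinglength = 4 + blocklength - (10 + payloadlength) % blocklength
packetlength = 6 + payloadlength + paddinglength
fmt = '!LBB%ds%ds' % (payloadlength, paddinglength)
assert calcsize(fmt) == packetlength        # 4+1+1 header + payload + padding
assert packetlength % blocklength == 0      # the cipher sees whole blocks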
Example 13
 def _read_grid(self, data, n):  # 21.8 sec, 18.9
     """(4501,45,1) - the marker for Record 17"""
     s = Struct(b(self._endian + 'ii3f3i'))
     ntotal = 32
     nentries = (len(data) - n) // ntotal
     self._increase_card_count('GRID', nentries)
     for i in range(nentries):
         edata = data[n:n + 32]
         out = s.unpack(edata)
         (nid, cp, x1, x2, x3, cd, ps, seid) = out
         if self.is_debug_file:
             self.binary_debug.write('  GRID=%s\n' % str(out))
         if cd >= 0 and nid < 10000000:
             if ps == 0:
                 ps = ''
              node = GRID(nid, cp, np.array([x1, x2, x3]), cd, ps, seid)
              self.nodes[nid] = node
         else:
             self.log.debug("*nid=%s cp=%s x1=%-5.2f x2=%-5.2f x3=%-5.2f cd=%-2s ps=%s seid=%s" %
                            (nid, cp, x1, x2, x3, cd, ps, seid))
         n += ntotal
     return n
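
# For reference, the 32-byte GRID record unpacked above; '<' endianness and
# the node values are assumptions for illustration:
from struct import Struct

s = Struct('<ii3f3i')                       # nid, cp, x1, x2, x3, cd, ps, seid
assert s.size == 32
nid, cp, x1, x2, x3, cd, ps, seid = s.unpack(s.pack(1001, 0, 1.0, 2.0, 3.0, 0, 0, 0))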
Example 14
    def read_points_binary(self, npoints):
        size = npoints * 12  # 12=3*4 all the points

        n = 0
        points = zeros(npoints * 3, dtype='float32')
        s = Struct(b'>3000f') # 3000 floats; 1000 points
        while size > 12000:  # 12k = 4 bytes/float*3 floats/point*1000 points
            data = self.infile.read(4 * 3000)

            nodeXYZs = s.unpack(data)
            points[n:n+3000] = nodeXYZs
            n += 3000
            size -= 4 * 3000

        assert size >= 0, 'size=%s' % size

        if size > 0:
            data = self.infile.read(size)
            Format = b'>%if' % (size // 4)

            nodeXYZs = unpack(Format, data)
            points[n:] = nodeXYZs

        points = points.reshape((npoints, 3))

        self.infile.read(8)  # end of second block, start of third block
        return points
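
# The chunked loop above bounds memory per read. Since numpy is already in
# use (zeros), a sketch of an equivalent reader; an illustration, not the
# library's own implementation:
import numpy as np

def read_points_binary_np(infile, npoints):
    """Return an (npoints, 3) float32 array of big-endian floats, as above."""
    points = np.fromfile(infile, dtype='>f4', count=npoints * 3)
    infile.read(8)                          # end of block, start of next block
    return points.astype('float32').reshape((npoints, 3))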
Example 15
    def read_elements_binary(self, nelements):
        self.nElementsRead = nelements
        self.nElementsSkip = 0
        size = nelements * 12  # 12=3*4 all the elements

        elements = zeros(self.nElements*3, dtype='int32')

        n = 0
        s = Struct(b'>3000i')
        while size > 12000:  # 12,000 bytes = 1000 elements (3 ints * 4 bytes each)
            data = self.infile.read(4 * 3000)
            nodes = s.unpack(data)
            elements[n : n + 3000] = nodes
            size -= 4 * 3000
            n += 3000

        assert size >= 0, 'size=%s' % size
        if size > 0:
            data = self.infile.read(size)
            Format = b'>%ii' % (size // 4)

            nodes = unpack(Format, data)
            elements[n:] = nodes

        elements2 = elements.reshape((nelements, 3))
        self.infile.read(8)  # end of third (element) block, start of regions (fourth) block
        return elements2
Example 16
 def _read_pbush_nx(self, data, n):
     """PBUSH(1402,14,37)"""
     #if self.table_name == ['EPTS', 'EPT']:
     ntotal = 72
     s = Struct(b(self._endian + 'i17f'))
     nentries = (len(data) - n) // ntotal
     assert nentries > 0, 'table=%r len=%s' % (self.table_name, len(data) - n)
     for i in range(nentries):
         edata = data[n:n+72]
         out = s.unpack(edata)
         (pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
          g1, sa, st, ea, et) = out
         g2 = g3 = g4 = g5 = g6 = g1
         data_in = (pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
                    g1, g2, g3, g4, g5, g6, sa, st, ea, et)
         prop = PBUSH.add_op2_data(data_in)
         self._add_op2_property(prop)
         n += ntotal
     #else:
         #ntotal = 92  # 23*4
         #s = Struct(b(self._endian + 'i22f'))
         #nentries = (len(data) - n) // ntotal
         #assert nentries > 0, 'table=%r len=%s' % (self.table_name, len(data) - n)
         #for i in range(nentries):
             #edata = data[n:n+92]
             #out = s.unpack(edata)
             #(pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
              #g1, g2, g3, g4, g5, g6, sa, st, ea, et) = out
             #prop = PBUSH.add_op2_data(out)
             #self._add_op2_property(prop)
             #n += ntotal
     self.card_count['PBUSH'] = nentries
     return n
Example 17
    def __init__(self, fname):
        self.fname = fname
        dbg("reading blendomatic data from %s" % fname, 1, push="blendomatic")

        fname = file_get_path(fname, write=False)
        f = file_open(fname, binary=True, write=False)

        buf = f.read(Blendomatic.blendomatic_header.size)
        self.header = Blendomatic.blendomatic_header.unpack_from(buf)

        blending_mode_count, tile_count = self.header

        dbg("%d blending modes, each %d tiles" % (blending_mode_count, tile_count), 2)

        blending_mode = Struct(endianness + "I %dB" % (tile_count))

        self.blending_modes = list()

        for i in range(blending_mode_count):
            header_data  = f.read(blending_mode.size)
            bmode_header = blending_mode.unpack_from(header_data)

            new_mode = BlendingMode(i, f, tile_count, bmode_header)

            self.blending_modes.append(new_mode)

        f.close()
        dbg(pop="blendomatic")
Example 18
	def read(self, raw, offset):
		self.data = dict()

		#uint16_t terrain_restriction_count;
		#uint16_t terrain_count;
		header_struct = Struct(endianness + "H H")

		header = header_struct.unpack_from(raw, offset)
		offset += header_struct.size
		self.data["terrain_restriction_count"], self.data["terrain_count"] = header

		#int32_t terrain_restriction_offset0[terrain_restriction_count];
		#int32_t terrain_restriction_offset1[terrain_restriction_count];
		tr_offset_struct = Struct(endianness + "%di" % self.data["terrain_restriction_count"])

		self.data["terrain_restriction_offset0"] = tr_offset_struct.unpack_from(raw, offset)
		offset += tr_offset_struct.size
		self.data["terrain_restriction_offset1"] = tr_offset_struct.unpack_from(raw, offset)
		offset += tr_offset_struct.size

		self.data["terrain_restriction"] = list()
		for i in range(self.data["terrain_restriction_count"]):
			t = TerrainRestriction(self.data["terrain_count"])
			offset = t.read(raw, offset)
			self.data["terrain_restriction"] += [t.data]

		return offset
Example 19
    def _read_buckling_eigenvalue_4(self, data):
        # BLAMA - Buckling eigenvalue summary table
        # CLAMA - Complex eigenvalue summary table
        # LAMA - Normal modes eigenvalue summary table
        if self.read_mode == 1:
            return len(data)

        msg = '_read_buckling_eigenvalue_4'
        #return self._not_implemented_or_skip(data, msg)  # TODO: implement buckling eigenvalues

        ntotal = 4 * 7
        nModes = len(data) // ntotal
        n = 0
        #assert self.isubcase != 0, self.isubcase
        blama = BucklingEigenvalues(11)
        self.eigenvalues[self.Title] = blama
        #self.eigenvalues[self.isubcase] = lama
        s = Struct(b'ii5f')
        for i in range(nModes):
            edata = data[n:n+ntotal]
            out = s.unpack(edata)
            if self.debug4():
                self.binary_debug.write('  eigenvalue%s - %s\n' % (i, str(out)))
            (iMode, order, eigen, omega, freq, mass, stiff) = out # BLAMA??
            #(modeNum, extractOrder, eigenvalue, radian, cycle, genM, genK) = line  # LAMA
            #(rootNum, extractOrder, eigr, eigi, cycle, damping) = data  # CLAMA
            blama.addF06Line(out)
            n += ntotal
        return n
Example 20
 def __extract_fdt_dt(self):
     """Extract tags"""
     cell = Struct(self.__fdt_dt_cell_format)
     tags = []
     self.infile.seek(self.fdt_header['off_dt_struct'])
     while True:
         data = self.infile.read(cell.size)
         if len(data) < cell.size:
             break
         tag, = cell.unpack_from(data)
         # print "*** %s" % self.__fdt_dt_tag_name.get(tag, '')
          if self.__fdt_dt_tag_name.get(tag, '') == 'node_begin':
             name = self.__extract_fdt_nodename()
             if len(name) == 0:
                 name = '/'
             tags.append((tag, name))
         elif self.__fdt_dt_tag_name.get(tag, '') in ('node_end', 'nop'):
             tags.append((tag, ''))
          elif self.__fdt_dt_tag_name.get(tag, '') == 'end':
             tags.append((tag, ''))
             break
          elif self.__fdt_dt_tag_name.get(tag, '') == 'prop':
             propdata = self.__extract_fdt_prop()
             tags.append((tag, propdata))
         else:
             print("Unknown Tag %d" % tag)
     return tags
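
# For reference, the structure-block tag values from the devicetree spec
# that __fdt_dt_tag_name presumably maps to the names tested above:
FDT_BEGIN_NODE = 0x1    # 'node_begin'
FDT_END_NODE = 0x2      # 'node_end'
FDT_PROP = 0x3          # 'prop'
FDT_NOP = 0x4           # 'nop'
FDT_END = 0x9           # 'end'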
Example 21
	def read(self, raw, offset):
		self.data = dict()

		self.data["terrain_border"] = list()
		for i in range(16):
			t = TerrainBorder()
			offset = t.read(raw, offset)
			self.data["terrain_border"] += [t.data]

		#int8_t zero[28];
		#uint16_t terrain_count_additional;
		zero_terrain_count_struct = Struct(endianness + "28c H")
		pc = zero_terrain_count_struct.unpack_from(raw, offset)
		offset += zero_terrain_count_struct.size

		self.data["terrain_count_additional"] = pc[28]

		tmp_struct = Struct(endianness + "12722s")
		t = tmp_struct.unpack_from(raw, offset)
		offset_begin = offset
		offset += tmp_struct.size

		fname = 'raw/terrain_render_data_%d_to_%d.raw' % (offset_begin, offset)
		filename = file_get_path(fname, write=True)
		file_write(filename, t[0])

		return offset
Example 22
	def read(self, raw, offset):
		self.data = dict()

		#bool exists;
		unit_header_header_struct0 = Struct(endianness + "?")

		pc = unit_header_header_struct0.unpack_from(raw, offset)
		offset += unit_header_header_struct0.size

		self.data["exists"] = pc[0]

		if self.data["exists"]:
			unit_header_header_struct1 = Struct(endianness + "H")

			pc = unit_header_header_struct1.unpack_from(raw, offset)
			offset += unit_header_header_struct1.size

			self.data["unit_command_count"] = pc[0]

			self.data["unit_command"] = list()
			for i in range(self.data["unit_command_count"]):
				t = UnitCommand()
				offset = t.read(raw, offset)
				self.data["unit_command"] += [t.data]

		return offset
Example 23
def zmf2dict(fn):
    """Read a Zemax library (a .zmf file) and build a dictionary with the
    description of each component, keyed by the component's reference.
    """
    f = open(fn, "rb")  # binary mode: the contents are unpacked with Struct
    rd = {}
    head = Struct("<I")
    lens = Struct("<100sIIIIIIIdd")
    shapes = "?EBPM"
    version, = head.unpack(f.read(head.size))
    assert version in (1001, )
    while True:
        li = f.read(lens.size)
        if len(li) != lens.size:
            if len(li) > 0:
                print(f, "additional data", repr(li))
            break
        li = list(lens.unpack(li))
        li[0] = li[0].decode("latin1").strip("\0")
        li[3] = shapes[li[3]]
        description = f.read(li[7])
        assert len(description) == li[7]
        description = zmf_obfuscate(description, li[8], li[9])
        description = description.decode("latin1")
        assert description.startswith("VERS {:06d}\n".format(li[1]))
        rd[li[0]] = description
    return rd
Example 24
    def _read_real_table(self, data, result_name, flag, is_cid=False):
        if self.debug4():
            self.binary_debug.write('  _read_real_table\n')
        assert flag in ['node', 'elem'], flag
        n = 0
        ntotal = 32 # 8 * 4
        dt = self.nonlinear_factor
        assert self.obj is not None

        obj = self.obj
        format1 = '2i6f'  # 2 ints + 6 floats = 8 values (32 bytes)

        nnodes = len(data) // ntotal

        assert nnodes > 0
        #assert len(data) % ntotal == 0
        s = Struct(format1)
        for inode in range(nnodes):
            edata = data[n:n+ntotal]
            out = s.unpack(edata)
            (eid_device, grid_type, tx, ty, tz, rx, ry, rz) = out

            eid = (eid_device - self.device_code) // 10
            if self.debug4():
                self.binary_debug.write('  %s=%i; %s\n' % (flag, eid, str(out)))
            obj.add(dt, eid, grid_type, tx, ty, tz, rx, ry, rz)
            n += ntotal
        return n
Example 25
    def _read_complex_eigenvalue_4(self, data):
        """parses the Complex Eigenvalues Table 4 Data"""
        if self.read_mode == 1:
            return len(data)

        ntotal = 4 * 6
        nmodes = len(data) // ntotal
        n = 0
        #assert self.isubcase != 0, self.isubcase
        clama = ComplexEigenvalues(11)
        self.eigenvalues[self.Title] = clama
        #self.eigenvalues[self.isubcase] = lama
        s = Struct(b'ii4f')
        for i in range(nmodes):
            edata = data[n:n+ntotal]
            out = s.unpack(edata)
            if self.debug4():
                self.binary_debug.write('  eigenvalue%s - %s\n' % (i, str(out)))
            (imode, order, eigr, eigc, freq, damping) = out # CLAMA
            #print('imode=%s order=%s eigr=%s eigc=%s freq=%s damping=%s' %
                  #(imode, order, eigr, eigc, freq, damping))
            clama.addF06Line(out)
            n += ntotal
        assert n == len(data), 'clama length error'
        return n
Example 26
class ULInt24(StaticField):
    """
    A custom made construct for handling 3-byte types as used in ancient file formats.
    A better implementation would be writing a more flexible version of FormatField,
    rather than specifically implementing it for this case
    """
    __slots__ = ["packer"]
    def __init__(self, name):
        self.packer = Packer("<BH")
        StaticField.__init__(self, name, self.packer.size)
    def __getstate__(self):
        attrs = StaticField.__getstate__(self)
        attrs["packer"] = attrs["packer"].format
        return attrs
    def __setstate__(self, attrs):
        attrs["packer"] = Packer(attrs["packer"])
        return StaticField.__setstate__(self, attrs)
    def _parse(self, stream, context):
        try:
            vals = self.packer.unpack(_read_stream(stream, self.length))
            return vals[0] + (vals[1] << 8)
        except Exception:
            ex = sys.exc_info()[1]
            raise FieldError(ex)
    def _build(self, obj, stream, context):
        try:
            vals = (obj % 256, obj >> 8)
            _write_stream(stream, self.length, self.packer.pack(*vals))
        except Exception:
            ex = sys.exc_info()[1]
            raise FieldError(ex)
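
# The "<BH" trick above reads a 3-byte little-endian integer as a low byte
# plus a 16-bit high part; a quick self-contained check:
from struct import Struct

packer = Struct("<BH")
lo, hi = packer.unpack(bytes([0x01, 0x02, 0x00]))   # 0x000201 little-endian
assert lo + (hi << 8) == 0x0201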
Example 27
from struct import Struct

def write_records(records, format, f):
    '''
    Write a sequence of tuples to a binary file of structures.
    '''
    record_struct = Struct(format)
    for r in records:
        f.write(record_struct.pack(*r))
Example 28
def array_packing(arrdef, *more_arrdef):
    """pack multiple arrays into the same string

    take care of alignments between arrays
    """
    arrtype, arr = arrdef
    mystruct = Struct(arrtype)
    last_bytes = mystruct.size
    cur_size = last_bytes * len(arr)
    mybuffer = StringIO()
    mybuffer.write(''.join(mystruct.pack(val) for val in arr))
    for arrtype, arr in more_arrdef:
        mystruct = Struct(arrtype)
        cur_bytes = mystruct.size
        if cur_bytes > last_bytes:
            # align the string
            fill_bytes = align2pitch(cur_size, cur_bytes)
            mybuffer.write(ALIGN_CHAR * fill_bytes)
            cur_bytes += fill_bytes

        # write this arr
        cur_size = last_bytes * len(arr)
        mybuffer.write(''.join(mystruct.pack(val) for val in arr))

        # leave notes
        last_bytes = cur_bytes

    rtn = mybuffer.getvalue()
    mybuffer.close()
    return rtn
Example 29
    def write_binary_stl(self, stl_filename):
        """Write an STL binary file."""
        outfile = open(stl_filename, "wb")

        if hasattr(self, 'header'):
            header = self.header.ljust(80, '\0')  # ljust returns the padded copy
            outfile.write(header)
        else:
            header = '%-80s' % stl_filename
            outfile.write(pack('80s', header))
        #avector = [0., 0., 0.]
        #bvector = [0., 0., 0.]
        #cvector = [0., 0., 0.]
        nelements = self.elements.shape[0]
        outfile.write(pack('i', nelements))
        elements = self.elements

        p1 = self.nodes[elements[:, 0], :]
        p2 = self.nodes[elements[:, 1], :]
        p3 = self.nodes[elements[:, 2], :]
        avector = p2 - p1
        bvector = p3 - p1
        n = cross(avector, bvector)
        del avector, bvector
        #n /= norm(n, axis=1)

        s = Struct('12fH')
        for eid, element in enumerate(elements):
            data = s.pack(n[eid, 0], n[eid, 1], n[eid, 2],
                          p1[eid, 0], p1[eid, 1], p1[eid, 2],
                          p2[eid, 0], p2[eid, 1], p2[eid, 2],
                          p3[eid, 0], p3[eid, 1], p3[eid, 2], 0)
            outfile.write(data)
        outfile.close()
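
# Layout check for the 50-byte binary STL facet record written above; note
# that the STL spec is little-endian, so '<12fH' is the explicit form:
from struct import Struct, calcsize

facet = Struct('12fH')          # normal (3f), three vertices (9f), attribute count
assert facet.size == 50         # 12*4 + 2; the struct module adds no trailing pad
assert calcsize('80s') + calcsize('i') == 84    # header + triangle-count prefix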
Example 30
    def _readMATHP(self, data, n):
        """MATHP(4506,45,374) - Record 11"""
        #print "reading MATHP"
        nmaterials = 0
        s1 = Struct(b(self._endian + 'i7f3i23fi'))
        s2 = Struct(b(self._endian + '8i'))
        ndata = len(data)
        while n < ndata:
            eData = data[n:n+140]
            n += 140
            out1 = s1.unpack(eData)
            (mid, a10, a01, d1, rho, alpha, tref, ge, sf, na, nd, kp,
             a20, a11, a02, d2,
             a30, a21, a12, a03, d3,
             a40, a31, a22, a13, a04, d4,
             a50, a41, a32, a23, a14, a05, d5,
             continueFlag) = out1
            dataIn = [out1]

            if continueFlag:
                eData = data[n:n+32]  # 8*4
                n += 32
                out2 = s2.unpack(eData)
                (tab1, tab2, tab3, tab4, x1, x2, x3, tab5) = out2
                dataIn.append(out2)
            self.addOp2Material(MATHP(None, dataIn))
            nmaterials += 1
        self.card_count['MATHP'] = nmaterials
        return n
Example 31
class joe_pack:
    version = b'JPK01.00'
    bstruct = Struct('<2i')

    def __init__(self):
        self.numobjs = 0
        self.maxstrlen = 0
        self.joe = {}
        self.list = {}
        self.images = {}
        self.surfaces = []

    @staticmethod
    def read(filename):
        # don't change call order
        jpk = joe_pack()
        jpk.load_list(filename)
        jpk.load_images(filename)
        try:
            jpk.load(filename)
        except Exception:
            jpk.load_joes(filename)
        return jpk

    @staticmethod
    def write(filename, write_list, write_jpk):
        jpk = joe_pack().from_mesh()
        if write_jpk:
            jpk.save(filename)
        if write_list:
            jpk.save_list(filename)

    def to_mesh(self):
        trackobject.create_groups()
        for name, joe in self.joe.items():
            image = None
            trackobj = self.list.get(name)
            if trackobj:
                imagename = trackobj.values[1]
                image = self.images[imagename]
                obj = joe.to_mesh(name, image)
                trackobj.to_obj(obj)
            else:
                print(name + ' not imported. Not in list.txt.')

    def from_mesh(self):
        objlist = bpy.context.scene.objects
        trackobject.set_groups()
        for obj in objlist:
            if obj.type != 'MESH':
                continue
            if obj.name.startswith('~'):
                continue
            if not obj.data.tessfaces:
                obj.data.calc_tessface()
                if len(obj.data.tessfaces) == 0:
                    print(obj.name + ' not exported. No faces.')
                    continue
            if not obj.data.tessface_uv_textures:
                print(obj.name + ' not exported. No texture coordinates.')
                continue
            image = None
            if obj.data.tessface_uv_textures[0].data[0].image:
                image = obj.data.tessface_uv_textures[0].data[0].image
            else:
                for mat_slot in obj.material_slots:
                    for mtex_slot in mat_slot.material.texture_slots:
                        if mtex_slot and hasattr(mtex_slot.texture, 'image'):
                            image = mtex_slot.texture.image
                            break
            if not image:
                print(obj.name + ' not exported. No texture linked.')
                continue
            objname = obj.name
            trackobj = trackobject().from_obj(obj,
                                              path.basename(image.filepath))
            # override obj name
            if len(trackobj.values[0]):
                objname = trackobj.values[0]
            # loader expects a joe file
            if not objname.endswith('.joe'):
                objname = objname + '.joe'
                trackobj.values[0] = objname
            self.list[objname] = trackobj
            self.joe[objname] = obj
            self.maxstrlen = max(self.maxstrlen, len(objname))
        self.numobjs = len(self.joe)
        return self

    # fallback if no jpk
    def load_joes(self, filename):
        dir = path.dirname(filename)
        for name in self.list:
            joe_path = path.join(dir, name)
            file = open(joe_path, 'rb')
            joe = joe_obj().load(file)
            self.joe[name] = joe

    def load(self, filename):
        file = open(filename, 'rb')
        # header
        version = file.read(len(joe_pack.version))
        if version != joe_pack.version:
            raise Exception(filename + ' unknown jpk version: ' +
                            str(version) + ' expected: ' +
                            str(joe_pack.version))
        data = file.read(joe_pack.bstruct.size)
        v = joe_pack.bstruct.unpack(data)
        self.numobjs = v[0]
        self.maxstrlen = v[1]
        # fat
        fat = []
        for i in range(self.numobjs):
            data = file.read(joe_pack.bstruct.size)
            v = joe_pack.bstruct.unpack(data)
            offset = v[0]
            length = v[1]
            data = file.read(self.maxstrlen)
            # strip trailing zeros
            for i in range(self.maxstrlen):
                if data[i] == 0:
                    data = data[:i]
                    break
            name = data.decode('ascii')
            fat.append((offset, length, name))
        # data
        for offset, length, name in fat:
            pos = file.tell()
            delta = offset - pos
            if delta < 0:
                print('Error reading: ', name, offset)
                return
            elif delta > 0:
                file.read(delta)
            joe = joe_obj().load(file)
            self.joe[name] = joe
        file.close()

    def save(self, filename):
        try:
            file = open(filename, 'rb+')
        except IOError:
            file = open(filename, 'wb')
        # header
        file.write(self.version)
        data = joe_pack.bstruct.pack(self.numobjs, self.maxstrlen)
        file.write(data)
        # allocate fat
        fat_offset = file.tell()
        for i in range(self.numobjs):
            data = joe_pack.bstruct.pack(0, 0)
            file.write(data)
            name = util.fillz('', self.maxstrlen)
            file.write(name.encode('ascii'))
        # write data / build fat
        fat = []
        for name, obj in self.joe.items():
            offset = file.tell()
            joe = joe_obj().from_mesh(obj)
            joe.save(file)
            length = file.tell() - offset
            fat.append((offset, length, name))
        # fill fat
        file.seek(fat_offset)
        for offset, length, name in fat:
            data = joe_pack.bstruct.pack(offset, length)
            file.write(data)
            name = util.fillz(name, self.maxstrlen)
            file.write(name.encode('ascii'))
        file.close()

    def load_list(self, filename):
        dir = path.dirname(filename)
        list_path = path.join(dir, 'list.txt')
        try:
            list_file = open(list_path)
        except IOError:
            print(list_path + ' not found.')
            return
        # read objects
        line = list_file.readline()
        while line != '':
            if not line.startswith('#') and '.joe' in line:
                object = trackobject()
                name = line.strip()
                line = object.read(name, list_file)
                self.list[object.values[0]] = object
            else:
                line = list_file.readline()
        if len(self.list) == 0:
            print('Failed to load list.txt.')
        list_file.close()

    def save_list(self, filename):
        dir = path.dirname(filename)
        list_path = path.join(dir, 'list.txt')
        file = open(list_path, 'w')
        file.write('17\n\n')
        i = 0
        for name, object in self.list.items():
            file.write('#entry ' + str(i) + '\n')
            object.write(file)
            i = i + 1
        file.close()

    def load_images(self, filename):
        dir = path.dirname(filename)
        for name, object in self.list.items():
            imagename = object.values[1]
            if imagename not in self.images:
                imagepath = path.join(dir, imagename)
                self.images[imagename] = load_image(imagepath)
Example 32
    def write_geom1(self, op2, op2_ascii, obj):
        #if not hasattr(obj, 'nodes'):
        #    return
        nnodes = len(obj.nodes)
        ncoords = len(obj.coords)
        if nnodes or ncoords:
            data = [
                4,
                2,
                4,
                #4, 2,4,
                8,
                b'GEOM1   ',
                8,
                4,
                -1,
                4,
                #4, 1, 4,
                #4, 0, 4,
            ]
            op2.write(pack('4i 8s i 3i', *data))
            op2_ascii.write(str(data) + '\n')

            data = [
                4,
                7,
                4,
                28,
                1,
                2,
                3,
                4,
                5,
                6,
                7,
                28,
            ]
            op2.write(pack('3i 9i', *data))
            op2_ascii.write(str(data) + '\n')

            #-------------------------------------
            data = [4, -2, 4, 4, 1, 4, 4, 0, 4]
            op2.write(pack('9i', *data))
            op2_ascii.write(str(data) + '\n')

            data = [
                #4, 0, 4,
                4,
                2,
                4,
                8,
                1,
                2,
                8,
            ]
            op2.write(pack('3i 4i', *data))
            op2_ascii.write(str(data) + '\n')
            #data = [8, 1, 2, 8]
            #op2.write(pack('4i', *data))
            #-------------------------------------

            data = [4, -3, 4, 4, 1, 4, 4, 0, 4]
            op2.write(pack('9i', *data))
            op2_ascii.write(str(data) + '\n')

            if nnodes:
                #nvalues = nnodes * 8
                #nbytes = nvalues * 4
                #nnodes = 72
                bytes_per_id = 32
                assert nnodes == 72, nnodes
                nbytes = bytes_per_id * nnodes + 12  # 12 comes from the keys
                nvalues = nbytes // 4
                assert nbytes == 2316, nbytes
                op2.write(pack('3i', *[4, nvalues, 4]))
                op2.write(pack('i', nbytes))  # nbytes

                #op2.write(pack('3i', *[4, 0, 4]))
                #op2_ascii.write(str([4, 0, 4])) #values, nbtyes))

                #(4501,  45,  1): ['GRID',   self._read_grid],
                key = (4501, 45, 1)
                op2.write(pack('3i', *key))
                op2_ascii.write(str(key) + '\n')

                spack = Struct('ii 3f 3i')
                for nid, node in sorted(iteritems(obj.nodes)):
                    xyz = node.xyz
                    ps = node.ps
                    if ps == '':
                        psi = 0
                    else:
                        psi = int(ps)

                    seid = node.seid
                    if seid == '':
                        seidi = 0
                    else:
                        seidi = int(seid)
                    nid = node.nid
                    data = [
                        node.nid,
                        node.Cp(), xyz[0], xyz[1], xyz[2],
                        node.Cd(), psi, seidi
                    ]
                    op2.write(spack.pack(*data))
                    op2_ascii.write(
                        '  nid=%s cp=%s xyz=(%s, %s, %s) cd=%s ps=%s seid=%s\n'
                        % tuple(data))
                op2.write(pack('i', nbytes))

                #-------------------------------------
                data = [4, -4, 4, 4, 1, 4, 4, 0, 4]
                op2.write(pack('9i', *data))
                op2_ascii.write(str(data) + '\n')

                data = [4, 3, 4, 12, 1, 2, 3, 12]
                op2.write(pack('3i 5i', *data))
                op2_ascii.write(str(data) + '\n')
                #-------------------------------------
                data = [4, -5, 4, 4, 1, 4, 4, 0, 4]
                op2.write(pack('9i', *data))
                op2_ascii.write(str(data) + '\n')

                data = [
                    4,
                    0,
                    4,
                    #4, 2, 4
                ]
                op2.write(pack('3i', *data))
                op2_ascii.write(str(data) + '\n')

                #-------------------------------------

            if ncoords:
                #(1701,  17,  6): ['CORD1C', self._read_cord1c],
                #(1801,  18,  5): ['CORD1R', self._read_cord1r],
                #(1901,  19,  7): ['CORD1S', self._read_cord1s],
                #(2001,  20,  9): ['CORD2C', self._read_cord2c],
                #(2101,  21,  8): ['CORD2R', self._read_cord2r],
                #(2201,  22, 10): ['CORD2S', self._read_cord2s],
                #(14301,143,651): ['CORD3G', self._read_cord3g],
                pass
            _write_markers(op2, op2_ascii, [2, 4])
Example 33
    def write_geom2(self, op2, op2_ascii, obj):
        #if not hasattr(obj, 'nodes'):
        #    return
        nelements = len(obj.elements)
        if nelements:
            data = [
                4,
                2,
                4,
                #4, 2,4,
                8,
                b'GEOM2   ',
                8,
                4,
                -1,
                4,
                #4, 1, 4,
                #4, 0, 4,
            ]
            op2.write(pack('4i 8s i 3i', *data))
            op2_ascii.write(str(data) + '\n')

            data = [
                4,
                7,
                4,
                28,
                1,
                2,
                3,
                4,
                5,
                6,
                7,
                28,
            ]
            op2.write(pack('3i 9i', *data))
            op2_ascii.write(str(data) + '\n')

            #-------------------------------------
            data = [4, -2, 4, 4, 1, 4, 4, 0, 4]
            op2.write(pack('9i', *data))
            op2_ascii.write(str(data) + '\n')

            data = [
                #4, 0, 4,
                4,
                2,
                4,
                8,
                1,
                2,
                8,
            ]
            op2.write(pack('3i 4i', *data))
            op2_ascii.write(str(data) + '\n')
            #data = [8, 1, 2, 8]
            #op2.write(pack('4i', *data))
            #-------------------------------------

            data = [4, -3, 4, 4, 1, 4, 4, 0, 4]
            op2.write(pack('9i', *data))
            op2_ascii.write(str(data) + '\n')

            if nelements:
                out = obj.get_card_ids_by_card_types(
                    ['CTETRA', 'CHEXA', 'CPENTA'])
                for name, eids in out:
                    nelements = len(eids)
                    if name == 'CTETRA':
                        key = (5508, 55, 217)
                        nbytes_per_id = 48
                    else:
                        raise NotImplementedError(name)

                #if name in ['CTETRA', 'CHEXA', 'CPENTA']:
                #data_in = [eid, pid, n1, n2, n3, n4]
                #bigNodes = [n5, n6, n7, n8, n9, n10]
                #if sum(bigNodes) > 0:
                #elem = CTETRA10(data_in + bigNodes)
                #else:
                #elem = CTETRA4(data_in)

                    nbytes = nbytes_per_id * nelements + 12  # 12 comes from the keys
                    nvalues = nbytes // 4
                    op2.write(pack('3i', *[4, nvalues, 4]))
                    op2.write(pack('i', nbytes))  # nbytes

                    #key = (4501, 45, 1)
                    op2.write(pack('3i', *key))
                    op2_ascii.write(str(key) + '\n')

                    spack = Struct(endian + b'12i')
                    for eid in sorted(eids):
                        elem = obj.elements[eid]
                        nids = elem.node_ids
                        pid = elem.pid
                        assert None not in nids, nids

                        data = [eid, pid] + nids
                        op2.write(spack.pack(*data))
                        op2_ascii.write('  eid=%s pid=%s nids=%s\n' %
                                        (eid, pid, str(nids)))
                    op2.write(pack('i', nbytes))

                #-------------------------------------
                data = [4, -4, 4, 4, 1, 4, 4, 0, 4]
                op2.write(pack('9i', *data))
                op2_ascii.write(str(data) + '\n')

                data = [4, 3, 4, 12, 1, 2, 3, 12]
                op2.write(pack('3i 5i', *data))
                op2_ascii.write(str(data) + '\n')
                #-------------------------------------
                data = [4, -5, 4, 4, 1, 4, 4, 0, 4]
                op2.write(pack('9i', *data))
                op2_ascii.write(str(data) + '\n')

                data = [
                    4,
                    0,
                    4,
                    #4, 2, 4
                ]
                op2.write(pack('3i', *data))
                op2_ascii.write(str(data) + '\n')

                #-------------------------------------

            # coordinate-system records are not written by this method:
            #(1701,  17,  6): ['CORD1C', self._read_cord1c],
            #(1801,  18,  5): ['CORD1R', self._read_cord1r],
            #(1901,  19,  7): ['CORD1S', self._read_cord1s],
            #(2001,  20,  9): ['CORD2C', self._read_cord2c],
            #(2101,  21,  8): ['CORD2R', self._read_cord2r],
            #(2201,  22, 10): ['CORD2S', self._read_cord2s],
            #(14301,143,651): ['CORD3G', self._read_cord3g],
            _write_markers(op2, op2_ascii, [2, 4])
Example 34
# -*- coding: utf-8 -*-

# standard library
import logging
from struct import Struct, error as StructError

# project
from ottd_ctrl.const import AdminUpdateFrequencyStr, AdminUpdateTypeStr, NetworkErrorCodeStr, PacketTypes
from ottd_ctrl.protocol import Boolean, Date, String, SInt64, Type, UInt8, UInt16, UInt32, UInt64

# pack formats (all little endian)
size_fmt = Struct('<H')  # 2 bytes
type_fmt = Struct('<B')  # 1 byte
size_len = size_fmt.size
type_len = type_fmt.size
delimiter_size = 1
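
# Given these formats, one packet can be framed off a byte stream in two
# steps. A hedged sketch: read_exactly is a hypothetical helper, and the
# size field is assumed to count the whole packet, header included:
def read_packet(read_exactly):
    size, = size_fmt.unpack(read_exactly(size_len))         # total packet size
    packet_type, = type_fmt.unpack(read_exactly(type_len))
    payload = read_exactly(size - size_len - type_len)      # remaining bytes
    return packet_type, payload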

DATE_FMT = '%Y.%m.%d'


class PacketError(Exception):
    """Base Packet related errors"""
    pass


class PacketConstructionError(PacketError):
    """Error while constructing packet"""
    pass


class PacketEncodeError(PacketError):
Example 35
    def _read_element_strain_energy(self, data, ndata):
        """
        table_code = 19
        """
        dt = self.nonlinear_factor
        n = 0

        if self.data_code['element_name'] == 'BAR':
            result_name = 'cbar_strain_energy'
        elif self.data_code['element_name'] == 'BEAM':
            result_name = 'cbeam_strain_energy'
        elif self.data_code['element_name'] == 'BEND':
            result_name = 'cbend_strain_energy'

        elif self.data_code['element_name'] == 'ROD':
            result_name = 'crod_strain_energy'
        elif self.data_code['element_name'] == 'TUBE':
            result_name = 'ctube_strain_energy'
        elif self.data_code['element_name'] == 'CONROD':
            result_name = 'conrod_strain_energy'

        elif self.data_code['element_name'] in ['TRIA3', 'TRIAFD', 'TRIA3FD']:
            result_name = 'ctria3_strain_energy'
        elif self.data_code['element_name'] == 'TRIA6':
            result_name = 'ctria6_strain_energy'
        elif self.data_code['element_name'] == 'TRIAX6':
            result_name = 'ctriax6_strain_energy'
        elif self.data_code['element_name'] == 'TRIAR':
            result_name = 'ctriar_strain_energy'
        elif self.data_code['element_name'] in ['TRIAX3FD', 'TRIAXFD']:
            result_name = 'ctriax_strain_energy'

        elif self.data_code['element_name'] in ['QUAD4', 'QUADFD', 'QUAD4FD']:
            result_name = 'cquad4_strain_energy'
        elif self.data_code['element_name'] == 'QUAD8':
            result_name = 'cquad8_strain_energy'
        elif self.data_code['element_name'] == 'QUADR':
            result_name = 'cquadr_strain_energy'
        elif self.data_code['element_name'] in ['QUADXFD', 'QUADX4FD']:
            result_name = 'cquadx_strain_energy'
        elif self.data_code['element_name'] == 'SHEAR':
            result_name = 'cshear_strain_energy'

        elif self.data_code['element_name'] in ['HEXA', 'HEXAFD', 'HEXA8FD']:
            result_name = 'chexa_strain_energy'
        elif self.data_code['element_name'] in [
                'PENTA', 'PENTAFD', 'PENTA6FD'
        ]:
            result_name = 'cpenta_strain_energy'
        elif self.data_code['element_name'] in [
                'TETRA', 'TETRAFD', 'TETRA4FD'
        ]:
            result_name = 'ctetra_strain_energy'
        elif self.data_code['element_name'] in ['PYRAM']:
            result_name = 'cpyram_strain_energy'

        elif self.data_code['element_name'] == 'GAP':
            result_name = 'cgap_strain_energy'
        elif self.data_code['element_name'] == 'BUSH':
            result_name = 'cbush_strain_energy'

        elif self.data_code['element_name'] == 'ELAS1':
            result_name = 'celas1_strain_energy'
        elif self.data_code['element_name'] == 'ELAS2':
            result_name = 'celas2_strain_energy'
        elif self.data_code['element_name'] == 'ELAS3':
            result_name = 'celas3_strain_energy'
        elif self.data_code['element_name'] == 'ELAS4':
            result_name = 'celas4_strain_energy'

        elif self.data_code['element_name'] == 'DUM8':
            result_name = 'cdum8_strain_energy'
        elif self.data_code['element_name'] == 'DMIG':
            result_name = 'dmig_strain_energy'
        elif self.data_code['element_name'] == 'GENEL':
            result_name = 'genel_strain_energy'
        else:
            #result_name = 'chexa8fd_strain_energy'

            raise NotImplementedError('element_name=%r' %
                                      (self.data_code['element_name']))
        #result_name = 'strain_energy'
        slot = getattr(self, result_name)

        auto_return = False
        self._results._found_result(result_name)

        if self.is_debug_file:
            self.binary_debug.write('cvalres = %s\n' % self.cvalres)
        if self.format_code == 1 and self.num_wide == 4:
            assert self.cvalres in [0, 1], self.cvalres

            ntotal = 16
            nelements = ndata // ntotal
            auto_return, is_vectorized = self._create_oes_object4(
                nelements, result_name, slot, RealStrainEnergyArray)

            if auto_return:
                #if obj.dt_temp is None or obj.itime is None and obj.dt_temp == dt:
                #element_name = self.data_code['element_name']
                #if element_name in obj.element_name_count:
                #obj.element_name_count[element_name] += nelements
                #else:
                #obj.element_name_count[element_name] = nelements
                #obj.dt_temp = dt
                return nelements * self.num_wide * 4
            #itime = obj.itime #// obj.nelement_types

            obj = self.obj
            itime = obj.itime

            if self.is_debug_file:
                self.binary_debug.write(
                    '  [cap, element1, element2, ..., cap]\n')
                self.binary_debug.write(
                    '  cap = %i  # assume 1 cap when there could have been multiple\n'
                    % ndata)
                self.binary_debug.write(
                    '  #elementi = [eid_device, energy, percent, density]\n')
                self.binary_debug.write('  nelements=%i\n' % nelements)

            if self.use_vector:
                n = nelements * 4 * self.num_wide
                ielement = obj.ielement
                ielement2 = obj.ielement + nelements
                itotal = obj.itotal
                itotal2 = obj.itotal + nelements * 4

                floats = fromstring(data,
                                    dtype=self.fdtype).reshape(nelements, 4)
                obj._times[itime] = dt
                #if obj.itime == 0:
                ints = fromstring(data,
                                  dtype=self.idtype).reshape(nelements, 4)
                eids = ints[:, 0] // 10
                assert eids.min() > 0, eids.min()
                obj.element[itime, ielement:ielement2] = eids

                #[energy, percent, density]
                obj.data[itime, ielement:ielement2, :] = floats[:, 1:]
                obj.itotal2 = itotal2
                obj.ielement = ielement2
            else:
                struct1 = Struct(b(self._endian + 'i3f'))
                for i in range(nelements):
                    edata = data[n:n + ntotal]

                    out = struct1.unpack(edata)
                    (eid_device, energy, percent, density) = out
                    eid = eid_device // 10
                    if self.is_debug_file:
                        self.binary_debug.write('  eid=%i; %s\n' %
                                                (eid, str(out)))
                    self.obj.add_sort1(dt, eid, energy, percent, density)
                    n += ntotal
        elif self.format_code == 1 and self.num_wide == 5:
            assert self.cvalres in [0, 1, 2], self.cvalres  # 0??
            ntotal = 20
            nnodes = ndata // ntotal
            nelements = nnodes

            auto_return, is_vectorized = self._create_oes_object4(
                nelements, result_name, slot, RealStrainEnergyArray)
            if auto_return:
                return nelements * self.num_wide * 4

            obj = self.obj
            if self.use_vector:
                n = nelements * 4 * self.num_wide
                itotal = obj.ielement
                ielement2 = obj.itotal + nelements
                itotal2 = ielement2

                floats = fromstring(data,
                                    dtype=self.fdtype).reshape(nelements, 5)
                obj._times[obj.itime] = dt

                strings = fromstring(data, dtype=self._endian + 'S4').reshape(
                    nelements, 5)
                s = array(
                    [s1 + s2 for s1, s2 in zip(strings[:, 1], strings[:, 2])])
                if obj.itime == 0:
                    ints = fromstring(data,
                                      dtype=self.idtype).reshape(nelements, 5)
                    eids = ints[:, 0] // 10
                    assert eids.min() > 0, eids.min()
                    obj.element[itotal:itotal2] = eids
                    obj.element_type[obj.itime, itotal:itotal2, :] = s

                #[energy, percent, density]
                obj.data[obj.itime, itotal:itotal2, :] = floats[:, 3:]
                obj.itotal = itotal2
                obj.ielement = ielement2
            else:
                s = Struct(b(self._endian + '8s3f'))
                for i in range(nnodes):
                    edata = data[n:n + 20]
                    out = s.unpack(edata)
                    (word, energy, percent, density) = out
                    word = word.strip()
                    #print "eType=%s" % (eType)
                    #print "%s" %(self.get_element_type(self.element_type)), data_in
                    #eid = self.obj.add_new_eid(out)
                    if self.is_debug_file:
                        self.binary_debug.write('  word=%s; %s\n' %
                                                (word, str(out)))
                    obj.add_sort1(dt, word, energy, percent, density)
                    n += ntotal
        elif self.format_code in [2, 3] and self.num_wide == 5:
            #ELEMENT-ID   STRAIN-ENERGY (MAG/PHASE)  PERCENT OF TOTAL  STRAIN-ENERGY-DENSITY
            #    5         2.027844E-10 /   0.0            1.2581            2.027844E-09
            ntotal = 20
            nelements = ndata // ntotal
            auto_return, is_vectorized = self._create_oes_object4(
                nelements, result_name, slot, ComplexStrainEnergyArray)
            if auto_return:
                return nelements * self.num_wide * 4

            obj = self.obj
            if self.use_vector:
                n = nelements * 4 * self.num_wide
                itotal = obj.ielement
                ielement2 = obj.itotal + nelements
                itotal2 = ielement2

                floats = fromstring(data,
                                    dtype=self.fdtype).reshape(nelements, 5)
                obj._times[obj.itime] = dt

                #if obj.itime == 0:
                ints = fromstring(data,
                                  dtype=self.idtype).reshape(nelements, 5)
                eids = ints[:, 0] // 10
                assert eids.min() > 0, eids.min()
                obj.element[itotal:itotal2] = eids
                #obj.element_type[obj.itime, itotal:itotal2, :] = s

                #[energyr, energyi, percent, density]
                obj.data[obj.itime, itotal:itotal2, :] = floats[:, 1:]
                obj.itotal = itotal2
                obj.ielement = ielement2
            else:
                s = Struct(b(self._endian + 'i4f'))
                for i in range(nelements):
                    edata = data[n:n + 20]
                    out = s.unpack(edata)
                    (eid_device, energyr, energyi, percent, density) = out
                    eid = eid_device // 10
                    #if is_magnitude_phase:
                    #energy = polar_to_real_imag(energyr, energyi)
                    #else:
                    #energy = complex(energyr, energyi)

                    if self.is_debug_file:
                        self.binary_debug.write('  eid=%i; %s\n' %
                                                (eid, str(out)))
                    obj.add_sort1(dt, eid, energyr, energyi, percent, density)
                    n += ntotal

        elif self.format_code == 1 and self.num_wide == 6:  ## TODO: figure this out...
            ntotal = 24
            nnodes = ndata // ntotal
            nelements = nnodes
            auto_return, is_vectorized = self._create_oes_object4(
                nelements, result_name, slot, RealStrainEnergyArray)
            if auto_return:
                return nelements * self.num_wide * 4

            obj = self.obj
            if self.use_vector:
                n = nelements * 4 * self.num_wide
                itotal = obj.ielement
                ielement2 = obj.itotal + nelements
                itotal2 = ielement2

                floats = fromstring(data,
                                    dtype=self.fdtype).reshape(nelements, 6)
                obj._times[obj.itime] = dt

                if obj.itime == 0:
                    strings = fromstring(data,
                                         dtype=self._endian + 'S4').reshape(
                                             nelements, 6)
                    s = array([
                        s1 + s2 for s1, s2 in zip(strings[:, 1], strings[:, 2])
                    ])

                    ints = fromstring(data,
                                      dtype=self.idtype).reshape(nelements, 6)
                    eids = ints[:, 0] // 10
                    assert eids.min() > 0, eids.min()
                    obj.element[itotal:itotal2] = eids
                    obj.element_type[obj.itime, itotal:itotal2, :] = s

                #[energy, percent, density]
                obj.data[obj.itime, itotal:itotal2, :] = floats[:, 3:]
                obj.itotal = itotal2
                obj.ielement = ielement2
            else:
                struct1 = Struct(b(self._endian + 'i8s3f'))
                for i in range(nnodes):
                    edata = data[n:n + 24]
                    out = struct1.unpack(edata)
                    # assumed layout: eid_device, 8-char word, energy, percent, density
                    (eid_device, word, energy, percent, density) = out
                    eid = eid_device // 10
                    word = word.strip()
                    #print "eType=%s" % (eType)
                    #print "%s" %(self.get_element_type(self.element_type)), data_in
                    #eid = self.obj.add_new_eid(out)
                    if self.is_debug_file:
                        self.binary_debug.write('  eid=%i; %s\n' %
                                                (eid, str(out)))
                    obj.add(dt, word, energy, percent, density)
                    n += ntotal
        else:
            #device_code   = 1   Print
            #analysis_code = 5   Frequency
            #table_code    = 18  ONRGY1-OEE - Element strain energy
            #format_code   = 2   Real/Imaginary
            #sort_method   = 1
            #sort_code     = 0
            #sort_bits   = (0, 0, 0)
            #data_format = 0   Real
            #sort_type   = 0   Sort1
            #is_random   = 0   Sorted Responses
            #random_code   = 0
            #s_code        = None ???
            #num_wide      = 4
            #isubcase      = 1
            #MSC Nastran
            msg = self.code_information()
            return self._not_implemented_or_skip(data, ndata, msg)
            #raise NotImplementedError(self.code_information())
        return n
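
The branches above all decode fixed 20-byte records. As a minimal standalone sketch (not from the source; values are made up), the complex 'i4f' layout used in the scalar path can be exercised like this, with eid_device encoding eid*10 + device_code as in the code above:

from struct import Struct

record = Struct('<i4f')  # eid_device, energy_r, energy_i, percent, density
data = record.pack(51, 2.027844e-10, 0.0, 1.2581, 2.027844e-09)
eid_device, energy_r, energy_i, percent, density = record.unpack(data)
eid = eid_device // 10   # strip the device-code digit
assert eid == 5
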
Example no. 36
    def _stream_unpack(self, st: struct.Struct) -> typing.Tuple:
        """Unpack data from the stream according to the struct st. The number of bytes to read is determined using st.size, so variable-sized structs cannot be used with this method."""

        return st.unpack(self._read(st.size))
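
A hedged usage sketch of this helper's pattern, with io.BytesIO standing in for whatever stream self._read wraps:

import io
from struct import Struct

header = Struct('<HH')                  # two little-endian u16 fields
stream = io.BytesIO(header.pack(640, 480) + b'payload...')
width, height = header.unpack(stream.read(header.size))
assert (width, height) == (640, 480)
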
Example no. 37
class Reader:
    """Reads the three files of a shapefile as a unit or
    separately.  If one of the three files (.shp, .shx,
    .dbf) is missing no exception is thrown until you try
    to call a method that depends on that particular file.
    The .shx index file is used if available for efficiency
    but is not required to read the geometry from the .shp
    file. The "shapefile" argument in the constructor is the
    name of the file you want to open.

    You can instantiate a Reader without specifying a shapefile
    and then specify one later with the load() method.

    Only the shapefile headers are read upon loading. Content
    within each file is only accessed when required and as
    efficiently as possible. Shapefiles are usually not large
    but they can be.
    """
    def __init__(self, *args, **kwargs):
        self.shp = None
        self.shx = None
        self.dbf = None
        self.shapeName = "Not specified"
        self._offsets = []
        self.shpLength = None
        self.numRecords = None
        self.fields = []
        self.__dbfHdrLength = 0
        # See if a shapefile name was passed as an argument
        if len(args) > 0:
            if is_string(args[0]):
                self.load(args[0])
                return
        if "shp" in kwargs.keys():
            if hasattr(kwargs["shp"], "read"):
                self.shp = kwargs["shp"]
                # Copy if required
                try:
                    self.shp.seek(0)
                except (NameError, io.UnsupportedOperation):
                    self.shp = io.BytesIO(self.shp.read())
            if "shx" in kwargs.keys():
                if hasattr(kwargs["shx"], "read"):
                    self.shx = kwargs["shx"]
                    # Copy if required
                    try:
                        self.shx.seek(0)
                    except (NameError, io.UnsupportedOperation):
                        self.shx = io.BytesIO(self.shx.read())
        if "dbf" in kwargs.keys():
            if hasattr(kwargs["dbf"], "read"):
                self.dbf = kwargs["dbf"]
                # Copy if required
                try:
                    self.dbf.seek(0)
                except (NameError, io.UnsupportedOperation):
                    self.dbf = io.BytesIO(self.dbf.read())
        if self.shp or self.dbf:
            self.load()
        else:
            raise ShapefileException(
                "Shapefile Reader requires a shapefile or file-like object.")

    def __len__(self):
        """Returns the number of shapes/records in the shapefile."""
        return self.numRecords

    def load(self, shapefile=None):
        """Opens a shapefile from a filename or file-like
        object. Normally this method would be called by the
        constructor with the file name as an argument."""
        if shapefile:
            (shapeName, ext) = os.path.splitext(shapefile)
            self.shapeName = shapeName
            try:
                self.shp = open("%s.shp" % shapeName, "rb")
            except IOError:
                pass
            try:
                self.shx = open("%s.shx" % shapeName, "rb")
            except IOError:
                pass
            try:
                self.dbf = open("%s.dbf" % shapeName, "rb")
            except IOError:
                pass
            if not (self.shp and self.dbf):
                raise ShapefileException("Unable to open %s.dbf or %s.shp." %
                                         (shapeName, shapeName))
        if self.shp:
            self.__shpHeader()
        if self.dbf:
            self.__dbfHeader()

    def __getFileObj(self, f):
        """Checks to see if the requested shapefile file object is
        available. If not a ShapefileException is raised."""
        if not f:
            raise ShapefileException(
                "Shapefile Reader requires a shapefile or file-like object.")
        if self.shp and self.shpLength is None:
            self.load()
        if self.dbf and len(self.fields) == 0:
            self.load()
        return f

    def __restrictIndex(self, i):
        """Provides list-like handling of a record index with a clearer
        error message if the index is out of bounds."""
        if self.numRecords:
            rmax = self.numRecords - 1
            if abs(i) > rmax:
                raise IndexError("Shape or Record index out of range.")
            if i < 0: i = range(self.numRecords)[i]
        return i

    def __shpHeader(self):
        """Reads the header information from a .shp or .shx file."""
        if not self.shp:
            raise ShapefileException(
                "Shapefile Reader requires a shapefile or file-like object. (no shp file found"
            )
        shp = self.shp
        # File length (16-bit word * 2 = bytes)
        shp.seek(24)
        self.shpLength = unpack(">i", shp.read(4))[0] * 2
        # Shape type
        shp.seek(32)
        self.shapeType = unpack("<i", shp.read(4))[0]
        # The shapefile's bounding box (lower left, upper right)
        self.bbox = _Array('d', unpack("<4d", shp.read(32)))
        # Elevation
        self.elevation = _Array('d', unpack("<2d", shp.read(16)))
        # Measure
        self.measure = _Array('d', unpack("<2d", shp.read(16)))

    def __shape(self):
        """Returns the header info and geometry for a single shape."""
        f = self.__getFileObj(self.shp)
        record = Shape()
        nParts = nPoints = zmin = zmax = mmin = mmax = None
        (recNum, recLength) = unpack(">2i", f.read(8))
        # Determine the start of the next record
        next = f.tell() + (2 * recLength)
        shapeType = unpack("<i", f.read(4))[0]
        record.shapeType = shapeType
        # For Null shapes create an empty points list for consistency
        if shapeType == 0:
            record.points = []
        # All shape types capable of having a bounding box
        elif shapeType in (3, 5, 8, 13, 15, 18, 23, 25, 28, 31):
            record.bbox = _Array('d', unpack("<4d", f.read(32)))
        # Shape types with parts
        if shapeType in (3, 5, 13, 15, 23, 25, 31):
            nParts = unpack("<i", f.read(4))[0]
        # Shape types with points
        if shapeType in (3, 5, 8, 13, 15, 18, 23, 25, 28, 31):
            nPoints = unpack("<i", f.read(4))[0]
        # Read parts
        if nParts:
            record.parts = _Array('i',
                                  unpack("<%si" % nParts, f.read(nParts * 4)))
        # Read part types for Multipatch - 31
        if shapeType == 31:
            record.partTypes = _Array(
                'i', unpack("<%si" % nParts, f.read(nParts * 4)))
        # Read points - produces a list of [x,y] values
        if nPoints:
            flat = unpack("<%sd" % (2 * nPoints), f.read(16 * nPoints))
            record.points = list(izip(*(iter(flat), ) * 2))
        # Read z extremes and values
        if shapeType in (13, 15, 18, 31):
            (zmin, zmax) = unpack("<2d", f.read(16))
            record.z = _Array('d', unpack("<%sd" % nPoints,
                                          f.read(nPoints * 8)))
        # Read m extremes and values if header m values do not equal 0.0
        if shapeType in (13, 15, 18, 23, 25, 28,
                         31) and not 0.0 in self.measure:
            (mmin, mmax) = unpack("<2d", f.read(16))
            # Measure values less than -10e38 are nodata values according to the spec
            record.m = []
            for m in _Array('d', unpack("<%sd" % nPoints,
                                        f.read(nPoints * 8))):
                if m > -10e38:
                    record.m.append(m)
                else:
                    record.m.append(None)
        # Read a single point
        if shapeType in (1, 11, 21):
            record.points = [_Array('d', unpack("<2d", f.read(16)))]
        # Read a single Z value
        if shapeType == 11:
            record.z = unpack("<d", f.read(8))
        # Read a single M value
        if shapeType in (11, 21):
            record.m = unpack("<d", f.read(8))
        # Seek to the end of this record as defined by the record header because
        # the shapefile spec doesn't require the actual content to meet the header
        # definition.  Probably allowed for lazy feature deletion.
        f.seek(next)
        return record

    def __shapeIndex(self, i=None):
        """Returns the offset in a .shp file for a shape based on information
        in the .shx index file."""
        shx = self.shx
        if not shx:
            return None
        if not self._offsets:
            # File length (16-bit word * 2 = bytes) - header length
            shx.seek(24)
            shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100
            numRecords = shxRecordLength // 8
            # Jump to the first record.
            shx.seek(100)
            shxRecords = _Array('i')
            # Each offset consists of two nrs, only the first one matters
            shxRecords.fromfile(shx, 2 * numRecords)
            if sys.byteorder != 'big':
                shxRecords.byteswap()
            self._offsets = [2 * el for el in shxRecords[::2]]
        if i is not None:
            return self._offsets[i]

    def shape(self, i=0):
        """Returns a shape object for a shape in the the geometry
        record file."""
        shp = self.__getFileObj(self.shp)
        i = self.__restrictIndex(i)
        offset = self.__shapeIndex(i)
        if not offset:
            # Shx index not available so iterate the full list.
            for j, k in enumerate(self.iterShapes()):
                if j == i:
                    return k
        shp.seek(offset)
        return self.__shape()

    def shapes(self):
        """Returns all shapes in a shapefile."""
        shp = self.__getFileObj(self.shp)
        # Found shapefiles which report incorrect
        # shp file length in the header. Can't trust
        # that so we seek to the end of the file
        # and figure it out.
        shp.seek(0, 2)
        self.shpLength = shp.tell()
        shp.seek(100)
        shapes = []
        while shp.tell() < self.shpLength:
            shapes.append(self.__shape())
        return shapes

    def iterShapes(self):
        """Serves up shapes in a shapefile as an iterator. Useful
        for handling large shapefiles."""
        shp = self.__getFileObj(self.shp)
        shp.seek(0, 2)
        self.shpLength = shp.tell()
        shp.seek(100)
        while shp.tell() < self.shpLength:
            yield self.__shape()

    def __dbfHeader(self):
        """Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger"""
        if not self.dbf:
            raise ShapefileException(
                "Shapefile Reader requires a shapefile or file-like object. (no dbf file found)"
            )
        dbf = self.dbf
        # read relevant header parts
        self.numRecords, self.__dbfHdrLength, self.__recordLength = \
                unpack("<xxxxLHH20x", dbf.read(32))
        # read fields
        numFields = (self.__dbfHdrLength - 33) // 32
        for field in range(numFields):
            fieldDesc = list(unpack("<11sc4xBB14x", dbf.read(32)))
            name = 0
            idx = 0
            if b("\x00") in fieldDesc[name]:
                idx = fieldDesc[name].index(b("\x00"))
            else:
                idx = len(fieldDesc[name]) - 1
            fieldDesc[name] = fieldDesc[name][:idx]
            fieldDesc[name] = u(fieldDesc[name])
            fieldDesc[name] = fieldDesc[name].lstrip()
            fieldDesc[1] = u(fieldDesc[1])
            self.fields.append(fieldDesc)
        terminator = dbf.read(1)
        if terminator != b("\r"):
            raise ShapefileException(
                "Shapefile dbf header lacks expected terminator. (likely corrupt?)"
            )
        self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))
        fmt, fmtSize = self.__recordFmt()
        self.__recStruct = Struct(fmt)

    def __recordFmt(self):
        """Calculates the format and size of a .dbf record."""
        if self.numRecords is None:
            self.__dbfHeader()
        fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])
        fmtSize = calcsize(fmt)
        # total size of fields should add up to recordlength from the header
        while fmtSize < self.__recordLength:
            # if not, pad byte until reaches recordlength
            fmt += "x"
            fmtSize += 1
        return (fmt, fmtSize)

    def __record(self):
        """Reads and returns a dbf record row as a list of values."""
        f = self.__getFileObj(self.dbf)
        recordContents = self.__recStruct.unpack(f.read(self.__recStruct.size))
        if recordContents[0] != b(' '):
            # deleted record
            return None
        record = []
        for (name, typ, size, deci), value in zip(self.fields, recordContents):
            if name == 'DeletionFlag':
                continue
            elif typ in ("N", "F"):
                # numeric or float: number stored as a string, right justified, and padded with blanks to the width of the field.
                value = value.replace(b('\0'), b('')).strip()
                value = value.replace(b('*'),
                                      b(''))  # QGIS NULL is all '*' chars
                if value == b(''):
                    value = None
                elif deci:
                    try:
                        value = float(value)
                    except ValueError:
                        #not parseable as float, set to None
                        value = None
                else:
                    try:
                        value = int(value)
                    except ValueError:
                        #not parseable as int, set to None
                        value = None
            elif typ == 'D':
                # date: 8 bytes - date stored as a string in the format YYYYMMDD.
                if value.count(
                        b('0')) == len(value):  # QGIS NULL is all '0' chars
                    value = None
                else:
                    try:
                        y, m, d = int(value[:4]), int(value[4:6]), int(
                            value[6:8])
                        value = date(y, m, d)
                    except ValueError:
                        value = value.strip()
            elif typ == 'L':
                # logical: 1 byte - initialized to 0x20 (space) otherwise T or F.
                if value == b(" "):
                    value = None  # space means missing or not yet set
                else:
                    if value in b('YyTt1'):
                        value = True
                    elif value in b('NnFf0'):
                        value = False
                    else:
                        value = None  # unknown value is set to missing
            else:
                # anything else is forced to string/unicode
                value = u(value)
                value = value.strip()
            record.append(value)
        return record

    def record(self, i=0):
        """Returns a specific dbf record based on the supplied index."""
        f = self.__getFileObj(self.dbf)
        if self.numRecords is None:
            self.__dbfHeader()
        i = self.__restrictIndex(i)
        recSize = self.__recStruct.size
        f.seek(0)
        f.seek(self.__dbfHdrLength + (i * recSize))
        return self.__record()

    def records(self):
        """Returns all records in a dbf file."""
        if self.numRecords is None:
            self.__dbfHeader()
        records = []
        f = self.__getFileObj(self.dbf)
        f.seek(self.__dbfHdrLength)
        for i in range(self.numRecords):
            r = self.__record()
            if r:
                records.append(r)
        return records

    def iterRecords(self):
        """Serves up records in a dbf file as an iterator.
        Useful for large shapefiles or dbf files."""
        if self.numRecords is None:
            self.__dbfHeader()
        f = self.__getFileObj(self.dbf)
        f.seek(self.__dbfHdrLength)
        for i in range(self.numRecords):
            r = self.__record()
            if r:
                yield r

    def shapeRecord(self, i=0):
        """Returns a combination geometry and attribute record for the
        supplied record index."""
        i = self.__restrictIndex(i)
        return ShapeRecord(shape=self.shape(i), record=self.record(i))

    def shapeRecords(self):
        """Returns a list of combination geometry/attribute records for
        all records in a shapefile."""
        return [ShapeRecord(shape=shape, record=record)
                for shape, record in zip(self.shapes(), self.records())]

    def iterShapeRecords(self):
        """Returns a generator of combination geometry/attribute records for
        all records in a shapefile."""
        for shape, record in izip(self.iterShapes(), self.iterRecords()):
            yield ShapeRecord(shape=shape, record=record)
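
A hedged usage sketch of the Reader above; the "points" base name is hypothetical and assumes a points.shp/points.dbf pair on disk:

r = Reader("points")  # or Reader(shp=shp_fileobj, shx=shx_fileobj, dbf=dbf_fileobj)
print(len(r), "records")
for sr in r.iterShapeRecords():
    print(sr.shape.shapeType, sr.record)
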
Example no. 38
def data_element_generator(fp,
                           is_implicit_VR,
                           is_little_endian,
                           stop_when=None,
                           defer_size=None,
                           encoding=default_encoding):
    """Create a generator to efficiently return the raw data elements
    Returns (VR, length, value_bytes, value_tell, is_little_endian),
    where:
    VR -- None if implicit VR, otherwise the VR read from the file
    length -- the length as in the DICOM data element (could be
        DICOM "undefined length" 0xffffffffL),
    value_bytes -- the raw bytes from the DICOM file
                    (not parsed into python types)
    is_little_endian -- True if transfer syntax is little endian; else False
    """
    # Summary of DICOM standard PS3.5-2008 chapter 7:
    # If Implicit VR, data element is:
    #    tag, 4-byte length, value.
    #       The 4-byte length can be FFFFFFFF (undefined length)*
    # If Explicit VR:
    #    if OB, OW, OF, SQ, UN, or UT:
    #       tag, VR, 2-bytes reserved (both zero), 4-byte length, value
    #           For all but UT, the length can be FFFFFFFF (undefined length)*
    #   else: (any other VR)
    #       tag, VR, (2 byte length), value
    # * for undefined length, a Sequence Delimitation Item marks the end
    #        of the Value Field.
    # Note, except for the special_VRs, both impl and expl VR use 8 bytes;
    #    the special VRs follow the 8 bytes with a 4-byte length

    # With a generator, state is stored, so we can break down
    #    into the individual cases, and not have to check them again for each
    #    data element

    if is_little_endian:
        endian_chr = "<"
    else:
        endian_chr = ">"
    if is_implicit_VR:
        element_struct = Struct(endian_chr + "HHL")
    else:  # Explicit VR
        # tag, VR, 2-byte length (or 0 if special VRs)
        element_struct = Struct(endian_chr + "HH2sH")
        extra_length_struct = Struct(endian_chr + "L")  # for special VRs
        extra_length_unpack = extra_length_struct.unpack  # for lookup speed

    # Make local variables so have faster lookup
    fp_read = fp.read
    fp_tell = fp.tell
    logger_debug = logger.debug
    debugging = dicom.debugging
    element_struct_unpack = element_struct.unpack

    while True:
        # Read tag, VR, length, get ready to read value
        bytes_read = fp_read(8)
        if len(bytes_read) < 8:
            return  # at end of file (PEP 479: don't raise StopIteration in a generator)
        if debugging:
            debug_msg = "{0:08x}: {1}".format(fp.tell() - 8,
                                              bytes2hex(bytes_read))

        if is_implicit_VR:
            # must reset VR each time; could have set last iteration (e.g. SQ)
            VR = None
            group, elem, length = element_struct_unpack(bytes_read)
        else:  # explicit VR
            group, elem, VR, length = element_struct_unpack(bytes_read)
            if in_py3:
                VR = VR.decode(default_encoding)
            if VR in extra_length_VRs:
                bytes_read = fp_read(4)
                length = extra_length_unpack(bytes_read)[0]
                if debugging:
                    debug_msg += " " + bytes2hex(bytes_read)
        if debugging:
            debug_msg = "%-47s  (%04x, %04x)" % (debug_msg, group, elem)
            if not is_implicit_VR:
                debug_msg += " %s " % VR
            if length != 0xFFFFFFFF:
                debug_msg += "Length: %d" % length
            else:
                debug_msg += "Length: Undefined length (FFFFFFFF)"
            logger_debug(debug_msg)

        # Positioned to read the value, but may not want to -- check stop_when
        value_tell = fp_tell()
        tag = TupleTag((group, elem))
        if stop_when is not None:
            # XXX VR may be None here!! Should stop_when just take tag?
            if stop_when(tag, VR, length):
                if debugging:
                    logger_debug("Reading ended by stop_when callback. "
                                 "Rewinding to start of data element.")
                rewind_length = 8
                if not is_implicit_VR and VR in extra_length_VRs:
                    rewind_length += 4
                fp.seek(value_tell - rewind_length)
                return

        # Reading the value
        # First case (most common): reading a value with a defined length
        if length != 0xFFFFFFFF:
            if defer_size is not None and length > defer_size:
                # Flag as deferred by setting value to None, and skip bytes
                value = None
                logger_debug("Defer size exceeded. "
                             "Skipping forward to next data element.")
                fp.seek(fp_tell() + length)
            else:
                value = fp_read(length)
                if debugging:
                    dotdot = "   "
                    if length > 12:
                        dotdot = "..."
                    logger_debug("%08x: %-34s %s %r %s" %
                                 (value_tell, bytes2hex(
                                     value[:12]), dotdot, value[:12], dotdot))

            # If the tag is (0008,0005) Specific Character Set, then store it
            if tag == (0x08, 0x05):
                from dicom.values import convert_string
                encoding = convert_string(value,
                                          is_little_endian,
                                          encoding=default_encoding)
                # Store the encoding value in the generator for use with future elements (SQs)
                encoding = convert_encodings(encoding)

            yield RawDataElement(tag, VR, length, value, value_tell,
                                 is_implicit_VR, is_little_endian)

        # Second case: undefined length - must seek to delimiter,
        # unless is SQ type, in which case is easier to parse it, because
        # undefined length SQs and items of undefined lengths can be nested
        # and it would be error-prone to read to the correct outer delimiter
        else:
            # Try to look up type to see if is a SQ
            # if private tag, won't be able to look it up in dictionary,
            #   in which case just ignore it and read the bytes unless it is
            #   identified as a Sequence
            if VR is None:
                try:
                    VR = dictionaryVR(tag)
                except KeyError:
                    # Look ahead to see if it consists of items and is thus a SQ
                    next_tag = TupleTag(unpack(endian_chr + "HH", fp_read(4)))
                    # Rewind the file
                    fp.seek(fp_tell() - 4)
                    if next_tag == ItemTag:
                        VR = 'SQ'

            if VR == 'SQ':
                if debugging:
                    msg = "{0:08x}: Reading/parsing undefined length sequence"
                    logger_debug(msg.format(fp_tell()))
                seq = read_sequence(fp, is_implicit_VR, is_little_endian,
                                    length, encoding)
                yield DataElement(tag,
                                  VR,
                                  seq,
                                  value_tell,
                                  is_undefined_length=True)
            else:
                delimiter = SequenceDelimiterTag
                if debugging:
                    logger_debug("Reading undefined length data element")
                value = read_undefined_length_value(fp, is_little_endian,
                                                    delimiter, defer_size)

                # If the tag is (0008,0005) Specific Character Set, then store it
                if tag == (0x08, 0x05):
                    from dicom.values import convert_string
                    encoding = convert_string(value,
                                              is_little_endian,
                                              encoding=default_encoding)
                    # Store the encoding value in the generator for use with future elements (SQs)
                    encoding = convert_encodings(encoding)

                yield RawDataElement(tag, VR, length, value, value_tell,
                                     is_implicit_VR, is_little_endian)
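
As a hedged, standalone illustration of the explicit-VR layout the comments above describe (tag, 2-byte VR, 2-byte length for the common VRs), independent of the dicom module; the tag and value are made up:

from struct import Struct

elem = Struct('<HH2sH')                 # group, element, VR, 16-bit length
buf = elem.pack(0x0010, 0x0010, b'PN', 12) + b'Doe^John    '
group, element, vr, length = elem.unpack(buf[:8])
value = buf[8:8 + length]
assert (group, element, vr, value) == (0x0010, 0x0010, b'PN', b'Doe^John    ')
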
Example no. 39
import asyncio
import functools

from struct import Struct

from .utils import asyncio_serial

from .bcd import bcd_to_integer, integer_to_bcd
from .rigcap import VFO

CIV_Header_Fmt = Struct('HBBB')
CIV_Footer = b'\xFD'
Byte_Encode = Struct('B')


class CIV_Header:
    @staticmethod
    def size():
        return CIV_Header_Fmt.size

    @staticmethod
    def parse(buf):
        (preamble, dest, src, cmd) = CIV_Header_Fmt.unpack_from(buf)
        assert (preamble == 0xFEFE)
        return dict(dest=dest, src=src, cmd=cmd)

    @staticmethod
    def fmt(dest, src, cmd, subcmd=None):
        hdr = CIV_Header_Fmt.pack(0xFEFE, dest, src, cmd)
        if subcmd is not None:
            hdr += Byte_Encode.pack(subcmd)
        return hdr
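
A hedged usage sketch (the device addresses are made up): building a frame header and parsing it back.

hdr = CIV_Header.fmt(dest=0x94, src=0xE0, cmd=0x03)
assert CIV_Header.parse(hdr) == dict(dest=0x94, src=0xE0, cmd=0x03)
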
Example no. 40
class joe_frame:
    __slots__ = 'num_vertices', 'num_normals', 'num_texcoords',\
       'faces', 'verts', 'texcoords', 'normals'
    bstruct = Struct('<3i')

    def __init__(self):
        self.num_vertices = 0
        self.num_texcoords = 0
        self.num_normals = 0
        self.faces = []
        self.verts = []
        self.texcoords = []
        self.normals = []

    def load(self, file):
        # header
        data = file.read(joe_frame.bstruct.size)
        v = joe_frame.bstruct.unpack(data)
        self.num_vertices = v[0]
        self.num_texcoords = v[1]
        self.num_normals = v[2]
        # mesh data
        self.verts = joe_vertex.read(self.num_vertices, file)
        self.normals = joe_vertex.read(self.num_normals, file)
        self.texcoords = joe_texcoord.read(self.num_texcoords, file)
        return self

    def save(self, file):
        # header
        data = joe_frame.bstruct.pack(self.num_vertices, self.num_texcoords,
                                      self.num_normals)
        file.write(data)
        # mesh data
        joe_vertex.write(self.verts, file)
        joe_vertex.write(self.normals, file)
        joe_texcoord.write(self.texcoords, file)

    def from_mesh(self, obj):
        mesh = obj.data
        if obj.matrix_world != Matrix.Identity(4):
            mesh = obj.data.copy()
            mesh.transform(obj.matrix_world)
        if not mesh.tessfaces:
            mesh.calc_tessface()
        normals = util.indexed_set()
        vertices = util.indexed_set()
        texcoords = util.indexed_set()
        # get vertices and normals
        mvertices = mesh.vertices
        mtexcoords = mesh.tessface_uv_textures[0].data
        for fi, f in enumerate(mesh.tessfaces):
            uv = mtexcoords[fi].uv_raw
            if f.vertices_raw[3] != 0:
                # split quad in two tris
                d0 = (Vector(mvertices[f.vertices_raw[2]].co) -
                      Vector(mvertices[f.vertices_raw[0]].co)).length_squared
                d1 = (Vector(mvertices[f.vertices_raw[3]].co) -
                      Vector(mvertices[f.vertices_raw[1]].co)).length_squared
                if d0 < d1:
                    vi1, vi2 = (0, 1, 2), (2, 3, 0)
                else:
                    vi1, vi2 = (1, 2, 3), (3, 0, 1)
                jf = joe_face()
                jf.vertex_index = [
                    vertices.get(mvertices[f.vertices_raw[i]].co) for i in vi2
                ]
                if f.use_smooth:
                    jf.normal_index = [
                        normals.get(mvertices[f.vertices_raw[i]].normal)
                        for i in vi2
                    ]
                else:
                    jf.normal_index = [normals.get(f.normal)] * 3
                jf.texture_index = [
                    texcoords.get((uv[i * 2], uv[i * 2 + 1])) for i in vi2
                ]
                self.faces.append(jf)
            else:
                vi1 = (0, 1, 2)
            jf = joe_face()
            jf.vertex_index = [
                vertices.get(mvertices[f.vertices_raw[i]].co) for i in vi1
            ]
            if f.use_smooth:
                jf.normal_index = [
                    normals.get(mvertices[f.vertices_raw[i]].normal)
                    for i in vi1
                ]
            else:
                jf.normal_index = [normals.get(f.normal)] * 3
            jf.texture_index = [
                texcoords.get((uv[i * 2], uv[i * 2 + 1])) for i in vi1
            ]
            self.faces.append(jf)
        self.normals = normals.list
        self.verts = vertices.list
        self.texcoords = texcoords.list
        self.num_normals = len(self.normals)
        self.num_texcoords = len(self.texcoords)
        self.num_vertices = len(self.verts)
        return self

    # remove faces consisting of fewer than 3 distinct vertices
    def remove_degenerate_faces(self):
        faces = []
        for f in self.faces:
            vi = f.vertex_index
            if vi[0] != vi[1] and vi[1] != vi[2] and vi[0] != vi[2]:
                faces.append(f)
        self.faces = faces

    # blender only supports one normal per vertex
    def duplicate_verts_with_multiple_normals(self):
        face_vert = {}
        verts = []
        for f in self.faces:
            for i in range(3):
                vn = f.vertex_index[i], f.normal_index[i]
                if vn not in face_vert:
                    verts.append(self.verts[f.vertex_index[i]])
                    vi = len(verts) - 1
                    f.vertex_index[i] = vi
                    face_vert[vn] = vi
                else:
                    f.vertex_index[i] = face_vert[vn]
        self.verts = verts

    # in blender 2.5 the last vertex index shall not be 0
    def swizzle_face_vertices(self):
        for f in self.faces:
            vi = f.vertex_index
            ni = f.normal_index
            ti = f.texture_index
            if vi[2] == 0:
                vi[0], vi[1], vi[2] = vi[2], vi[0], vi[1]
                ni[0], ni[1], ni[2] = ni[2], ni[0], ni[1]
                ti[0], ti[1], ti[2] = ti[2], ti[0], ti[1]

    def to_mesh(self, name, image):
        # cleanup joe
        self.remove_degenerate_faces()
        self.swizzle_face_vertices()
        self.duplicate_verts_with_multiple_normals()

        # new mesh
        mesh = bpy.data.meshes.new(name)
        mesh.vertices.add(len(self.verts))
        mesh.tessfaces.add(len(self.faces))

        # set vertices
        for i, v in enumerate(self.verts):
            mesh.vertices[i].co = v
        for f in self.faces:
            for i in range(3):
                mesh.vertices[f.vertex_index[i]].normal = self.normals[
                    f.normal_index[i]]

        # set faces
        for i, f in enumerate(self.faces):
            mesh.tessfaces[i].vertices = (f.vertex_index[0], f.vertex_index[1],
                                          f.vertex_index[2], 0)
            mesh.tessfaces[i].use_smooth = True

        # set texture coordinates
        if self.num_texcoords == 0:
            print("Warning! Mesh has no texture coordinates.")
        else:
            mesh.tessface_uv_textures.new()
            for i, f in enumerate(self.faces):
                mf = mesh.tessface_uv_textures[0].data[i]
                mf.uv1 = self.texcoords[f.texture_index[0]]
                mf.uv2 = self.texcoords[f.texture_index[1]]
                mf.uv3 = self.texcoords[f.texture_index[2]]
                if image:
                    mf.image = image

        mesh.validate()
        mesh.update()
        object = bpy.data.objects.new(name, mesh)
        bpy.context.scene.objects.link(object)
        return object
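
A hedged standalone round-trip of the '<3i' frame header used by load() and save() above; the counts are made up:

import io
from struct import Struct

bstruct = Struct('<3i')
buf = io.BytesIO(bstruct.pack(8, 8, 6))  # num_vertices, num_texcoords, num_normals
counts = bstruct.unpack(buf.read(bstruct.size))
assert counts == (8, 8, 6)
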
Example no. 41
class OverflowPage(Page):
    """
    TODO: It might be worthwhile to write the current offset into the page header
          as a sanity check.
    """

    MAGIC = b"oVeRfLoW"
    STRUCT = Struct("! 8s I 4084x")
    # this is the size of the data portion of an overflow page
    HEADERSIZE = 12
    DATASIZE = PAGESIZE - HEADERSIZE

    def __init__(self, next_overflow_pageno, overflow_data):
        super().__init__()
        self.next_overflow_pageno = next_overflow_pageno
        self.overflow_data = overflow_data

    def pack(self) -> bytes:
        return self.STRUCT.pack(
            self.MAGIC,
            self.next_overflow_pageno,
            self.overflow_data,
        )

    def pack_into(self, buffer: WriteableBuffer, offset: int):
        self.STRUCT.pack_into(
            buffer,
            offset,
            self.MAGIC,
            self.next_overflow_pageno,
            self.overflow_data,
        )

    def read_start(self, offset: int) -> Tuple[bytes, int]:
        """Read the data from offset

        Returns the data read so far, and how much needs to be read from the next
        overflow page.

        Offsets are relative to the end of the header, but because we store the data in
        a separate array, we don't have to do any math.
        """

        # FIXME: use a destination WriteableBuffer instead of return `bytes`.

        # We need to read the 4-byte size of the data.
        # If there aren't at least 4 bytes left on the page, something is wrong.
        if offset > self.DATASIZE - 4:
            raise ValueError("offset too close to end of overflow page")

        size = unpack_from("!I", self.overflow_data, offset)[0]

        # advance over the size data
        offset += 4

        return self.read_continue(offset, size)

    def read_continue(self, offset: int, size: int) -> Tuple[bytes, int]:
        """Continue a read begun by `read_start`.

        Retuns the data we were table to read from the current page,
        and the amount of data that needs to be read from the next overflow page.

        Allocating a new overflow page (if necessary) is the caller's
        responsilibity.
        """
        if offset == self.DATASIZE:
            return (b"", size)  # all the data is on the next page.
        if offset + size > self.DATASIZE:
            # read all the data on this page, but there is more to be read.
            data = self.overflow_data[offset:self.DATASIZE]
            return (data, offset + size - self.DATASIZE)
        else:
            # read the remaining data
            data = self.overflow_data[offset:offset + size]
            return (data, 0)

    def write_start(self, data: ReadableBuffer,
                    offset: int) -> Optional[ReadableBuffer]:
        # FIXME: read_start expects at least 4 bytes in the first page.
        #        We need to account for that here somehow.

        # prefix the data with its length
        data = length_prefix(data)
        end = offset + len(data)

        if end > self.DATASIZE:
            # the data doesn't fit on this page alone.
            # split it and return what we can't fit.
            split = self.DATASIZE - offset
            self.overflow_data[offset:self.DATASIZE] = data[:split]
            return data[split:]

        # the data fits
        self.overflow_data[offset:end] = data
        return None  # no more data to write!

    def write_continue(self, data: ReadableBuffer) -> Optional[ReadableBuffer]:
        """Continue writing incomplete data.

        Offset is always zero. If we're continuing to write, that means we've
        filled a page and had to start a new one.

        Write everything we can, return the rest. It's the caller's
        responsibility to create a new overflow page if necessary.
        """
        # FIXME: record page offset after write?!?!?

        if len(data) <= self.DATASIZE:
            self.overflow_data[:len(data)] = data
            return None
        else:
            self.overflow_data[:len(data)] = data[:self.DATASIZE]
            return data[self.DATASIZE:]
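
A hedged sketch of the length-prefix convention read_start expects; length_prefix itself isn't shown above, so this assumes a big-endian u32 prefix matching the '!I' unpack, and length_prefix_demo is a hypothetical stand-in:

from struct import pack, unpack_from

def length_prefix_demo(payload: bytes) -> bytes:  # hypothetical stand-in
    return pack('!I', len(payload)) + payload

blob = length_prefix_demo(b'hello')
size = unpack_from('!I', blob, 0)[0]
assert blob[4:4 + size] == b'hello'
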
Example no. 42
    def write_op2(self, op2, op2_ascii, itable, new_result,
                  date, is_mag_phase=False, endian='>'):
        """writes an OP2"""
        import inspect
        from struct import Struct, pack
        frame = inspect.currentframe()
        call_frame = inspect.getouterframes(frame, 2)
        op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))

        if itable == -1:
            self._write_table_header(op2, op2_ascii, date)
            itable = -3

        #print("nnodes_all =", nnodes_all)
        #msg.append('  element_node.shape = %s\n' % str(self.element_node.shape).replace('L', ''))
        #msg.append('  data.shape=%s\n' % str(self.data.shape).replace('L', ''))

        eids = self.element_layer[:, 0]
        layers = self.element_layer[:, 1]
        eids_device = eids * 10 + self.device_code

        nelements = len(np.unique(eids))
        #print('nelements =', nelements)
        # 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
        #ntotal = ((nnodes * 21) + 1) + (nelements * 4)

        ntotali = self.num_wide
        nlayers = self.data.shape[1]
        ntotal = ntotali * nlayers

        #print('shape = %s' % str(self.data.shape))
        #assert self.ntimes == 1, self.ntimes

        device_code = self.device_code
        op2_ascii.write('  ntimes = %s\n' % self.ntimes)

        #fmt = '%2i %6f'
        #print('ntotal=%s' % (ntotal))
        #assert ntotal == 193, ntotal

        #[fiber_dist, oxx, oyy, txy, angle, majorP, minorP, ovm]
        op2_ascii.write('  #elementi = [eid_device, fd1, sx1, sy1, txy1, angle1, major1, minor1, vm1,\n')
        op2_ascii.write('  #                        fd2, sx2, sy2, txy2, angle2, major2, minor2, vm2,]\n')

        if self.is_sort1:
            struct1 = Struct(endian + 'i16f')
        else:
            raise NotImplementedError('SORT2')

        op2_ascii.write('nelements=%i\n' % nelements)

        ntimes = self.data.shape[0]

        for itime in range(ntimes):
            nwide = 0
            self._write_table_3(op2, op2_ascii, new_result, itable, itime)

            # record 4
            #print('stress itable = %s' % itable)
            itable -= 1
            header = [4, itable, 4,
                      4, 1, 4,
                      4, 0, 4,
                      4, ntotal, 4,
                      4 * ntotal]
            op2.write(pack('%ii' % len(header), *header))
            op2_ascii.write('r4 [4, 0, 4]\n')
            op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
            op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))

            #dt = self._times[itime]
            #header = _eigenvalue_header(self, header, itime, ntimes, dt)
            #f06_file.write(''.join(header + msg))

            #[o11, o22, t12, t1z, t2z, angle, major, minor, ovm]
            o11 = self.data[itime, :, 0]
            o22 = self.data[itime, :, 1]
            t12 = self.data[itime, :, 2]
            t1z = self.data[itime, :, 3]
            t2z = self.data[itime, :, 4]
            angle = self.data[itime, :, 5]
            major = self.data[itime, :, 6]
            minor = self.data[itime, :, 7]
            ovm = self.data[itime, :, 8]

            for eid_device, eid, layer, o11i, o22i, t12i, t1zi, t2zi, anglei, majori, minori, ovmi in zip(
                eids_device, eids, layers, o11, o22, t12, t1z, t2z, angle, major, minor, ovm):

                data = [eid_device, layer, o11i, o22i, t12i, t1zi, t2zi, anglei, majori, minori, ovmi]
                op2.write(pack('2i 9f', *data))

                [o11i, o22i, t12i, t1zi, t2zi, majori, minori, ovmi] = write_floats_12e([
                 o11i, o22i, t12i, t1zi, t2zi, majori, minori, ovmi])
                op2_ascii.write('0 %8s %4s  %12s %12s %12s   %12s %12s  %6.2F %12s %12s %s\n'
                                % (eid, layer, o11i, o22i, t12i, t1zi, t2zi, anglei, majori, minori, ovmi))

                nwide += len(data)

            assert nwide == ntotal, "nwide=%s ntotal=%s" % (nwide, ntotal)
            itable -= 1
            header = [4 * ntotal,]
            op2.write(pack('i', *header))
            op2_ascii.write('footer = %s\n' % header)
            new_result = False
        return itable
Example no. 43
def write_records(records, f, format="<idd"):
    # Build a Struct for the record format and pack each tuple into the file
    record_struct = Struct(format)
    for r in records:
        f.write(record_struct.pack(*r))
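
A hedged round-trip sketch for the helper above; Struct.iter_unpack walks the packed records back out:

import io
from struct import Struct

records = [(1, 2.0, 3.0), (2, 4.0, 5.0)]
buf = io.BytesIO()
write_records(records, buf)
assert list(Struct("<idd").iter_unpack(buf.getvalue())) == records
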
Example no. 44
    CONTINUATION = 0x0
    TEXT = 0x1
    BINARY = 0x2
    PING = 0x9
    PONG = 0xa
    CLOSE = 0x8

    # aiohttp specific types
    CLOSING = 0x100
    CLOSED = 0x101
    ERROR = 0x102


WS_KEY = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'

UNPACK_LEN2 = Struct('!H').unpack_from
UNPACK_LEN3 = Struct('!Q').unpack_from
UNPACK_CLOSE_CODE = Struct('!H').unpack
PACK_LEN1 = Struct('!BB').pack
PACK_LEN2 = Struct('!BBH').pack
PACK_LEN3 = Struct('!BBQ').pack
PACK_CLOSE_CODE = Struct('!H').pack
MSG_SIZE = 2**14
DEFAULT_LIMIT = 2**16
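
A hedged sketch of how these pack helpers map to the WebSocket frame-length encoding (payload lengths under 126 fit the second byte; 126 selects a 16-bit length, 127 a 64-bit one). pack_length is a hypothetical helper, and client-side masking is ignored:

def pack_length(first_byte: int, length: int) -> bytes:
    # first_byte carries the FIN bit and opcode
    if length < 126:
        return PACK_LEN1(first_byte, length)
    elif length < 1 << 16:
        return PACK_LEN2(first_byte, 126, length)
    else:
        return PACK_LEN3(first_byte, 127, length)

assert len(pack_length(0x81, 5)) == 2
assert len(pack_length(0x81, 1000)) == 4
assert len(pack_length(0x81, 1 << 20)) == 10
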

_WSMessageBase = collections.namedtuple('_WSMessageBase',
                                        ['type', 'data', 'extra'])


class WSMessage(_WSMessageBase):
    def json(self, *, loads: Callable[[Any], Any] = json.loads) -> Any:
Example no. 45
from ws4py.server.wsgirefserver import (
    WSGIServer, WebSocketWSGIHandler, WSGIRequestHandler
)
from ws4py.server.wsgiutils import WebSocketWSGIApplication

###########################################
# CONFIGURATION
WIDTH = 640
HEIGHT = 480
FRAMERATE = 24
HTTP_PORT = 8082
WS_PORT = 8084
COLOR = u'#444'
BGCOLOR = u'#333'
JSMPEG_MAGIC = b'jsmp'
JSMPEG_HEADER = Struct('>4sHH')
###########################################
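
A hedged sketch of the stream preamble this header struct produces: the magic plus big-endian width/height, presumably sent to each websocket client before the MPEG data so the player knows the frame size.

preamble = JSMPEG_HEADER.pack(JSMPEG_MAGIC, WIDTH, HEIGHT)
assert preamble == b'jsmp\x02\x80\x01\xe0'  # 640 = 0x0280, 480 = 0x01e0
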


class StreamingHttpHandler(BaseHTTPRequestHandler):
    def do_HEAD(self):
        self.do_GET()

    def do_GET(self):
        if self.path == '/':
            self.send_response(301)
            self.send_header('Location', '/index.html')
            self.end_headers()
            return
        elif self.path == '/jsmpg.js':
            content_type = 'application/javascript'
Example no. 46
class Pager:
    """Abstract all file reads and writes. Manage free space.

    TODO: add root entry to zero page
    TODO: support concurrent access

    Zero Page Format:
        u8[8]       ZERO_MAGIC
        u32         next free pageno
        u32         next overflow pageno
        u32         current overflow pageno (initially 0)
        u16         current overflow offset (initially 22)
        u8[4074]    overflow

    Free Page Format:
        u8[8]      FREE_MAGIC
        u32        next free pageno
        u8[4080]   padding
    """

    PAGESIZE = 0x1000
    ZERO_MAGIC = b"\xabZEROPG\xcd"  # 8 bytes
    FREE_MAGIC = b"\xdcFREEPG\xba"  # 8 bytes
    assert len(ZERO_MAGIC) == len(FREE_MAGIC)
    PAGE_FORMAT = Struct(f"!8sQ{PAGESIZE-16}x")
    assert PAGE_FORMAT.size == PAGESIZE

    @staticmethod
    def open_file(filepath: Union[AnyStr, os.PathLike]):
        if os.path.exists(filepath):
            mode = "r+b"
        else:
            mode = "w+b"
        return Pager(open(filepath, mode))

    def __init__(self, file: BinaryIO):
        """Create a new pager for `file`.

        `file` should either be blank or have a zero page.
        Anything else is an error.
        """
        if file.closed or not (file.readable() and file.writable()):
            raise ValueError(
                "Pager requires a file that is open, readable, and writeable")
        self.file = file
        if self._seek_end() == 0:
            # blank file, create zero page.
            page = self.PAGE_FORMAT.pack(self.ZERO_MAGIC, 0)
            file.write(page)

        self._read_zero_page()
        self.cache = LRUCache(maxsize=32)

    def _seek_end(self) -> int:
        """Seek to the end of the file and return the position."""
        return self.file.seek(0, os.SEEK_END)

    def _seek_page(self, pageno: int):
        """Seek to the page given by `pageno`."""
        offset = pageno * self.PAGESIZE
        end = self._seek_end()
        if offset + self.PAGESIZE > end:
            raise ValueError("pageno out of bounds")
        self.file.seek(offset)

    def _read_zero_page(self):
        self._seek_page(0)
        data = self.PAGE_FORMAT.unpack(self.file.read(self.PAGESIZE))
        if data[0] != self.ZERO_MAGIC:
            raise RuntimeError("Bad MAGIC on zero page")
        self.next_free_pageno = data[1]

    def read_page(self, pageno: int, use_cache=True) -> bytes:
        """Fetch a page from the file.

        :param pageno: The page number to fetch.
        :type pageno: int
        :returns: The contents of the page as a bytes object.
        :rtype: bytes
        """

        if use_cache:
            data = self.cache.get(pageno)
            if data is not None:
                return data

        self._seek_page(pageno)
        data = self.file.read(self.PAGESIZE)
        assert len(data) == self.PAGESIZE

        self.cache.set(pageno, data)

        return data

    def write_page(self, pageno: int, data: bytes):
        """Write new page data.

        :param pageno: The pageno to write to.
        :type pageno: int
        :param data: The data to write.
        :type data: bytes
        """

        assert len(data) == self.PAGESIZE

        self._seek_page(pageno)
        count = self.file.write(data)
        if count != self.PAGESIZE:
            raise IOError("Incomplete page write")
        self.file.flush()
        self.cache.delete(pageno)

    def alloc_page(self) -> int:
        """Allocate a new page.

        :return: The page number of the new page.
        :rtype: int
        """

        if self.next_free_pageno != 0:
            # we have a previously allocated page we can use.
            pageno = self.next_free_pageno
            data = self.PAGE_FORMAT.unpack(self.read_page(pageno))
            if data[0] != self.FREE_MAGIC:
                raise RuntimeError("invalid free page format: bad magic")
            self._write_next_free_pageno(data[1])
        else:
            pageno = self._alloc_fresh_page()

        return pageno

    def _alloc_fresh_page(self) -> int:
        """Allocate a fresh page at the end of the file."""

        # find the end of the file
        next_page = self._seek_end()

        # align with page boundary
        if next_page % self.PAGESIZE != 0:
            next_page = (next_page & ~(self.PAGESIZE - 1)) + self.PAGESIZE
            assert next_page % self.PAGESIZE == 0
            # next_page is an offset not a pageno!!!
            self.file.seek(next_page)

        # write a blank page
        self.file.write(b"\x00" * self.PAGESIZE)

        # return the page number
        return next_page // self.PAGESIZE

    def free_page(self, pageno: int):
        """Free the given page."""

        # clear the cache
        self.cache.delete(pageno)

        # clear the page and write the pointer to the next free page.
        data = self.PAGE_FORMAT.pack(self.FREE_MAGIC, self.next_free_pageno)
        self.write_page(pageno, data)
        # commit the next free page to the zero page.
        self._write_next_free_pageno(pageno)

    def _write_next_free_pageno(self, pageno: int):
        """Commit the first free pageno to the zero page."""
        data = self.PAGE_FORMAT.pack(self.ZERO_MAGIC, pageno)
        self.write_page(0, data)
        self.next_free_pageno = pageno

    def close(self):
        """Close the pager and its underlying file object."""
        self.file.flush()
        self.file.close()

    def read_overflow(self, pageno: int, offset: int) -> bytes:
        """Read the overflow data that begins at pageno and offset."""
        # FIXME: use buffers instead of appending `bytes` objects.

        # fetch the overflow page
        page = OverflowPage.from_page(self.read_page(pageno))
        data = bytes()

        # read everything we can from the first page.
        current_data, togo = page.read_start(offset)
        data += current_data

        while togo > 0:
            # we still have data to read, so fetch the next overflow page.
            page = OverflowPage.from_page(
                self.read_page(page.next_overflow_pageno))
            # continue reading the data from the start of the next overflow page.
            current_data, togo = page.read_continue(0, togo)
            # append new data
            data += current_data

        return data
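
A hedged usage sketch of the Pager: allocate a page, write it, free it, and observe that freed pages are recycled before the file grows (the file name is made up):

pager = Pager.open_file("demo.db")
pageno = pager.alloc_page()
pager.write_page(pageno, b"\x00" * Pager.PAGESIZE)
assert pager.read_page(pageno) == b"\x00" * Pager.PAGESIZE
pager.free_page(pageno)
assert pager.alloc_page() == pageno  # the free list is consulted first
pager.close()
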
Example no. 47
class GPT(object):
    """
	Class for handling of GUID Partition Tables
	(https://en.wikipedia.org/wiki/GUID_Partition_Table)
	"""

    _gpt_header = b"EFI PART"
    _gpt_size = 0x5C  # default, can be overridden by headerSize

    # Format string dict
    #   itemName is the new dict key for the data to be stored under
    #   formatString is the Python formatstring for struct.unpack()
    # Example:
    #   ('itemName', ('formatString'))
    _gpt_head_fmt = OrderedDict([
        ('header', ('8s')),  # magic number
        ('revision', ('I')),  # actually 2 shorts, Struct...
        ('headerSize', ('I')),
        ('crc32', ('I')),
        ('reserved', ('I')),
        ('myLBA', ('Q')),
        ('altLBA', ('Q')),
        ('dataStartLBA', ('Q')),
        ('dataEndLBA', ('Q')),
        ('uuid', ('16s')),
        ('entryStart', ('Q')),
        ('entryCount', ('I')),
        ('entrySize', ('I')),
        ('entryCrc32', ('I')),
    ])

    # Generate the formatstring for struct.unpack()
    _gpt_struct = Struct("<" + "".join(_gpt_head_fmt.values()))

    def display(self):
        """
		Display the data in the particular GPT
		"""

        verbose("")

        verbose("device UUID={:s}".format(str(self.uuid)))
        verbose("myLBA={:d} alternateLBA={:d}".format(self.myLBA, self.altLBA))
        verbose("firstUsableLBA={:d} lastUsableLBA={:d}".format(
            self.dataStartLBA, self.dataEndLBA))

        verbose("")

        if self.myLBA == 1:
            if self.entryStart != 2:
                verbose(
                    "Note: {:d} unused blocks between GPT header and entry table"
                    .format(self.entryStart - 2))
            endEntry = self.entryStart + (
                (self.entrySize * self.entryCount +
                 (1 << self.shiftLBA) - 1) >> self.shiftLBA)
            if endEntry < self.dataStartLBA:
                verbose(
                    "Note: {:d} unused slice entry blocks before first usable block"
                    .format(self.dataStartLBA - endEntry))
        else:
            if self.entryStart != self.dataEndLBA + 1:
                verbose(
                    "Note: {:d} unused slice entry blocks after last usable block"
                    .format(self.entryStart - self.dataEndLBA - 1))
            endEntry = self.entryStart + (
                (self.entrySize * self.entryCount +
                 (1 << self.shiftLBA) - 1) >> self.shiftLBA)
            if endEntry < self.myLBA - 1:
                verbose(
                    "Note: {:d} unused blocks between GPT header and entry table"
                    .format(self.myLBA - endEntry + 1))

        current = self.dataStartLBA
        for slice in self.slices:
            if slice.startLBA != current:
                verbose("Note: non-contiguous ({:d} unused)".format(
                    slice.startLBA - current))
            slice.display()
            current = slice.endLBA + 1
        current -= 1
        if self.dataEndLBA != current:
            verbose("Note: empty LBAs at end ({:d} unused)".format(
                self.dataEndLBA - current))

    def tryParseHeader(self, buf):
        """
		Try to parse a buffer as a GPT header, return None on failure
		"""

        data = dict(
            zip(self._gpt_head_fmt.keys(),
                self._gpt_struct.unpack(buf[0:self._gpt_size])))

        if data['header'] != self._gpt_header:
            return None

        tmp = data['crc32']
        data['crc32'] = 0

        crc = crc32(
            self._gpt_struct.pack(
                *[data[k] for k in self._gpt_head_fmt.keys()]))

        data['crc32'] = tmp

        # just in case future ones are larger
        crc = crc32(buf[self._gpt_size:data['headerSize']], crc)
        crc &= 0xFFFFFFFF

        if crc != data['crc32']:
            verbose("Warning: Found GPT candidate with bad CRC")
            return None

        return data

    def __init__(self, buf, lbaMinShift=9, lbaMaxShift=16):
        """
		Initialize the GPT class
		"""

        # sanity checking
        if self._gpt_struct.size != self._gpt_size:
            raise NoGPT("GPT format string wrong!")

        # we assume we're searching: the block size is unknown, so try
        # each power-of-2 block size from the bottom end up
        # (non-power-of-2 sizes are illegal)
        for shiftLBA in range(lbaMinShift, lbaMaxShift + 1):
            lbaSize = 1 << shiftLBA

            # try for a primary GPT
            hbuf = buf[lbaSize:lbaSize << 1]

            data = self.tryParseHeader(hbuf)

            if data:
                verbose("Found Primary GPT")
                break

            # try for a backup GPT
            hbuf = buf[-lbaSize:]

            data = self.tryParseHeader(hbuf)

            if data:
                verbose("Found Backup GPT")
                break

        else:
            raise NoGPT("Failed to locate GPT")

        self.shiftLBA = shiftLBA

        if data['reserved'] != 0:
            verbose("Warning: Reserved area non-zero")

        self.revision = data['revision']
        self.headerSize = data['headerSize']
        self.reserved = data['reserved']
        self.myLBA = data['myLBA']
        self.altLBA = data['altLBA']
        self.dataStartLBA = data['dataStartLBA']
        self.dataEndLBA = data['dataEndLBA']
        self.uuid = UUID(bytes_le=data['uuid'])
        self.entryStart = data['entryStart']
        self.entryCount = data['entryCount']
        self.entrySize = data['entrySize']
        self.entryCrc32 = data['entryCrc32']

        if self.revision >> 16 != 1:
            raise NoGPT("Error: GPT major version isn't 1")
        elif self.revision & 0xFFFF != 0:
            verbose("Warning: newer GPT minor revision")
        # (the reserved area was already checked above, via data['reserved'])

        # this is an error according to the specs
        if (self.myLBA != 1) and (self.altLBA != 1):
            raise NoGPT("Error: No GPT at LBA 1 ?!")

        # this is an error according to the specs
        if self.entrySize & (self.entrySize - 1):
            raise NoGPT("Error: entry size is not a power of 2")

        if self.myLBA == 1:
            sliceAddr = self.entryStart << self.shiftLBA
        else:
            sliceAddr = (self.entryStart - self.myLBA - 1) << self.shiftLBA

        self.slices = []
        crc = 0
        for i in range(self.entryCount):
            sbuf = buf[sliceAddr:sliceAddr + self.entrySize]
            crc = crc32(sbuf, crc)
            slice = GPTSlice(sbuf)
            if slice.type != UUID(int=0):
                self.slices.append(slice)

            sliceAddr += self.entrySize

        crc &= 0xFFFFFFFF

        if crc != self.entryCrc32:
            raise NoGPT("Error: bad slice entry CRC")

        last = 0
        for slice in self.slices:
            if slice.startLBA <= last:
                verbose("Note: slices are out of order in GPT")
                self.slices.sort(key=lambda s: s.startLBA)
                break
            last = slice.endLBA
Example n. 48
0
def read_records(f, format="<idd"):
    record_struct = Struct(format)
    chunks = iter(lambda: f.read(record_struct.size), b'')
    return (record_struct.unpack(chunk) for chunk in chunks)
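A hypothetical round trip for the generator above, using an in-memory file (values are illustrative):

import io
from struct import Struct

buf = io.BytesIO()
rec = Struct("<idd")
buf.write(rec.pack(1, 2.0, 3.0))
buf.write(rec.pack(2, 4.0, 6.0))
buf.seek(0)
for record in read_records(buf):
    print(record)   # (1, 2.0, 3.0), then (2, 4.0, 6.0)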
Example n. 49
0
"""
Testing module for the lazy_wav module
"""

import pytest
p = pytest.mark.parametrize

from tempfile import NamedTemporaryFile
from struct import Struct
import io

# Audiolazy internal imports
from ..lazy_wav import WavStream
from ..lazy_stream import Stream, thub
from ..lazy_misc import almost_eq, DEFAULT_SAMPLE_RATE

uint16pack = Struct("<H").pack
uint32pack = Struct("<I").pack


def riff_chunk(chunk_id, *contents, **kwargs):
    """ Build a bytestring object with the RIFF chunk contents. """
    assert len(chunk_id) == 4
    align = kwargs.pop("align", False)
    assert not kwargs  # No other keyword argument available
    joined_contents = b"".join(contents)
    content_size = len(joined_contents)
    parts = [chunk_id, uint32pack(content_size), joined_contents]
    if align and content_size & 1:  # Tricky! RIFF is word-aligned
        return b"".join(parts + [b"\x00"])
    else:
        return b"".join(parts)
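A hypothetical usage of riff_chunk, building a minimal "fmt " body for 16-bit mono PCM (the field values are illustrative, not taken from the test suite):

fmt_body = b"".join([
    uint16pack(1),                        # wFormatTag: PCM
    uint16pack(1),                        # nChannels: mono
    uint32pack(DEFAULT_SAMPLE_RATE),      # nSamplesPerSec
    uint32pack(DEFAULT_SAMPLE_RATE * 2),  # nAvgBytesPerSec (rate * block align)
    uint16pack(2),                        # nBlockAlign: 16-bit mono
    uint16pack(16),                       # wBitsPerSample
])
fmt_chunk = riff_chunk(b"fmt ", fmt_body)
assert fmt_chunk[:4] == b"fmt " and len(fmt_chunk) == 8 + 16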
Example n. 50
0
    def opened(self):
        # jsmpeg's stream header: magic + video width + height, big-endian
        # (WIDTH, HEIGHT and native_str come from the surrounding module)
        JSMPEG_MAGIC = b'jsmp'
        JSMPEG_HEADER = Struct(native_str('>4sHH'))
        self.send(JSMPEG_HEADER.pack(JSMPEG_MAGIC, WIDTH, HEIGHT), binary=True)
Example n. 51
0
    def _read_ogs1_table35(self, data, ndata):
        """
        grid point stress discontinuities (plane stress/strain)

        TCODE =35 Grid point stresses for surfaces with plane strain
        1 EKEY I 10*grid point identification number and grid code
        2 NX RS Normal in x
        3 NY RS Normal in y
        4 NZ RS Normal in z (always -1)
        5 TXY RS Shear in xy
        6 PR RS Mean pressure (always -1)
        """
        result_name = 'grid_point_stress_discontinuities'
        if self._results.is_not_saved(result_name):
            return ndata
        self._results._found_result(result_name)
        slot = getattr(self, result_name)
        n = 0

        if self.num_wide == 6:
            obj_vector_real = GridPointStressesSurfaceDiscontinutiesArray

            #result_name, is_random = self._apply_oes_ato_crm_psd_rms_no(result_name)
            ntotal = 6 * 4 * self.factor
            nelements = ndata // ntotal
            assert ndata % (nelements * ntotal) == 0, ndata % (nelements * ntotal)
            auto_return, is_vectorized = self._create_oes_object4(
                nelements, result_name, slot, obj_vector_real)
            if auto_return:
                return nelements * ntotal

            obj = self.obj
            dt = self.nonlinear_factor

            if self.use_vector and is_vectorized:
                n = nelements * ntotal
                itotal = obj.ielement
                ielement2 = obj.itotal + nelements
                itotal2 = ielement2

                floats = frombuffer(data, dtype=self.fdtype).reshape(nelements, 6)#.copy()
                obj._times[obj.itime] = dt
                if obj.itime == 0:
                    ints = frombuffer(data, dtype=self.idtype).reshape(nelements, 6)
                    nids = ints[:, 0] // 10
                    assert nids.min() > 0, nids.min()
                    obj.node[itotal:itotal2] = nids

                #[nid, nx, ny, nz, txy, pressure]
                obj.data[obj.itime, itotal:itotal2, :] = floats[:, 1:]#.copy()
                obj.itotal = itotal2
                obj.ielement = ielement2
                n = ndata
            else:
                s = Struct(mapfmt(self._endian + b'i5f', self.size))
                nelements = ndata // ntotal  # 6*4
                for unused_i in range(nelements):
                    out = s.unpack(data[n:n+ntotal])
                    (nid_device, nx, ny, nz, txy, pressure) = out
                    nid = nid_device // 10
                    assert nid > 0, nid
                    self.obj.add_sort1(dt, nid, nx, ny, nz, txy, pressure)
                    n += ntotal
        else:
            msg = 'only num_wide=6 is allowed; num_wide=%s' % self.num_wide
            raise RuntimeError(msg)
        return n
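The nid_device decoding above relies on the OP2 convention stated in the docstring (EKEY is 10 times the grid point id plus the device code), so integer division by 10 recovers the id. A tiny illustration with a hypothetical value:

nid_device = 1237          # grid point 123, device code 7
assert nid_device // 10 == 123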
Example n. 52
0
class _HeaderStorage(object):
    '''Implementation of raw header storage for flat files.

    Block headers are looked up by index, which in general is not equal to its height.

    If a block header is read that has not been set, MissingHeader is raised.

    Block headers are 80 bytes; the caller is responsible for their validation.

    Flat files are stored as a reserved area, followed by the headers consecutively.
    The reserved area has the following format:
       a) reserved area size (little endian uint16)
       b) version number (little endian uint16)
       c) block header count (little endian uint32)
    '''
    struct_reserved = Struct('<HHI')

    def __init__(self, filename):
        '''Create an object representing flat file header storage in filename.'''
        self.filename = filename
        self.mmap = None
        self.reserved_size = self.struct_reserved.size
        self.header_count = 0

    def _offset(self, key):
        return self.reserved_size + key * 80

    def _create_file(self, checkpoint):
        s = self.struct_reserved
        self.reserved_size = s.size
        with open(self.filename, 'wb') as f:
            f.write(s.pack(s.size, 0, checkpoint.height + 1))
            f.seek(self._offset(checkpoint.height))
            f.write(checkpoint.raw_header)

    def _open_file(self, checkpoint):
        logger.debug(f'opening headers file {self.filename}')
        s = self.struct_reserved
        self.mmap = map_file(self.filename)
        try:
            if len(self.mmap) >= s.size:
                self.reserved_size, version, self.header_count = s.unpack(
                    self.mmap[:s.size])
                # Note self[checkpoint.height] might raise MissingHeader
                if version == 0 and self[
                        checkpoint.height] == checkpoint.raw_header:
                    return
            raise _BadHeadersFile(f'invalid headers file {self.filename}')
        except Exception:
            self.mmap.close()
            self.mmap = None
            raise

    def open_or_create(self, checkpoint):
        try:
            self._open_file(checkpoint)
            return
        except FileNotFoundError:
            logger.debug(f'{self.filename} not found, creating it')
        except (_BadHeadersFile, MissingHeader):
            logger.debug(f're-creating headers file {self.filename}')
        self._create_file(checkpoint)
        self._open_file(checkpoint)

    def _set_count(self, count):
        self.mmap[4:8] = pack_le_uint32(count)

    def _set_raw_header(self, index, raw_header):
        if not isinstance(raw_header,
                          (bytes, bytearray)) or len(raw_header) != 80:
            raise TypeError('raw header must be binary of length 80')
        # Grow if needed
        mmap = self.mmap
        start = self._offset(index)
        if start >= len(mmap):
            mmap.close()
            mmap = self.mmap = map_file(self.filename,
                                        self._offset(index + 5_000))
        if index >= len(self):
            self._set_count(index + 1)
        self.mmap[start:start + 80] = raw_header

    def __getitem__(self, key):
        def header(index):
            start = self._offset(index)
            result = self.mmap[start:start + 80]
            if not result or result == empty_header:
                raise MissingHeader(f'no header at index {index}')
            return result

        if isinstance(key, int):
            if key < 0:
                key += len(self)
            return header(key)
        elif isinstance(key, slice):
            return [header(index) for index in range(*key.indices(len(self)))]
        raise TypeError(f'key {key} should be an integer')

    def __setitem__(self, key, raw_header):
        if isinstance(key, int):
            self._set_raw_header(key, raw_header)
        else:
            raise TypeError(f'key {key} should be an integer')

    def __len__(self):
        count, = unpack_le_uint32(self.mmap[4:8])
        return count

    def append(self, raw_header):
        header_index = len(self)
        self._set_raw_header(header_index, raw_header)
        return header_index

    def close(self):
        self.mmap.close()

    def flush(self):
        return self.mmap.flush()
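The reserved-area layout described in the class docstring can be illustrated standalone (a sketch independent of the class; the count value is arbitrary):

from struct import Struct

reserved = Struct('<HHI')                     # size, version, header count
blob = reserved.pack(reserved.size, 0, 1000)
size, version, count = reserved.unpack(blob)
assert (size, version, count) == (8, 0, 1000)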
Example n. 53
0
    def _read_ogs1_table26_numwide11(self, data, ndata):
        """surface stresses"""
        result_name = 'grid_point_surface_stresses'
        obj_vector_real = GridPointSurfaceStressesArray
        if self._results.is_not_saved(result_name):
            return ndata
        self._results._found_result(result_name)
        slot = getattr(self, result_name)
        n = 0

        #result_name, is_random = self._apply_oes_ato_crm_psd_rms_no(result_name)
        ntotal = 44 * self.factor # 4*11
        nelements = ndata // ntotal
        auto_return, is_vectorized = self._create_oes_object4(
            nelements, result_name, slot, obj_vector_real)
        if auto_return:
            return nelements * ntotal

        obj = self.obj
        dt = self.nonlinear_factor

        if self.use_vector and is_vectorized:
            n = nelements * ntotal
            itotal = obj.ielement
            ielement2 = obj.itotal + nelements
            itotal2 = ielement2

            floats = frombuffer(data, dtype=self.fdtype8).reshape(nelements, 11).copy()
            obj._times[obj.itime] = dt
            if obj.itime == 0:
                ints = frombuffer(data, dtype=self.idtype8).reshape(nelements, 11).copy()
                nids = ints[:, 0] // 10
                eids = ints[:, 1]
                assert nids.min() > 0, nids.min()
                obj.node_element[itotal:itotal2, 0] = nids
                obj.node_element[itotal:itotal2, 1] = eids

            #[fiber, nx, ny, txy, angle, major, minor, tmax, ovm]
            s4 = 'S%i' % self.size
            strings = frombuffer(data, dtype=self._uendian + s4).reshape(nelements, 11)[:, 2].copy()
            obj.location[itotal:itotal2] = strings
            obj.data[obj.itime, itotal:itotal2, :] = floats[:, 3:]#.copy()
            obj.itotal = itotal2
            obj.ielement = ielement2
            n = ndata
        else:
            fmt = self._endian + (b'2i4s8f' if self.size == 4 else b'2q8s8d')
            s = Struct(fmt)
            nelements = ndata // ntotal  # 11*4
            for unused_i in range(nelements):
                edata = data[n:n+ntotal]
                out = s.unpack(edata)
                (nid_device, eid, fiber, nx, ny, txy, angle, major, minor, tmax, ovm) = out
                nid = nid_device // 10
                fiber = fiber.decode('utf-8').strip()
                assert nid > 0, nid
                self.obj.add_sort1(dt, nid, eid, fiber, nx, ny, txy,
                                   angle, major, minor, tmax, ovm)
                n += ntotal

        assert ndata > 0, ndata
        assert nelements > 0, 'nelements=%r element_type=%s element_name=%r' % (nelements, self.element_type, self.element_name)
        #assert ndata % ntotal == 0, '%s n=%s nwide=%s len=%s ntotal=%s' % (self.element_name, ndata % ntotal, ndata % self.num_wide, ndata, ntotal)
        #assert self.num_wide * 4 * self.factor == ntotal, 'numwide*4=%s ntotal=%s' % (self.num_wide * 4, ntotal)
        assert n > 0, "n = %s result_name=%s" % (n, result_name)
        return n
Example n. 54
0
#!/usr/bin/env python
from struct import Struct

# obj = ClassName(args...)
s = Struct("HIHHI")   #  Struct s = new Struct("HIHHI");

print(dir(s))

binary_stream = s.pack(5, 2332, 183, 2909, 41)

print(binary_stream)
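
# Unpacking reverses the operation (illustrative round trip):
print(s.unpack(binary_stream))   # (5, 2332, 183, 2909, 41)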

x = 123456
print(type(x))
print(dir(x))

print(x.numerator)
print(x.bit_length())

# use TitleCase for classes
#  lower_case_words for everything else
print(object)

class Dog:   # inherits from 'object'

    def bark(self):
        print("Woof! Woof!")

print(Dog)

dog1 = Dog()
Example n. 55
0
        raise Exception('invalid secret')
    # TOTP: one time step every 30 seconds, shifted by the caller-supplied offset
    interval = offset + int(time.time()) // 30
    msg = struct.pack('>Q', interval)
    digest = hmac.new(key, msg, hashlib.sha1).digest()
    # dynamic truncation (RFC 4226): the low nibble of the last byte picks the slice
    o = 15 & (digest[19] if six.PY3 else ord(digest[19]))
    token = (struct.unpack('>I', digest[o:o + 4])[0] & 0x7fffffff) % 1000000
    return '{0:06d}'.format(token).encode('ascii')


##
# The following code is adapted from the pbkdf2_bin() function
# in here https://github.com/mitsuhiko/python-pbkdf2
# Copyright 2011 by Armin Ronacher. Licensed under BSD license.
# https://github.com/mitsuhiko/python-pbkdf2/blob/master/LICENSE
##
_pack_int = Struct('>I').pack

if six.PY3:

    def _pseudorandom(x, mac):
        h = mac.copy()
        h.update(x)
        return h.digest()

    def _pbkdf2(data, salt, iterations, keylen, hashfunc):
        mac = hmac.new(data, None, hashfunc)
        buf = []
        for block in range(1, -(-keylen // mac.digest_size) + 1):  # -(-a // b) == ceil(a / b)
            rv = u = _pseudorandom(salt + _pack_int(block), mac)
            for i in range(iterations - 1):
                u = _pseudorandom(u, mac)
Example n. 56
0
    def _read_ogs1_table27_numwide9(self, data, ndata):
        """
        TCODE =27 Volume with direct
        1 EKEY I 10*grid point identification number + Device Code
        2 NX RS Normal in x
        3 NY RS Normal in y
        4 NZ RS Normal in z
        5 TXY RS Shear in xy
        6 TYZ RS Shear in yz
        7 TZX RS Shear in zx
        8 PR RS Mean pressure
        9 HVM RS Hencky-von Mises or Octahedral
        """
        result_name = 'grid_point_stresses_volume_direct'
        if self._results.is_not_saved(result_name):
            return ndata

        obj_vector_real = GridPointStressesVolumeDirectArray
        self._results._found_result(result_name)
        slot = getattr(self, result_name)
        n = 0

        #result_name, is_random = self._apply_oes_ato_crm_psd_rms_no(result_name)
        ntotal = 36 * self.factor  # 9 * 4
        nelements = ndata // ntotal
        assert ndata % (nelements * ntotal) == 0, ndata % (nelements * ntotal)
        auto_return, is_vectorized = self._create_oes_object4(
            nelements, result_name, slot, obj_vector_real)
        if auto_return:
            return nelements * ntotal

        obj = self.obj
        dt = self.nonlinear_factor

        if self.use_vector and is_vectorized:
            n = nelements * ntotal
            itotal = obj.ielement
            ielement2 = obj.itotal + nelements
            itotal2 = ielement2

            floats = frombuffer(data, dtype=self.fdtype8).reshape(nelements, 9)#.copy()
            obj._times[obj.itime] = dt
            if obj.itime == 0:
                ints = frombuffer(data, dtype=self.idtype8).reshape(nelements, 9)
                nids = ints[:, 0] // 10
                assert nids.min() > 0, nids.min()
                obj.node[itotal:itotal2] = nids

            #[nid, nx, ny, nz, txy, tyz, txz, pressure, ovm]
            #strings = frombuffer(data, dtype=self._uendian + 'S4').reshape(nelements, 11)[:, 2].copy()
            #obj.location[itotal:itotal2] = strings
            obj.data[obj.itime, itotal:itotal2, :] = floats[:, 1:]#.copy()
            obj.itotal = itotal2
            obj.ielement = ielement2
            n = ndata
        else:
            fmt = mapfmt(self._endian + b'i8f', self.size)
            s = Struct(fmt)
            for unused_i in range(nelements):
                edata = data[n:n+ntotal]
                out = s.unpack(edata)
                (nid_device, nx, ny, nz, txy, tyz, txz, pressure, ovm) = out
                nid = nid_device // 10
                assert nid > 0, nid
                self.obj.add_sort1(dt, nid, nx, ny, nz, txy, tyz, txz, pressure, ovm)
                n += ntotal
        return n
Example n. 57
0
STATUS_FATAL = 2

# Object types
OBJECT_GETPUBKEY = 0
OBJECT_PUBKEY = 1
OBJECT_MSG = 2
OBJECT_BROADCAST = 3
OBJECT_I2P = 0x493250
OBJECT_ADDR = 0x61646472

eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack(
    '>Q', random.randrange(1, 18446744073709551615))

# Compiled struct for packing/unpacking message headers:
# magic, command (12 bytes), payload length, checksum.
# New code should use CreatePacket instead of Header.pack
Header = Struct('!L12sL4s')

VersionPacket = Struct('>LqQ20s4s36sH')
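
A hypothetical illustration of the header layout (the magic value shown is the one commonly documented for this network; a real packet carries the leading bytes of the payload hash in the checksum field, so the zeros here are placeholders only):

hdr = Header.pack(0xE9BEB4D9, b'verack', 0, b'\x00' * 4)
assert len(hdr) == Header.size == 24   # 4 + 12 + 4 + 4 bytes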

# Bitfield


def getBitfield(address):
    # bitfield of features supported by me (see the wiki).
    bitfield = 0
    # send ack
    if not BMConfigParser().safeGetBoolean(address, 'dontsendack'):
        bitfield |= BITFIELD_DOESACK
    return pack('>I', bitfield)

Example n. 58
0
    def _read_ogs1_table28_numwide15(self, data, ndata):
        """
        TCODE =28 Volume with principal
        1 EKEY I 10*grid point identification number + device code
        2 LXA RS Direction cosine from x to a
        3 LXB RS Direction cosine from x to b
        4 LXC RS Direction cosine from x to c

        5 LYA RS Direction cosine from y to a
        6 LYB RS Direction cosine from y to b
        7 LYC RS Direction cosine from y to c

        8 LZA RS Direction cosine from z to a
        9 LZB RS Direction cosine from z to b
        10 LZC RS Direction cosine from z to c

        11 SA RS Principal in a
        12 SB RS Principal in b
        13 SC RS Principal in c
        14 EPR RS Mean pressure
        15 EHVM RS Hencky-von Mises or octahedral
        """
        result_name = 'grid_point_stresses_volume_principal'
        obj_vector_real = GridPointStressesVolumePrincipalArray
        if self._results.is_not_saved(result_name):
            return ndata
        self._results._found_result(result_name)
        slot = getattr(self, result_name)
        n = 0

        #result_name, is_random = self._apply_oes_ato_crm_psd_rms_no(result_name)
        ntotal = 60 * self.factor # 15 * 4
        nelements = ndata // ntotal
        assert ndata % ntotal == 0
        auto_return, is_vectorized = self._create_oes_object4(
            nelements, result_name, slot, obj_vector_real)
        if auto_return:
            return nelements * ntotal

        obj = self.obj
        dt = self.nonlinear_factor
        if self.use_vector and is_vectorized and 0:  # vectorized path intentionally disabled
            n = nelements * ntotal
            #itotal = obj.ielement
            #ielement2 = obj.itotal + nelements
            #itotal2 = ielement2

            #floats = frombuffer(data, dtype=self.fdtype).reshape(nelements, 11).copy()
            #obj._times[obj.itime] = dt
            #if obj.itime == 0:
                #ints = frombuffer(data, dtype=self.idtype).reshape(nelements, 11).copy()
                #nids = ints[:, 0] // 10
                #eids = ints[:, 1]
                #assert nids.min() > 0, nids.min()
                #obj.node_element[itotal:itotal2, 0] = nids
                #obj.node_element[itotal:itotal2, 1] = eids

            ##[lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm]
            #strings = frombuffer(data, dtype=self._uendian + 'S4').reshape(nelements, 11)[:, 2].copy()
            #obj.location[itotal:itotal2] = strings
            #obj.data[obj.itime, itotal:itotal2, :] = floats[:, 3:]#.copy()
            #obj.itotal = itotal2
            #obj.ielement = ielement2
            #n = ndata
        else:
            s = Struct(mapfmt(self._endian + b'i14f', self.size))
            #nelements = ndata // 60  # 15*4
            for unused_i in range(nelements):
                edata = data[n:n+ntotal]
                out = s.unpack(edata)
                (eid_device, lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm) = out
                eid = eid_device // 10
                assert eid > 0, eid
                #self.obj.add_sort1(dt, eid, lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc,
                                   #sa, sb, sc, epr, ovm)
                n += ntotal

        assert ndata > 0, ndata
        assert nelements > 0, 'nelements=%r element_type=%s element_name=%r' % (nelements, self.element_type, self.element_name)
        #assert ndata % ntotal == 0, '%s n=%s nwide=%s len=%s ntotal=%s' % (self.element_name, ndata % ntotal, ndata % self.num_wide, ndata, ntotal)
        assert self.num_wide * 4 * self.factor == ntotal, 'numwide*4=%s ntotal=%s' % (self.num_wide * 4, ntotal)
        assert n > 0, "n = %s result_name=%s" % (n, result_name)
        return n
Example n. 59
0
def unpack_records(format, data):
    record_struct = Struct(format)
    return (record_struct.unpack_from(data, offset)
            for offset in range(0, len(data), record_struct.size))
Example n. 60
0
    inet_pton,
)

IFA_LOCAL = 2
IFF_UP = 0x1
IFLA_IFNAME = 3
NLMSG_ERROR = 2
RTM_NEWLINK = 16
RTM_GETLINK = 18
RTM_NEWADDR = 20
NLM_F_REQUEST = 0x1
NLM_F_ACK = 0x4
NLM_F_EXCL = 0x200
NLM_F_CREATE = 0x400

nlmsghdr = Struct('=IHHII')
nlmsgerr = Struct('i')
rtattr = Struct('HH')
ifinfomsg = Struct('BHiII')
ifaddrmsg = Struct('BBBBi')


def create_nlmsg(nlmsg_type, nlmsg_flags, nlmsg_seq, nlmsg_pid, data):
    nlmsg_len = nlmsghdr.size + len(data)
    return nlmsghdr.pack(nlmsg_len, nlmsg_type, nlmsg_flags, nlmsg_seq,
                         nlmsg_pid) + data


def create_rtattr(rta_type, data):
    rta_len = rtattr.size + len(data)
    return rtattr.pack(rta_len, rta_type) + data
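
A sketch of how these helpers compose (illustrative only: a real request needs an open netlink socket, and attributes bound for the kernel must additionally be padded to 4-byte alignment, which create_rtattr above does not do):

ifname = b'eth0\x00'
attr = create_rtattr(IFLA_IFNAME, ifname)
# ifinfomsg fields: family, type, index, flags, change mask
payload = ifinfomsg.pack(0, 0, 0, IFF_UP, IFF_UP) + attr
msg = create_nlmsg(RTM_NEWLINK, NLM_F_REQUEST | NLM_F_ACK, 1, 0, payload)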