def array_packing(arrdef, *more_arrdef):
    """Pack multiple arrays into one string, aligning each array start.

    Each ``arrdef`` is a ``(struct_format, values)`` pair.  Whenever the next
    array has a wider element than the previous one, padding (``ALIGN_CHAR``)
    is inserted so the array starts on a multiple of its element size.
    Returns the packed string.
    """
    arrtype, arr = arrdef
    mystruct = Struct(arrtype)
    last_bytes = mystruct.size
    cur_size = last_bytes * len(arr)
    mybuffer = StringIO()
    mybuffer.write(''.join(mystruct.pack(val) for val in arr))
    for arrtype, arr in more_arrdef:
        mystruct = Struct(arrtype)
        cur_bytes = mystruct.size
        if cur_bytes > last_bytes:
            # align the buffer to the element size of the wider array
            fill_bytes = align2pitch(cur_size, cur_bytes)
            mybuffer.write(ALIGN_CHAR * fill_bytes)
            # BUG FIX: account for the padding in the running size instead of
            # corrupting cur_bytes (the old `cur_bytes += fill_bytes` made
            # last_bytes wrong for the next iteration).
            cur_size += fill_bytes
        # BUG FIX: accumulate the total bytes written; the old code reset
        # cur_size using the *previous* element size (last_bytes), so every
        # later alignment computation was based on a wrong offset.
        cur_size += cur_bytes * len(arr)
        mybuffer.write(''.join(mystruct.pack(val) for val in arr))
        last_bytes = cur_bytes
    rtn = mybuffer.getvalue()
    mybuffer.close()
    return rtn
def _get_index_translation_table(self):
    """Validate the on-disk index translation table, regenerating it if needed.

    The table is a flat run of packed uint32 entries terminated by a zero
    "finished" sentinel.  Side effect: sets ``self.task_size`` to the number
    of entries (excluding the sentinel).
    """
    start = time()
    struct = Struct("I")  # one unsigned 32-bit int per table entry
    try:
        # NOTE(review): opened in text mode "r" although the payload is
        # binary — works on POSIX, would corrupt seeks/reads on Windows.
        with open(self.res("trans_table"), "r") as table:
            # jump to the last 4-byte word: the "finished" sentinel
            table.seek(-4, os.SEEK_END)
            position = table.tell()
            # each entry is 4 bytes; sentinel position == entry count * 4
            self.task_size = position / 4
            result = table.read(4)
            result = struct.unpack(result)[0]
            if result != 0:
                # sentinel missing: a previous run died mid-write, rebuild
                print "index translation table was incomplete. regenerating!"
                print "wasted %f seconds" % (time() - start)
                start = time()
                raise IOError
            print "validated index file from hard drive"
    except IOError:
        # file absent or incomplete: regenerate it from scratch
        count = 0
        with open(self.res("trans_table"), "w") as table:
            for num in fixed_bits(self.digits, self.bits_set):
                table.write(struct.pack(num))
                count += 1
            # add a "finished" zero-fourbyte
            table.write(struct.pack(0))
            self.task_size = count
        print "wrote index translation table to file"
    print "took %f seconds for index translation table" % (time() - start)
def make_data(self, data):
    """Serialize all tables into aligned binary blocks.

    ``data`` maps table names to row lists.  Feeds the rows into each table,
    then packs a package header, per-table headers, per-column headers and
    the column payloads, aligned to ``ALIGN_BYTES``.
    Raises KeyError if a table name is missing from ``data``.
    """
    coldict = self.col_by_uniname
    for table in self._tables:
        table.set_rows(data[table.name])
    package_header_struct = Struct(S_PACKAGE_HEADER_STRUCT)
    table_header_struct = Struct(S_TABLE_HEADER_STRUCT)
    col_header_struct = Struct(S_COL_HEADER_STRUCT)
    table_header_list = []
    col_header_list = []
    col_data_list = []
    for table in self._tables:
        table_len = table.get_data()
        table_header_list.append(table_header_struct.pack(table_len))
        if table_len:
            for col_uniname in table.col_uninames:
                col = coldict[col_uniname]
                # FIX: do not shadow the `data` parameter with the column blob
                store_type, compression_id, col_bytes = col.get_data()
                col_size = pitched_len(len(col_bytes), ALIGN_BYTES)
                col_header_list.append(col_header_struct.pack(store_type, compression_id))
                col_data_list.append(col_bytes)
                self.logger.info("%s, %s, %s, %s", col_uniname, store_type, compression_id, col_size)
    # placeholder package header; presumably patched later — TODO confirm
    package_header = package_header_struct.pack(0, 0)
    # FIX: keep the joined blobs in their own names instead of clobbering
    # the Struct objects (old code reused table_header_struct/col_header_struct)
    table_headers = ''.join(table_header_list)
    col_headers = ''.join(col_header_list)
    return make_aligned_blocks(ALIGN_BYTES, package_header, table_headers,
                               col_headers, *col_data_list)
def inner_loop(self):
    """Main minimization loop: write one packed int64 per number.

    For each number either computes its representant via
    ``minimize_rule_number`` (caching equivalent higher numbers along the
    way) or serves the answer from the cache.  Output records: a negative
    count when the number is its own representant, otherwise the
    representant itself.  Timing stats are appended to ``self.timings``.
    """
    if self.task_size == 0:
        print "already calculated everything."
        return
    self.outfile = open(self.res("output"), "a")
    outfile = self.outfile
    neigh = self.neigh
    # emit a timing sample roughly 2000 times over the whole run
    stats_step = max(10, self.task_size / 2000)
    packstruct = Struct("q")  # one signed 64-bit int per result record
    cachesize = self.cachesize
    cachecontents = len(self.cache)
    print "writing out the size of the data dictionary every %d steps" % stats_step
    print "goint to calculate %d numbers." % (self.task_size)
    last_time = time()
    iterator = self.number_iter
    care_about_ordering = False
    for index, number in iterator:
        if self.cache[number] == 0:
            # not cached: run the full minimization
            representant, (path, rule_arr), everything = minimize_rule_number(neigh, number)
            everything = everything.keys()
            everything.remove(number)
            try:
                everything.remove(representant)
            except ValueError:
                pass
            if len(everything) > 0:
                lowest = everything[0]
                # try lowering the number of inserted high numbers
                for num in everything:
                    if num > number and cachecontents < cachesize and (num < lowest or not care_about_ordering):
                        if self.cache[num] == 0:
                            self.cache[num] = representant
                            cachecontents += 1
                            lowest = num
            if number == representant:
                # self-representant: store the (negated) class size
                outfile.write(packstruct.pack(-len(everything)))
            else:
                outfile.write(packstruct.pack(representant))
        else:
            # cache hit: emit the stored representant and free the slot
            self.cachehits += 1
            if cachecontents > self.max_cache_fill:
                self.max_cache_fill = cachecontents
            if cachecontents > 0.75 * cachesize:
                # cache pressure: only insert numbers that lower `lowest`
                care_about_ordering = True
            cachecontents -= 1
            val = self.cache[number]
            del self.cache[number]
            self.outfile.write(packstruct.pack(val))
        if index % stats_step == 0:
            # milliseconds per item since the last sample
            endtime, last_time = time() - last_time, time()
            self.timings.write("%f\n" % ((endtime * 1000) / stats_step))
        self.items_done += 1
def main():
    """Wrap a raw binary blob in a minimal Mach-O image.

    Reads the input file, pads it to a 16-byte boundary, and writes a
    Mach-O header, one __TEXT segment command with a single __text section,
    followed by the (padded) payload.
    """
    (archstr, inputfn, outputfn, vmaddr) = parse_options()
    arch = Arch(archstr)
    with open(inputfn, "rb") as fin, open(outputfn, "wb") as fout:
        fin.seek(0, os.SEEK_END)
        filesize = fin.tell()
        fin.seek(0, os.SEEK_SET)
        # BUG FIX: pad up to a 16-byte boundary.  The old expression
        # `16 - (filesize & 16)` tested bit 4 instead of the low 4 bits,
        # so e.g. a 24-byte payload received no padding at all.
        excess = -filesize % 16
        filesize += excess
        endian = arch.endian
        is64bit = arch.is64bit
        cputype = arch.cputype
        cpusubtype = arch.cpusubtypePacked
        # prepare mach_header(_64): magic + 7 uint32 fields (+4 pad on 64-bit)
        if is64bit:
            magic = 0xFEEDFACF
            mach_header = Struct(endian + "7I4x")
        else:
            magic = 0xFEEDFACE
            mach_header = Struct(endian + "7I")
        # prepare segment_command(_64)
        if is64bit:
            segment_command = Struct(endian + "2I16s4Q4I")
            cmd = LC_SEGMENT_64
        else:
            segment_command = Struct(endian + "2I16s8I")
            cmd = LC_SEGMENT
        cmdsize = segment_command.size
        protlevel = 5  # VM_PROT_READ | VM_PROT_EXECUTE
        # prepare section(_64)
        if is64bit:
            section_stru = Struct(endian + "16s16s2Q7I4x")
        else:
            section_stru = Struct(endian + "16s16s9I")
        cmdsize += section_stru.size
        fileoff = cmdsize + mach_header.size
        # ncmds=1, filetype=1 (MH_OBJECT-style minimal image), flags=1
        header_bytes = mach_header.pack(magic, cputype, cpusubtype, 1, 1, cmdsize, 1)
        fout.write(header_bytes)
        segment_bytes = segment_command.pack(cmd, cmdsize, "__TEXT", vmaddr,
                                             filesize, fileoff, filesize,
                                             protlevel, protlevel, 1, 0)
        fout.write(segment_bytes)
        # flags 0x80000400 = S_ATTR_PURE_INSTRUCTIONS | S_ATTR_SOME_INSTRUCTIONS
        section_bytes = section_stru.pack("__text", "__TEXT", vmaddr, filesize,
                                          fileoff, 4, 0, 0, 0x80000400, 0, 0)
        fout.write(section_bytes)
        copyfileobj(fin, fout)
        fout.write(b"\0" * excess)
def write(filename, mesh):
    """Export a triangulated mesh into a directory of raw binary streams.

    Creates ``filename`` as a directory containing three files:
    ``position`` (float3 per vertex), ``normal`` (float3 per vertex) and
    ``index`` (ushort per corner).  Flat-shaded faces duplicate their
    vertices so each copy carries the face normal; smooth faces share
    vertices through a reindex map.  Raises for non-triangle faces.
    """
    ppack = Struct('fff')  # float3 position
    npack = ppack          # float3 normal (same layout)
    ipack = Struct('H')    # ushort index
    pbuffer = BytesIO()
    nbuffer = BytesIO()
    ibuffer = BytesIO()
    reindex = {}
    total = 0
    vertices = mesh.vertices
    for face in mesh.faces:
        indices = face.vertices
        if len(indices) != 3:
            raise Exception("Untriangulated face.")
        # swap the last two corners to flip the winding order
        indices = [indices[0], indices[2], indices[1]]
        if not face.use_smooth:
            # flat shading: emit fresh vertices carrying the face normal
            N = face.normal
            for index in indices:
                V = vertices[index].co
                pbuffer.write(ppack.pack(V.x, V.y, V.z))
                nbuffer.write(npack.pack(N.x, N.y, N.z))
                ibuffer.write(ipack.pack(total))
                total += 1
        else:
            # smooth shading: reuse vertices via the reindex map
            for index in indices:
                if index not in reindex:
                    reindex[index] = total
                    total += 1
                    V = vertices[index].co
                    N = vertices[index].normal
                    pbuffer.write(ppack.pack(V.x, V.y, V.z))
                    nbuffer.write(npack.pack(N.x, N.y, N.z))
                ibuffer.write(ipack.pack(reindex[index]))
    try:
        os.mkdir(filename)
    except OSError:
        pass  # probably already exists
    # BUG FIX: portable os.path.join instead of a hard-coded '\\' separator,
    # and context managers so the handles are always closed.
    with open(os.path.join(filename, 'position'), 'wb') as out:
        out.write(pbuffer.getvalue())
    with open(os.path.join(filename, 'normal'), 'wb') as out:
        out.write(nbuffer.getvalue())
    with open(os.path.join(filename, 'index'), 'wb') as out:
        out.write(ibuffer.getvalue())
def write_op2(self, f, is_mag_phase=False):
    """Write this result table to an open OP2 binary file handle.

    Emits a table header, then for every time/load step an op2 header
    record followed by one (node_id*10+device, gridtype, t1..t3, r1..r3)
    record per node.
    """
    self._write_table_header(f)
    # NOTE(review): '%sif' yields e.g. '14if' (14 ints then one float) —
    # looks inconsistent with the transient '2i6f' layout; confirm intent.
    if isinstance(self.nonlinear_factor, float):
        op2_format = '%sif' % (7 * self.ntimes)
    else:
        op2_format = '2i6f' * self.ntimes
    s = Struct(op2_format)
    node = self.node_gridtype[:, 0]
    gridtype = self.node_gridtype[:, 1]
    format_table4_1 = Struct(b'9i')  # record-4 leading marker block
    format_table4_2 = Struct(b'3i')  # record-4 trailing marker block
    # table 4 info
    nnodes = self.data.shape[0]
    # device code is folded into the node id (nid*10 + device)
    nnodes_device = self.node_gridtype[:, 0] * 10 + self.device_code
    # (2+6) => (node_id, gridtypei, t1i, t2i, t3i, r1i, r2i, r3i)
    ntotal = self.ntimes * nnodes * (2 + 6)
    table_num = 3
    for itime in xrange(self.ntimes):
        self._write_op2_header(f, table_num, itime)
        # record 4: markers frame the data block (sizes are in bytes, *4)
        header = [4, 0, 4, 4, -table_num - 1, 4, 4, 4 * ntotal, 4]
        f.write(format_table4_1.pack(*header))
        t1 = self.data[itime, :, 0]
        t2 = self.data[itime, :, 1]
        t3 = self.data[itime, :, 2]
        r1 = self.data[itime, :, 3]
        r2 = self.data[itime, :, 4]
        r3 = self.data[itime, :, 5]
        i = 0
        for node_id, gridtypei, t1i, t2i, t3i, r1i, r2i, r3i in izip(nnodes_device, gridtype, t1, t2, t3, r1, r2, r3):
            vals = (node_id, gridtypei, t1i, t2i, t3i, r1i, r2i, r3i)
            f.write(s.pack(*vals))
        # op2 subtables count down by 2 per step
        table_num -= 2
        header = [4, 4 * ntotal, 4]
        f.write(format_table4_2.pack(*header))
def write_records(records, format, f):
    """Serialize a sequence of tuples into *f* as packed binary structs."""
    packer = Struct(format)
    f.writelines(packer.pack(*record) for record in records)
def _servePublicKey(self):
    """Handle the key-exchange phase: serve our public key or start encryption.

    Consumes bytes from ``self.buff``.  Raises on bad signature or an
    unexpected packet id (after sending the matching error packet).
    """
    # either serve publickey or start encryption
    packetid = self.buff[0]
    if packetid == P_REQUEST_PUBLIC_KEY:
        # reply: packet id (B), key length (H), DER-encoded key (%ds)
        keydata = self.keymanager.publickey.exportKey('DER')
        length = len(keydata)
        packetformat = Struct('!BH%ds' % length)
        self.sock.send(packetformat.pack(P_PUBLIC_KEY, length, keydata))
        del self.buff[0]
    elif packetid == P_REQUEST_ENCRYPTION_START:
        bufferlength = len(self.buff)
        # layout: id (1) + encrypted secret (256) + IV (block_size), then
        # a 256-byte RSA signature over all of that plus the biscuit
        signedlength = 1 + 256 + self.cipherscheme.block_size
        packetlength = signedlength + 256
        if bufferlength < packetlength:
            return  # wait for more data
        signable = str(buffer(self.buff, 0, signedlength)) + self.biscuit
        signature = str(buffer(self.buff, signedlength, 256))
        if not self.keymanager.verify(signable, signature, self.hostkey):
            self._sendError(P_ERROR_BAD_SIGNATURE)
            raise InvalidSignatureException('Client failed to sign message')
        logging.debug('Client is authenticated')
        # recover the session secret and IV from the signed region
        self.ensecret = self.keymanager.decrypt(str(buffer(self.buff, 1, 256)))
        self.eniv = str(buffer(self.buff, 257, self.cipherscheme.block_size))
        self._startEncryption()
        del self.buff[:packetlength]
    else:
        self._sendError(P_ERROR_BAD_PACKET)
        raise BadPacketIdentifierException('Unexpected packet ID')
def putPacket(self, payload, flags=0):
    """Encrypt, authenticate and send one framed packet.

    Frame: length (L), padding length (B), flags (B), payload, random
    padding — encrypted, then followed by an HMAC digest over the
    sequence number and the ciphertext.
    """
    if flags & 0b00000001:
        # flag bit 0 requests payload compression
        payload = compress(payload)
    # length calculations: pad so the 10-byte (frame + HMAC slack) prefix
    # plus payload lands on a cipher block boundary
    blocklength = self.cipherscheme.block_size
    payloadlength = len(payload)
    paddinglength = 4 + blocklength - (10 + payloadlength) % blocklength
    packetlength = 6 + payloadlength + paddinglength
    # create packet
    fields = (
        packetlength,
        paddinglength,
        flags,
        payload,
        self.prng.read(paddinglength)
    )
    packetformat = Struct('!LBB%ds%ds' % (payloadlength, paddinglength))
    encpacket = bytearray(self.encipher.encrypt(packetformat.pack(*fields)))
    # update message authentication (encrypt-then-MAC, keyed on seq number)
    self.ehmac.update(self.longformat.pack(self.enseqno))
    self.ehmac.update(buffer(encpacket))
    self.enseqno += 1
    # append the most recent digest
    encpacket.extend(self.ehmac.digest())
    # put packet on the wire; loop handles partial send() results
    packet = buffer(encpacket)
    while packet:
        packet = packet[self.sock.send(packet):]
def write_records(records, format, f):
    """Write a sequence of tuples to a binary file of structures."""
    pack = Struct(format).pack
    for rec in records:
        f.write(pack(*rec))
class StreamSerializer(object):
    """Pass pickled Python objects over file descriptors / streams.

    Messages are length-prefixed with a network-order signed int.
    SECURITY NOTE: pickle deserialization executes arbitrary code —
    only use between trusted endpoints.
    """

    length_format = '!i'

    def __init__(self):
        self.length_struct = Struct(self.length_format)
        self.length = calcsize(self.length_format)

    @staticmethod
    def encode(obj):
        """Serialize *obj* to a pickle byte string."""
        return pickle.dumps(obj)

    @staticmethod
    def decode(message):
        """Deserialize a pickle byte string back into an object."""
        return pickle.loads(message)

    def encode_with_length(self, obj):
        """Encode object and prepend length to message."""
        payload = self.encode(obj)
        header = self.length_struct.pack(len(payload))
        return header + payload

    def decode_from_stream(self, fd, timeout=5):
        """Read one length-prefixed object from *fd*, waiting up to *timeout*."""
        ready, _, _ = select([fd], [], [], timeout)
        if not ready:
            raise RuntimeError("Can't read object from {0!r}.".format(fd))
        (message_length,) = self.length_struct.unpack(os.read(fd, self.length))
        assert message_length > 0, 'wrong message length provided'
        return self.decode(os.read(fd, message_length))
def write_binary_stl(self, stl_filename):
    """Write an STL binary file.

    Layout: 80-byte header, uint32 triangle count, then per triangle
    12 floats (normal + three vertices) and a zero uint16 attribute.
    Normals are computed from the vertices and are NOT normalized.
    """
    with open(stl_filename, "wb") as f:
        if hasattr(self, 'header'):
            # BUG FIX: str.ljust returns a new string; the old code dropped
            # the result and wrote the unpadded header.
            f.write(self.header.ljust(80, '\0'))
        else:
            header = '%-80s' % stl_filename
            f.write(pack('80s', header))
        nelements, three = self.elements.shape
        f.write(pack('i', nelements))
        elements = self.elements
        p1 = self.nodes[elements[:, 0], :]
        p2 = self.nodes[elements[:, 1], :]
        p3 = self.nodes[elements[:, 2], :]
        # facet normals via the cross product of two edge vectors
        n = cross(p2 - p1, p3 - p1)
        s = Struct('12fH')
        for eid in range(nelements):
            data = s.pack(n[eid, 0], n[eid, 1], n[eid, 2],
                          p1[eid, 0], p1[eid, 1], p1[eid, 2],
                          p2[eid, 0], p2[eid, 1], p2[eid, 2],
                          p3[eid, 0], p3[eid, 1], p3[eid, 2],
                          0)
            f.write(data)
class ULInt24(StaticField):
    """
    A custom made construct for handling 3-byte types as used in ancient
    file formats.  Stored little-endian as one byte + one ushort.
    A better implementation would be writing a more flexible version of
    FormatField, rather than specifically implementing it for this case.
    """
    __slots__ = ["packer"]

    def __init__(self, name):
        self.packer = Packer("<BH")
        StaticField.__init__(self, name, self.packer.size)

    def __getstate__(self):
        # the Packer is not picklable; stash its format string instead
        attrs = StaticField.__getstate__(self)
        attrs["packer"] = attrs["packer"].format
        return attrs

    def __setstate__(self, attrs):
        attrs["packer"] = Packer(attrs["packer"])
        # BUG FIX: the unbound base-class call was missing `self`
        return StaticField.__setstate__(self, attrs)

    def _parse(self, stream, context):
        try:
            # low byte + high ushort => 24-bit unsigned value
            vals = self.packer.unpack(_read_stream(stream, self.length))
            return vals[0] + (vals[1] << 8)
        except Exception:
            ex = sys.exc_info()[1]
            raise FieldError(ex)

    def _build(self, obj, stream, context):
        try:
            vals = (obj % 256, obj >> 8)
            # BUG FIX: unpack the tuple — pack() takes the values as
            # separate arguments, not as one tuple.
            _write_stream(stream, self.length, self.packer.pack(*vals))
        except Exception:
            ex = sys.exc_info()[1]
            raise FieldError(ex)
class BinStruct(object):
    """Thin wrapper around a struct parser with an optional size override.

    Subclasses redefine ``_format`` (and the ``_type``/``_min``/``_max``
    metadata) for concrete wire types.
    """

    _format = '!c'
    _type = None
    _min = None
    _max = None

    def __init__(self):
        self.struct = SStruct(self._format)
        self._custom_size = None

    @property
    def custom_size(self):
        """User-supplied size override (None when unset)."""
        return self._custom_size

    @custom_size.setter
    def custom_size(self, value):
        self._custom_size = value

    @property
    def size(self):
        """Byte size of the packed format."""
        return self.struct.size

    def pack(self, string):
        """Pack a single value into its binary representation."""
        return self.struct.pack(string)

    def unpack(self, string):
        """Unpack a binary string into a tuple of values."""
        return self.struct.unpack(string)
def parseLine( self, line ):
    """Parse one gcode line into a fixed 16-byte binary record.

    Record layout 'cBhhhhhhBc': command letter, command number, X, Y, Z,
    I, J, feed rate (each quantized to an int16), a presence-flag byte,
    and a literal '#' terminator.  Comment lines and blanks are skipped.
    """
    binary16ByteRepository = self.binary16ByteRepository
    splitLine = line.split()
    if len( splitLine ) < 1:
        return
    firstWord = splitLine[ 0 ]
    if len( firstWord ) < 1:
        return
    firstLetter = firstWord[ 0 ]
    if firstLetter == '(':
        # gcode comment line
        return
    # quantize each axis/parameter by its configured step length
    feedRateInteger = getIntegerFromCharacterLengthLineOffset( 'F', 0.0, splitLine, binary16ByteRepository.feedRateStepLength.value )
    iInteger = getIntegerFromCharacterLengthLineOffset( 'I', 0.0, splitLine, binary16ByteRepository.xStepLength.value )
    jInteger = getIntegerFromCharacterLengthLineOffset( 'J', 0.0, splitLine, binary16ByteRepository.yStepLength.value )
    xInteger = getIntegerFromCharacterLengthLineOffset( 'X', binary16ByteRepository.xOffset.value, splitLine, binary16ByteRepository.xStepLength.value )
    yInteger = getIntegerFromCharacterLengthLineOffset( 'Y', binary16ByteRepository.yOffset.value, splitLine, binary16ByteRepository.yStepLength.value )
    zInteger = getIntegerFromCharacterLengthLineOffset( 'Z', binary16ByteRepository.zOffset.value, splitLine, binary16ByteRepository.zStepLength.value )
    sixteenByteStruct = Struct( 'cBhhhhhhBc' )
    # flag bits record which words were actually present on the line
    flagInteger = getIntegerFlagFromCharacterSplitLine( 'X', splitLine )
    flagInteger += 2 * getIntegerFlagFromCharacterSplitLine( 'Y', splitLine )
    flagInteger += 4 * getIntegerFlagFromCharacterSplitLine( 'Z', splitLine )
    flagInteger += 8 * getIntegerFlagFromCharacterSplitLine( 'I', splitLine )
    flagInteger += 16 * getIntegerFlagFromCharacterSplitLine( 'J', splitLine )
    flagInteger += 32 * getIntegerFlagFromCharacterSplitLine( 'F', splitLine )
    packedString = sixteenByteStruct.pack( firstLetter, int( firstWord[ 1 : ] ), xInteger, yInteger, zInteger, iInteger, jInteger, feedRateInteger, flagInteger, '#' )
    self.output.write( packedString )
def write_records(records, binformat, f):
    """Write a sequence of tuples to a binary file of structures."""
    packer = Struct(binformat)
    for record in records:
        chunk = packer.pack(*record)
        f.write(chunk)
def write_records(records, format, f):
    """Write a sequence of iterables to a binary file."""
    encoder = Struct(format)
    f.write(b''.join(encoder.pack(*item) for item in records))
def write_binary_stl(self, stl_filename):
    """Write an STL binary file.

    Layout: 80-byte header, uint32 triangle count, then per triangle
    12 floats (normal + three corner points) and a zero uint16 attribute.
    Normals are computed from the corner points and are NOT normalized.
    """
    with open(stl_filename, "wb") as infile:
        if hasattr(self, 'header'):
            # BUG FIX: str.ljust returns a new string; the old code dropped
            # the result and wrote the unpadded header.
            infile.write(self.header.ljust(80, '\0'))
        else:
            header = '%-80s' % stl_filename
            infile.write(pack('80s', header))
        nelements = self.elements.shape[0]
        infile.write(pack('i', nelements))
        elements = self.elements
        p1 = self.nodes[elements[:, 0], :]
        p2 = self.nodes[elements[:, 1], :]
        p3 = self.nodes[elements[:, 2], :]
        # facet normals via the cross product of two edge vectors
        n = cross(p2 - p1, p3 - p1)
        s = Struct('12fH')
        for eid in range(nelements):
            data = s.pack(n[eid, 0], n[eid, 1], n[eid, 2],
                          p1[eid, 0], p1[eid, 1], p1[eid, 2],
                          p2[eid, 0], p2[eid, 1], p2[eid, 2],
                          p3[eid, 0], p3[eid, 1], p3[eid, 2],
                          0)
            infile.write(data)
def merge_to_binary(indirec, outfile):
    """Merge all '*.conv' TSV files in *indirec* into one packed binary file.

    Builds a struct format from LINK_FIELDS (ints, floats, a fixed-width
    protein string and single chars), converts each grouped row and writes
    the packed record to *outfile*.  Rows with unconvertible fields are
    skipped silently.
    """
    def safefloat(string):
        # '' -> 0.0; non-numeric -> None (row is dropped later)
        try:
            return float(string)
        except ValueError:
            if len(string) == 0:
                return 0.0
            else:
                return None
    def safeint(string):
        # '' -> 0; non-numeric -> None (row is dropped later)
        try:
            return int(string.strip())
        except ValueError:
            if len(string) == 0:
                return 0
            else:
                return None
    files = sorted([x for x in os.listdir(indirec) if x.endswith('.conv')])
    # width of the fixed-length protein-name field, from the file names
    max_len = max(len(x.split('--')[0]) for x in files)
    # (field-name fragment, struct format char(s), converter)
    mapping = (('Start','i', safeint),
               ('End','i', safeint),
               ('Num', 'i', safeint),
               ('Prot', '%ic'%max_len, lambda x: x.ljust(max_len)),
               ('Score', 'f', safefloat),
               ('Cons', 'f', safefloat),
               ('Info', 'f', safefloat),
               ('Dist', 'f', safefloat),
               ('Seq', 'c', lambda x:x))
    mapping_list = []
    fmtstr = ''
    # first fragment that matches the field name decides its format
    for field in LINK_FIELDS:
        for key, fmt, func in mapping:
            if key in field:
                print field, key, fmt
                fmtstr += fmt
                mapping_list.append(func)
                break
    StructClass = Struct(fmtstr)
    handle = FileInput([os.path.join(indirec, x) for x in files])
    reader = csv.reader(handle, delimiter = '\t')
    grouper = itemgetter(*range(6)) #group by source-prot through target-end
    buf = open(outfile, 'wb')
    count = 0
    for ind, (key, rows) in enumerate(groupby(reader, key = grouper)):
        # keep only the first row of each group
        trow = rows.next()
        converted = [func(field) for func, field in zip(mapping_list, trow)]
        if all(x is not None for x in converted):
            # NOTE(review): chain() over converted[0]/[1] implies those are
            # iterables (padded char fields?) — confirm against LINK_FIELDS.
            arglist = list(chain(converted[0], converted[1], converted[2:]))
            data = StructClass.pack(*arglist)
            buf.write(data)
            count += 1
            if count % 10000 == 0:
                print count, key
class convert_128(object):
    """Pack/unpack a 128-bit integer as two big-endian 64-bit halves."""

    def __init__(self, conv64):
        # duplicate the 64-bit converter's last format char: '!2q' / '!2Q'
        self.fmt = Struct(b('!2') + conv64.format[-1:])

    def pack(self, num):
        high, low = num >> 64, num & 0xffffffffffffffff
        return self.fmt.pack(high, low)

    def unpack(self, s):
        high, low = self.fmt.unpack(s)
        return ((high << 64) | low,)
def writeRecord(record, format, filename):
    """Write a single tuple to *filename* as one packed binary struct."""
    packer = Struct(format)
    with open(filename, 'wb') as out:
        out.write(packer.pack(*record))
def write_binary(row_set, schema, output):
    """Pack every row of *row_set* into *output* using the schema's column types."""
    desc = '<' + ''.join(type_fmt(column) for column in schema['columnTypes'])
    logging.info("Creating a binary file with struct.fmt={}".format(desc))
    packer = Struct(desc)
    for row in row_set:
        packer_args = [cell.value for cell in row]
        output.write(packer.pack(*packer_args))
class TAG_Int_Array(TAG, MutableSequence):
    """
    TAG_Int_Array, comparable to a collections.UserList with
    an intrinsic name whose values must be integers
    """
    id = TAG_INT_ARRAY

    def __init__(self, name=None, buffer=None):
        # TODO: add a value parameter as well
        super(TAG_Int_Array, self).__init__(name=name)
        if buffer:
            self._parse_buffer(buffer)

    def update_fmt(self, length):
        """ Adjust struct format description to length given """
        self.fmt = Struct(">%di" % length)

    # Parsers and Generators
    def _parse_buffer(self, buffer):
        # leading TAG_Int gives the element count
        length = TAG_Int(buffer=buffer).value
        self.update_fmt(length)
        self.value = list(self.fmt.unpack(buffer.read(self.fmt.size)))

    def _render_buffer(self, buffer):
        length = len(self.value)
        self.update_fmt(length)
        TAG_Int(length)._render_buffer(buffer)
        buffer.write(self.fmt.pack(*self.value))

    # Mixin methods (MutableSequence protocol, delegating to self.value)
    def __len__(self):
        return len(self.value)

    def __iter__(self):
        return iter(self.value)

    def __contains__(self, item):
        return item in self.value

    def __getitem__(self, key):
        return self.value[key]

    def __setitem__(self, key, value):
        self.value[key] = value

    def __delitem__(self, key):
        del self.value[key]

    def insert(self, key, value):
        self.value.insert(key, value)

    # Printing and Formatting of tree
    def valuestr(self):
        return "[%i int(s)]" % len(self.value)
class TARGET:
    """Given a ctype (initialized or not) this coordinates all the
    information needed to read, write and compare."""
    def __init__ (self, ctype):
        self.alignment = 1
        self.ctype = ctype
        # size of target data
        self.size = sizeof (ctype)
        self.type = ctype._type_
        # get the format type needed for struct.unpack/pack: drill down
        # through nested array/pointer _type_ attributes to the base code.
        while hasattr (self.type, "_type_"):
            self.type = self.type._type_
        # string_buffers and char arrays have _type_ 'c'
        # but that makes it slightly slower to unpack
        # so swap is for 's'.
        if self.type == "c":
            self.type = "s"
        # calculate byte alignment. this speeds up scanning substantially
        # because we can read and compare every alignment bytes
        # instead of every single byte.
        # although if we are scanning for a string the alignment is
        # defaulted to 1 (im not sure if this is correct).
        elif ASSUME_ALIGNMENT:
            # calc alignment: largest power of two (up to 16) dividing size
            divider = 1
            for i in xrange (4):
                divider *= 2
                if not self.size % divider:
                    self.alignment = divider
        # size of target ctype.
        self.type_size = calcsize (self.type)
        # length of target / array length.
        self.length = self.size / self.type_size
        # raw buffer contents for arrays, else the scalar .value
        self.value = getattr (ctype, "raw", ctype.value)
        # the format string used for struct.pack/unpack.
        self.format = str (self.length) + self.type
        # efficient packer / unpacker for our own format.
        self.packer = Struct (self.format)

    def get_packed (self):
        """Gets the byte representation of the ctype value for use with
        WriteProcessMemory."""
        return self.packer.pack (self.value)

    def __str__ (self):
        return str (self.ctype) [:10] + "..." + " <" + str (self.value)[:10]+ "..." + ">"
def write_table_header(f, fascii, table_name):
    """Write an OP2 table-name header record, mirroring it to the ascii log.

    *table_name* must be exactly 8 bytes (blank padded).
    """
    table0 = [
        4, 2, 4,
        8, bytes(table_name), 8,
    ]
    assert len(table_name) == 8, table_name
    header_struct = Struct('<4i 8s i')
    f.write(header_struct.pack(*table0))
    fascii.write('OUG header0 = %s\n' % table0)
def encode(self):
    """Serialize this object into a framed binary packet.

    Frame: 0x99 marker, identifier byte, info bytes, data bytes, 0x66
    trailer.  Returns "" when no identifier is set.
    Python 2 only: relies on str(bytearray(...)) yielding raw bytes.
    """
    if None is self.identifier:
        return ""
    # build the format with the measured byte lengths of info and data
    format = "2B%(info)ds%(data)dsB" % {
        "info": len(str(bytearray(self.info))),
        "data": len(str(bytearray(self.data))),
    }
    packer = Struct(format)
    return packer.pack(0x99, self.identifier, str(bytearray(self.info)), str(bytearray(self.data)), 0x66)
def infoDeclare(dictionary, packet_type, base_type, name, index):
    """Register *packet_type* under *index*/*name* in the lookup tables of
    *dictionary*, install attribute defaults, and precompile a fast packer
    when every networked field has a fixed-width format.

    NOTE(review): ``base_type`` is unused here — confirm it is needed.
    """
    # setup dictionary
    if 'type2type_id' not in dictionary:
        dictionary['type2type_id'] = {}
    if 'type_id2type' not in dictionary:
        dictionary['type_id2type'] = {}
    if 'type2name' not in dictionary:
        dictionary['type2name'] = {}
    if 'name2type' not in dictionary:
        dictionary['name2type'] = {}
    if 'PacketFactory' not in dictionary:
        dictionary['PacketFactory'] = {}
    if 'PacketNames' not in dictionary:
        dictionary['PacketNames'] = {}
    # setup _type: class attribute defaults for every declared field
    packet_type.type = index
    for attr, default, _s_type in packet_type.info:
        setattr(packet_type, attr, default)
    # binpack info: fields that go on the wire ('no net' fields excluded)
    packet_type.binarypack_info = [(attr, s_type) for attr, _default, s_type in packet_type.info if s_type != 'no net']
    packet_type.msgpack_info = [(attr, s_type) for attr, _default, s_type in packet_type.info if s_type not in ('no net', 'type')]
    # fast pack: '!BH' head = packet id (B) + payload size (H)
    struct_format = '!BH'
    attr_names = []
    for attr, default, s_type in packet_type.info:
        # skip 'no net' attributes
        if s_type == 'no net':
            continue
        # break if type is not fixed length and no other evaluation is needed
        if s_type not in ('B', 'H', 'I', 'Q'):
            break
        #
        struct_format += s_type
        attr_names.append(attr)
    else:
        # for/else: runs only when the loop did NOT break, i.e. every
        # networked field is fixed width — safe to precompile the packer
        if attr_names:
            fast_struct = Struct(struct_format)
            fast_struct_size = fast_struct.size - 3 # 3 is the size of the packet head
            packet_type.binarypack_fast_pack = lambda p: fast_struct.pack(
                index, fast_struct_size, *[getattr(p, attr) for attr in attr_names]
            )
    # insert type into dictionary
    dictionary['type2type_id'][packet_type] = index
    dictionary['type_id2type'][index] = packet_type
    dictionary['type2name'][packet_type] = packet_type.__name__
    dictionary['name2type'][packet_type.__name__] = packet_type
    dictionary['PacketNames'][index] = name
    dictionary['PacketFactory'][index] = packet_type
    dictionary['PACKET_' + name] = index
class Header(object):
    """FIT file header: size, protocol/profile versions, data size, '.FIT'
    marker and an optional CRC over the first 12 bytes."""

    def __init__(self):
        self._format = Struct("<BBHL4s")
        self.size = None
        self.data_type = ".FIT"
        self.data_size = None
        self.crc = Crc()
        self.protocol_version = 16
        self.profile_version = 1005

    def __nonzero__(self):
        # Python 2 truthiness: header counts as "set" once size is known
        return self.size is not None

    def __repr__(self):
        return '<%s protocol=%d profile=%d crc=%r>' % (
            self.__class__.__name__,
            self.protocol_version,
            self.profile_version,
            self.crc,
        )

    @property
    def total_size(self):
        """Full file size: header + data + trailing CRC."""
        return self.size + self.data_size + self.crc.size

    @property
    def valid(self):
        """True when the magic data-type marker is '.FIT'."""
        return self.data_type == ".FIT"

    def read(self, chunk):
        """Parse a 12- or 14-byte header chunk; verify CRC when present."""
        fields = self._format.unpack(chunk[:12])
        (self.size, self.protocol_version, self.profile_version,
         self.data_size, self.data_type) = fields
        if len(chunk) == 14:
            self.crc.read(chunk[12:])
            if self.crc.value and not self.crc.check(chunk[:12]):
                raise BodyFormatError("Invalid CRC %x, should be %x" % (
                    compute_crc(chunk[:12]), self.crc.value))

    def write(self):
        """Serialize a 14-byte header (12 fixed bytes + CRC)."""
        self.size = 14
        chunk = self._format.pack(self.size, self.protocol_version,
                                  self.profile_version, self.data_size,
                                  self.data_type)
        self.crc.value = compute_crc(chunk)
        return chunk + self.crc.write()
def _get_response_packet(self, unit):
    """Build the SMSG_NAME_QUERY_RESPONSE packet describing *unit*."""
    name_bytes = unit.name.encode("utf8") + b"\x00"
    fmt = self.RESPONSE_FMT.format(name_len=len(name_bytes))
    payload = Struct(fmt).pack(
        self.guid,
        name_bytes,
        unit.get_race(),
        unit.get_gender(),
        unit.get_class()
    )
    return WorldPacket(OpCode.SMSG_NAME_QUERY_RESPONSE, payload)
def _write_qbdy2(load_type, loads, nloads, op2, op2_ascii, endian):
    """writes the QBDY2s"""
    key = (4909, 49, 240)
    nfields = 10
    packer = Struct(endian + b'ii8f')
    nbytes = write_header(load_type, nfields, nloads, key, op2, op2_ascii)
    for load in loads:
        # pad the flux list out to exactly 8 values
        fluxes = list(load.qfluxs)
        fluxes = fluxes + [0.] * (8 - len(fluxes))
        data = [load.sid, load.eid] + fluxes
        op2_ascii.write(' QBDY2 data=%s\n' % str(data))
        op2.write(packer.pack(*data))
    return nbytes
def _write_sload(load_type, loads, op2, op2_ascii, endian):
    """writes the SLOADs"""
    key = (5401, 54, 25)
    data = []
    for load in loads:
        # one (sid, nid, mag) triple per node of each SLOAD
        for nid, mag in zip(load.nodes, load.mags):
            entry = [load.sid, nid, mag]
            op2_ascii.write(' SLOAD data=%s\n' % str(entry))
            data.extend(entry)
    nfields = len(data)
    nloads = nfields // 3
    packer = Struct(endian + b'iif' * nloads)
    nbytes = write_header_nvalues(load_type, nfields, key, op2, op2_ascii)
    op2.write(packer.pack(*data))
    return nbytes
def _write_mat1(model, name, mids, nmaterials, op2_file, op2_ascii, endian):
    """writes the MAT1"""
    key = (103, 1, 77)
    nfields = 12
    packer = Struct(endian + b'i10fi')
    nbytes = write_header(name, nfields, nmaterials, key, op2_file, op2_ascii)
    for mid in sorted(mids):
        mat = model.materials[mid]
        # mid, E, G, nu, rho, A, tref, ge, St, Sc, Ss, mcsid
        row = [mid, mat.e, mat.g, mat.nu, mat.rho, mat.a, mat.tref,
               mat.ge, mat.St, mat.Sc, mat.Ss, mat.mcsid]
        op2_ascii.write(' mid=%s data=%s\n' % (mid, row[1:]))
        op2_file.write(packer.pack(*row))
    return nbytes
def _write_mat10(model, name, mids, nmaterials, op2_file, op2_ascii, endian):
    """writes the MAT10"""
    key = (2801, 28, 365)
    nfields = 5
    packer = Struct(endian + b'i4f')
    nbytes = write_header(name, nfields, nmaterials, key, op2_file, op2_ascii)
    for mid in sorted(mids):
        mat = model.materials[mid]
        # mid, bulk, rho, c, ge
        row = [mid, mat.bulk, mat.rho, mat.c, mat.ge]
        assert len(row) == nfields
        op2_ascii.write(' mid=%s data=%s\n' % (mid, row[1:]))
        op2_file.write(packer.pack(*row))
    return nbytes
def tuple_to_bin_file(records, format, f):
    """Write tuples into a binary file.

    Uses the struct module to handle the binary data: every tuple is
    encoded as one fixed-layout structure and written to the file.

    Args:
        records: iterable of tuples to serialize.
        format: struct format string.
        f: binary file object opened for writing.

    Returns:
        None

    Raises:

    """
    encoder = Struct(format)
    for row in records:
        f.write(encoder.pack(*row))
class DataType(object):
    """Base class for data type objects.

    Wraps a :class:`~struct.Struct` parser for converting between python
    objects and binary strings, for use with payload objects.

    Arguments:
        format (str): Format definition as used by :class:`~struct.Struct`.
            A leading '!' (network order) is added automatically unless the
            format already starts with an explicit byte-order mark.
        nan (object): NAN value to use for this object.
        swap_words (bool): Swap registers (DWORDS) before conversion —
            MODBUS does not define how multi-word values span registers,
            so some applications need this. Defaults to `False`.
    """

    def __init__(self, format, nan=None, swap_words=False):
        # default to network byte order unless a mark is given explicitly
        if len(format) > 0 and format[0] in "<>!=":
            fmt = format
        else:
            fmt = "!" + format
        self._parser = Struct(fmt)
        self._nan = nan
        self._swap_words = swap_words

    def __len__(self):
        """Number of bytes the encoded value occupies."""
        return self._parser.size

    def encode(self, *values):
        """Pack *values* to bytes, swapping 16-bit words when configured."""
        payload = self._parser.pack(*values)
        return swap_words(payload) if self._swap_words else payload

    def decode(self, bytes):
        """Unpack *bytes* (word-swapped first when configured) to a tuple."""
        buf = swap_words(bytes) if self._swap_words else bytes
        return self._parser.unpack(buf)
def serialize_anl(out_file, stages, starttime_seconds, epoch_duration_millies, colors=None, lineWidth=None): '''Serialize the values from the 1D generator stages into an ANL file for the specified recording start time. Colors may be specified in a list, where the index should match the associated value in stages. Colors are specified as little-endian RGB (without alpha). Line widths (replace event duration for sleep stage files only) may be specified in percent of graph height). ''' # use default colorscheme if none has been specified if colors is None: blue = 0x00FF0000 red = 0x000000FF colors = [red, red, red, blue, red, red] # use line width if lineWidth is None: width_normal = 0x00000000 width_30percent = 0x00000001 * 1000 * 1000 * 60 * 60 * 24 * 30 # 30 days: 30% line width lineWidth = [ width_normal, width_normal, width_normal, width_30percent, width_normal, width_normal ] # convert date from UNIX timestamps to "borland/excel days since 01-01-1900" # 25569 days between 01-01-1900 and 01-01-1970 # 86400 seconds per day start_timestamp_us = (starttime_seconds + (25569 * 86400)) * 1000 * 1000 epoch_duration_us = int(epoch_duration_millies * 1000) # serialize header out_file.write(b'000000CB\r\n') # serialize records for all values that the generator yields serializer = Struct('<qqIiiB') timestamp_us = int(start_timestamp_us) for epoch_stage in stages: stage = int(epoch_stage) out_file.write( serializer.pack(timestamp_us, lineWidth[stage], colors[stage], 0, stage, 0)) timestamp_us = timestamp_us + epoch_duration_us
def write_pbarl(name, pids, itable, op2_file, op2_ascii, obj, endian=b'<'):
    """writes the PBARL

    Record layout per property: pid, mid, group (8s), beam_type (8s),
    then the dimensions as floats, nsm, and a -1 end-of-record sentinel.
    Returns the decremented *itable* subtable counter.
    """
    key = (9102, 91, 52)
    # fixed prefix: pid, mid, group, bar type, first float of the dims/nsm run
    fmt0 = endian + b'2i8s8sf'
    # first pass: count the total number of dimension words
    ndims = 0
    nproperties = len(pids)
    for pid in sorted(pids):
        prop = obj.properties[pid]
        ndim = len(prop.dim)
        ndims += ndim
    nvalues = 8 * nproperties + ndims + 3 # +3 comes from the keys
    nbytes = nvalues * 4
    # leading record markers, byte count, and the 3-int key
    op2_file.write(pack('3i', *[4, nvalues, 4]))
    op2_file.write(pack('i', nbytes))
    op2_file.write(pack('3i', *key))
    op2_ascii.write('%s %s\n' % (name, str(key)))
    for pid in sorted(pids):
        prop = obj.properties[pid]
        # value is the first term in dim
        bar_type = ('%-8s' % prop.beam_type).encode('ascii')
        group = ('%-8s' % prop.group).encode('ascii')
        data_in = [prop.pid, prop.mid, group, bar_type]
        # variable tail: ndim floats + trailing int (requires bytes %-format,
        # Python 3.5+)
        ndim = len(prop.dim)
        fmti = b'%ifi' % ndim
        struct1 = Struct(fmt0 + fmti)
        data_in += prop.dim
        data_in.append(prop.nsm)
        data_in.append(-1)  # end-of-record sentinel
        op2_file.write(struct1.pack(*data_in))
        op2_ascii.write(str(data_in) + '\n')
    # trailing byte-count marker closes the data block
    op2_file.write(pack('i', nbytes))
    itable -= 1
    data = [
        4, itable, 4,
        4, 1, 4,
        4, 0, 4]
    op2_file.write(pack('9i', *data))
    op2_ascii.write(str(data) + '\n')
    return itable
class HeaderPacker:
    """Serializes and deserializes header objects via a pre-compiled struct."""

    def __init__(self, header_format_class, endian='>'):
        self._header_format_class = header_format_class
        fmt, allocations = compile_struct(
            header_format_class,
            header_format_class.START_OFFSET_IN_BYTES,
            header_format_class.LENGTH_IN_BYTES,
            endian)
        self._format = fmt
        self._field_name_allocations = allocations
        self._struct = Struct(fmt)

    @property
    def header_format_class(self):
        return self._header_format_class

    def pack(self, header):
        """Serialize *header* into a bytes buffer.

        Raises:
            TypeError: if *header* is not an instance of the configured
                header format class.
        """
        if not isinstance(header, self._header_format_class):
            raise TypeError("{}({}) cannot pack header of type {}.".format(
                self.__class__.__name__,
                self._header_format_class.__name__,
                header.__class__.__name__))
        field_values = [getattr(header, names[0])
                        for names in self._field_name_allocations]
        return self._struct.pack(*field_values)

    def unpack(self, buffer):
        """Deserialize *buffer* into a new header object.

        Each unpacked value is assigned to every field name allocated to
        its slot.

        Returns:
            A new instance of the header format class.
        """
        field_values = self._struct.unpack(buffer)
        kwargs = {name: value
                  for names, value in zip(self._field_name_allocations,
                                          field_values)
                  for name in names}
        return self._header_format_class(**kwargs)

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__,
                               self._header_format_class.__name__)
def _write_flutter(model: Union[BDF, OP2Geom], name: str,
                   flutter_ids: List[int], ncards: int,
                   op2, op2_ascii, endian: bytes) -> int:
    """
    (3902, 39, 272)
    MSC 2018.2

    Word Name Type Description
    1 SID       I
    2 METHOD(2) CHAR4
    4 DENS      I
    5 MACH      I
    6 RFREQ     I
    7 IMETH(2)  CHAR4
    SFLG=0 (std)
      9 NEIGN  I  nvalue
      10 EPR   RS
      11 SFLG  I  SWEEP FLAG
    SFLG=1 (sweep)
      9 FMAX  RS maximum frequency
      10 EPR  RS
      11 SFLG I  SWEEP FLAG
    End SFLG
    Words 1 through max repeat until End of Record

    NX:
    data = (30, PK, 1, 2, 3, L, 3, 0.001, -1)
    """
    key = (3902, 39, 272)
    nfields = 10
    structi = Struct(endian + b'i 8s 3i 8s ifi')
    nbytes = write_header(name, nfields, ncards, key, op2, op2_ascii)

    for flutter_id in flutter_ids:
        flutter = model.flutters[flutter_id]  # type: FLUTTER
        # bug fix: the old code did b'-%8s' % flutter.method.decode('latin1'),
        # which (a) calls .decode on a str and raises AttributeError, and
        # (b) puts the '-' outside the conversion, yielding a literal dash plus
        # a right-justified field (9 bytes) for an 8s slot.  Left-justify to
        # 8 chars and encode instead.
        method = ('%-8s' % flutter.method).encode('latin1')
        imethod = ('%-8s' % flutter.imethod).encode('latin1')
        data = [flutter.sid, method, flutter.density, flutter.mach,
                flutter.reduced_freq, imethod, flutter.nvalue,
                flutter.epsilon, -1]
        assert None not in data, data
        op2_ascii.write(f'  FLUTTER data={data}\n')
        op2.write(structi.pack(*data))
    return nbytes
def _write_pload1(load_type, loads, nloads, op2, op2_ascii, endian):
    """writes the PLOAD1s"""
    key = (6909, 69, 198)
    nfields = 8
    packer = Struct(endian + b'4i4f')
    nbytes = write_header(load_type, nfields, nloads, key, op2, op2_ascii)

    for load in loads:
        # record layout: (sid, eid, load_type, scale, x1, p1, x2, p2)
        # load_type/scale are stored as 1-based indices into the valid_* lists
        type_code = 1 + load.valid_types.index(load.load_type)
        scale_code = 1 + load.valid_scales.index(load.scale)
        row = [load.sid, load.eid, type_code, scale_code,
               load.x1, load.p1, load.x2, load.p2]
        op2_ascii.write(' PLOAD1 data=%s\n' % str(row))
        op2.write(packer.pack(*row))
    return nbytes
def _write_desvar(model: Union[BDF, OP2Geom], name: str,
                  desvar_ids: List[int], ncards: int,
                  op2, op2_ascii, endian: bytes) -> int:
    """
    (3106, 31, 352)
    NX 2019.2

    Word Name  Type Description
    1 ID       I  Unique design variable identification number
    2 LABEL(2) CHAR4 User-supplied name for printing purposes
    4 XINIT    RS Initial value
    5 XLB      RS Lower bound
    6 XUB      RS Upper bound
    7 DELXV    RS Fractional change allowed for the design variable
                  during approximate optimization
    8 DDVAL    I  ID of a DDVAL entry that provides a set of allowable
                  discrete values
    """
    key = (3106, 31, 352)
    packer = Struct(endian + b'i8s ffff i')

    # 8 words per card (the label takes 2)
    nvalues = 8 * ncards
    nbytes = write_header_nvalues(name, nvalues, key, op2, op2_ascii)

    for desvar_id in desvar_ids:
        desvar = model.desvars[desvar_id]
        label_bytes = ('%-8s' % desvar.label).encode('ascii')
        # optional fields are written as 0 when unset
        delx = 0 if desvar.delx is None else desvar.delx
        ddval = 0 if desvar.ddval is None else desvar.ddval
        data = [desvar_id, label_bytes,
                desvar.xinit, desvar.xlb, desvar.xub,
                delx, ddval]
        assert None not in data, data
        op2_ascii.write(f' DESVAR data={data}\n')
        op2.write(packer.pack(*data))
    return nbytes
def _write_caero1(model: Union[BDF, OP2Geom], name: str,
                  caero_ids: List[int], ncards: int,
                  op2_file, op2_ascii, endian: bytes) -> int:
    """
    MSC 2018.2

    Word Name Type Description
    1 EID    I
    2 PID    I
    3 CP     I
    4 NSPAN  I
    5 NCHORD I
    6 LSPAN  I
    7 LCHORD I
    8 IGID   I
    9  X1  RS
    10 Y1  RS
    11 Z1  RS
    12 X12 RS
    13 X4  RS
    14 Y4  RS
    15 Z4  RS
    16 X43 RS
    """
    key = (3002, 30, 263)
    nfields = 16
    packer = Struct(endian + b'8i 8f')
    nbytes = write_header(name, nfields, ncards, key, op2_file, op2_ascii)

    for caero_id in caero_ids:
        caero = model.caeros[caero_id]  # type: CAERO1
        x1, y1, z1 = caero.p1
        x4, y4, z4 = caero.p4
        row = [caero.eid, caero.pid, caero.cp,
               caero.nspan, caero.nchord, caero.lspan, caero.lchord,
               caero.igroup,
               x1, y1, z1, caero.x12,
               x4, y4, z4, caero.x43]
        assert None not in row, row
        op2_ascii.write(f' CAERO1 data={row}\n')
        op2_file.write(packer.pack(*row))
    return nbytes
def parseLine(self, line):
    "Parse a gcode line."
    prefs = self.binary16BytePreferences
    splitLine = line.split()
    if len(splitLine) < 1:
        return
    firstWord = splitLine[0]
    if len(firstWord) < 1:
        return
    firstLetter = firstWord[0]
    if firstLetter == '(':
        return
    # quantize each axis/parameter onto its configured step grid
    feedrateInteger = getIntegerFromCharacterLengthLineOffset(
        'F', 0.0, splitLine, prefs.feedrateStepLength.value)
    iInteger = getIntegerFromCharacterLengthLineOffset(
        'I', 0.0, splitLine, prefs.xStepLength.value)
    jInteger = getIntegerFromCharacterLengthLineOffset(
        'J', 0.0, splitLine, prefs.yStepLength.value)
    xInteger = getIntegerFromCharacterLengthLineOffset(
        'X', prefs.xOffset.value, splitLine, prefs.xStepLength.value)
    yInteger = getIntegerFromCharacterLengthLineOffset(
        'Y', prefs.yOffset.value, splitLine, prefs.yStepLength.value)
    zInteger = getIntegerFromCharacterLengthLineOffset(
        'Z', prefs.zOffset.value, splitLine, prefs.zStepLength.value)
    # presence bitmask: X=1, Y=2, Z=4, I=8, J=16, F=32
    flagInteger = 0
    for bitIndex, axisLetter in enumerate('XYZIJF'):
        flagInteger += (1 << bitIndex) * getIntegerFlagFromCharacterSplitLine(
            axisLetter, splitLine)
    sixteenByteStruct = Struct('cBhhhhhhBc')
    self.output.write(sixteenByteStruct.pack(
        firstLetter, int(firstWord[1:]), xInteger, yInteger, zInteger,
        iInteger, jInteger, feedrateInteger, flagInteger, '#'))
def write_binary_stl(self, stl_filename, normalize_normal_vectors=False,
                     stop_on_failure=True):
    """
    Write an STL binary file

    Parameters
    ----------
    stl_filename : str
        the filename to write
    normalize_normal_vectors : bool; default=False
        should the normal vectors be normalized
    stop_on_failure : bool; default=True
        unused; kept for backwards compatibility
    """
    with open(stl_filename, 'wb') as stl_file:
        # fix: the old code called self.header.ljust(80, '\0') and discarded
        # the result (str.ljust returns a new string, it does not mutate);
        # the '%-80s' below performs the actual padding.
        if hasattr(self, 'header'):
            header = '%-80s' % self.header[:80]
        else:
            header = '%-80s' % stl_filename[:80]
        stl_file.write(pack(b'80s', header.encode('ascii')))

        nelements = self.elements.shape[0]
        stl_file.write(pack('i', nelements))

        # gather the three corner points of every triangle
        elements = self.elements
        p1 = self.nodes[elements[:, 0], :]
        p2 = self.nodes[elements[:, 1], :]
        p3 = self.nodes[elements[:, 2], :]

        # facet normals from the two edge vectors
        normals = np.cross(p2 - p1, p3 - p1)
        if normalize_normal_vectors:
            normals /= np.linalg.norm(normals, axis=1)[:, np.newaxis]

        # 50-byte facet record: normal, 3 vertices, attribute byte count (0)
        record = Struct('12fH')
        for eid in range(nelements):
            stl_file.write(record.pack(
                normals[eid, 0], normals[eid, 1], normals[eid, 2],
                p1[eid, 0], p1[eid, 1], p1[eid, 2],
                p2[eid, 0], p2[eid, 1], p2[eid, 2],
                p3[eid, 0], p3[eid, 1], p3[eid, 2],
                0))
def pack(self):
    """Serialize this game-control message via the inherited Struct layout."""
    fields = (
        self.SPL_STANDARD_MESSAGE_STRUCT_HEADER,
        self.SPL_STANDARD_MESSAGE_STRUCT_VERSION,
        self.packetNumber,
        self.playersPerTeam,
        self.competitionPhase,
        self.competitionType,
        self.gamePhase,
        self.gameState,
        self.setPlay,
        self.firstHalf,
        self.kickingTeam,
        self.dropInTeam,
        self.dropInTime,
        self.secsRemaining,
        self.secondaryTime,
    )
    # the class itself is a Struct subclass, so pack through the base class
    return Struct.pack(self, *fields)
class AlarmFormat(BaseFormat):
    """Codec for ALARM payloads (wire layout ``<BBHIBBHI``)."""

    def __init__(self):
        super().__init__()
        self._dataStruct = Struct("<BBHIBBHI")
        self._byteArray = None
        self._type = PAYLOAD_TYPE.ALARM
        self._version = 0
        self._dummy = 0
        self._timestamp = 0
        # bug fix: _alarmType was never initialized, so __repr__() and
        # getDict() raised AttributeError on a freshly-constructed instance
        # (before fromByteArray() had run).
        self._alarmType = 0
        self._alarmCode = 0
        self._param = 0

    def __repr__(self):
        return f"""{{
    "version"   : {self._version},
    "timestamp" : {self._timestamp},
    "alarmType" : {self._alarmType},
    "alarmCode" : {self._alarmCode},
    "param"     : {self._param}
}}"""

    def fromByteArray(self, byteArray):
        """Populate the fields by decoding *byteArray*."""
        self._byteArray = byteArray
        (self._version,
         self._dummy,
         self._dummy,
         self._timestamp,
         self._alarmType,
         self._alarmCode,
         self._dummy,
         self._param) = self._dataStruct.unpack(self._byteArray)

    def toByteArray(self):
        """Encode the current fields into the cached byte array."""
        # NOTE(review): _RPI_VERSION is presumably provided by BaseFormat —
        # confirm; it is not defined in this class.
        self._byteArray = self._dataStruct.pack(self._RPI_VERSION,
                                                self._dummy,
                                                self._dummy,
                                                self._timestamp,
                                                self._alarmType,
                                                self._alarmCode,
                                                self._dummy,
                                                self._param)

    def getDict(self):
        """Return the payload fields as a plain dict."""
        data = {
            "version": self._version,
            "alarmType": self._alarmType,
            "alarmCode": self._alarmCode,
            "timestamp": self._timestamp,
            "param": self._param
        }
        return data
def parseLine(self, line):
    "Parse a gcode line."
    repo = self.binary16ByteRepository
    splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
    firstWord = gcodec.getFirstWord(splitLine)
    if len(firstWord) < 1:
        return
    firstLetter = firstWord[0]
    if firstLetter == '(':
        return
    # quantize each axis/parameter onto its configured step grid
    feedRateInteger = getIntegerFromCharacterLengthLineOffset(
        'F', 0.0, splitLine, repo.feedRateStepLength.value)
    iInteger = getIntegerFromCharacterLengthLineOffset(
        'I', 0.0, splitLine, repo.xStepLength.value)
    jInteger = getIntegerFromCharacterLengthLineOffset(
        'J', 0.0, splitLine, repo.yStepLength.value)
    xInteger = getIntegerFromCharacterLengthLineOffset(
        'X', repo.xOffset.value, splitLine, repo.xStepLength.value)
    yInteger = getIntegerFromCharacterLengthLineOffset(
        'Y', repo.yOffset.value, splitLine, repo.yStepLength.value)
    zInteger = getIntegerFromCharacterLengthLineOffset(
        'Z', repo.zOffset.value, splitLine, repo.zStepLength.value)
    # presence bitmask: X=1, Y=2, Z=4, I=8, J=16, F=32
    flagInteger = 0
    for bitIndex, axisLetter in enumerate('XYZIJF'):
        flagInteger += (1 << bitIndex) * getIntegerFlagFromCharacterSplitLine(
            axisLetter, splitLine)
    sixteenByteStruct = Struct('cBhhhhhhBc')
    self.output.write(sixteenByteStruct.pack(
        firstLetter, int(firstWord[1:]), xInteger, yInteger, zInteger,
        iInteger, jInteger, feedRateInteger, flagInteger, '#'))
class String(BinStruct):
    """
    Variable-length string field.

    ``pack`` prefixes the UTF-8 payload with its byte length (encoded with
    ``length_format``); ``unpack`` decodes a payload whose length has already
    been placed in ``custom_size`` by the surrounding protocol machinery.
    """
    # Python type this field maps to
    _type = str

    def __init__(self, length_format='!I'):
        # struct format used for the length prefix emitted by pack()
        self.length_format = length_format
        self.size_struct = SStruct(length_format)

    @property
    def size(self):
        # strings have no static size; callers must rely on custom_size
        raise SizeNotDefined()

    def unpack(self, msg):
        """ Unpack string, little ugly but works """
        # NOTE(review): custom_size is assumed to be set by the caller before
        # unpack() is invoked — it is not defined in this class; confirm.
        if len(msg) != self.custom_size:
            raise CannotPack("Got message with wrong length!")
        if python3:
            # bytes -> str directly on Python 3
            return msg.decode("utf-8")
        # Python 2 path: decode byte-by-byte via struct
        s = [chr(unpack('!B', msg[i])[0]) for i in range(self.custom_size)]
        return ''.join(s)

    def pack(self, msg):
        """ Pack string with length """
        if not python3:
            # Python 2: normalize unicode to a UTF-8 byte string
            if type(msg) == unicode:
                msg = msg.encode("utf-8")
        else:
            msg = bytearray(msg, 'utf-8')
        # length prefix followed by the payload
        st = self.size_struct.pack(len(msg))
        return st + msg
class MCUComm:
    """Packs and unpacks circuit measurements exchanged with the MCU."""

    def __init__(self):
        """
        One circuit is defined as a string containing 5 fields

        | id_ (16 bits) | Power (32) | Irms**2 (32) | Vrms**2 (32) |
        """
        self.pkg = Struct("<Hfff")

    def unpack(self, string):
        """Decode one packed record into a Measurement."""
        measurement = Measurement()
        measurement.set(*self.pkg.unpack(string))
        return measurement

    def pack(self, measurement):
        """Encode a Measurement into its wire format."""
        return self.pkg.pack(*measurement.get_mcu())

    def read(self, cmd):
        """Convert a sequence of numeric tokens into raw chars and unpack."""
        raw = ""
        for token in cmd:
            raw += chr(int(token))
        return self.unpack(raw)
def _write_solid(model, name, eids, nelements, itable, op2_file, op2_ascii, endian):
    """writes the solid elements"""
    # (key, nnodes) per element type; the record is eid, pid, n1..n<nnodes>
    solid_info = {
        'CTETRA': ((5508, 55, 217), 10),
        'CHEXA': ((7308, 73, 253), 20),
        'CPENTA': ((4108, 41, 280), 15),
        # CPYRAM really has 13 nodes, but the table stores a 14th anyway...
        'CPYRAM': ((17200, 172, 1000), 14),
    }
    if name not in solid_info:  # pragma: no cover
        raise NotImplementedError(name)
    key, nnodes = solid_info[name]

    nfields = nnodes + 2  # eid + pid + node ids
    packer = Struct(endian + b'%ii' % nfields)
    nbytes = _write_intermediate_block(name, key, nfields, nelements,
                                       op2_file, op2_ascii)

    for eid in sorted(eids):
        elem = model.elements[eid]
        nids = elem.node_ids
        pid = elem.pid
        # missing mid-side nodes are written as 0
        if None in nids:
            nids = [0 if nid is None else nid for nid in nids]
        padding = [0] * (nnodes - len(nids))
        data = [eid, pid] + nids + padding
        op2_ascii.write(' eid=%s pid=%s nids=%s\n' % (eid, pid, str(nids)))
        op2_file.write(packer.pack(*data))

    itable = _write_end_block(nbytes, itable, op2_file, op2_ascii)
    return itable
def _get_character_data(self, character):
    """Return the wire data for one character.

    Emits the general character info, one empty equipment entry per
    non-bag slot, and a first 16-slot bag entry at the end.
    """
    name_bytes = character.name.encode("utf8") + b"\x00"
    packer = Struct(self.CHAR_FMT.format(name_len=len(name_bytes)))
    char_data = packer.pack(
        character.guid,
        name_bytes,
        character.race,
        character.class_id,
        character.gender,
        character.features.skin,
        character.features.face,
        character.features.hair_style,
        character.features.hair_color,
        character.features.facial_hair,
        character.stats.level,
        character.position.zone_id,
        character.position.map_id,
        character.position.pos_x,
        character.position.pos_y,
        character.position.pos_z,
        0,  # guild
        0,  # char flags?
        0,  # first login
        0,  # pet display
        0,  # pet level
        0   # pet family
    )

    # every slot is currently written empty; the pack(0, 0) bytes are the
    # same for each, so pack once and repeat
    empty_entry = self.CHAR_EQUIPMENT_BIN.pack(0, 0)
    n_slots = (CharacterEquipSlot.TABARD.value + 1
               - CharacterEquipSlot.HEAD.value)
    equipment_data = empty_entry * (n_slots + 1)  # +1: the first bag
    return char_data + equipment_data
class FormatField(StaticField):
    """
    A field that uses ``struct`` to pack and unpack data.

    See ``struct`` documentation for instructions on crafting format strings.

    :param str name: name of the field
    :param str endianness: format endianness string; one of "<", ">", or "="
    :param str format: a single format character
    """
    __slots__ = ["packer"]

    def __init__(self, name, endianity, format):
        if endianity not in (">", "<", "="):
            raise ValueError("endianity must be be '=', '<', or '>'",
                             endianity)
        if len(format) != 1:
            raise ValueError("must specify one and only one format char")
        self.packer = Packer(endianity + format)
        StaticField.__init__(self, name, self.packer.size)

    def __getstate__(self):
        # Struct objects are not picklable; store the format string instead
        attrs = StaticField.__getstate__(self)
        attrs["packer"] = attrs["packer"].format
        return attrs

    def __setstate__(self, attrs):
        attrs["packer"] = Packer(attrs["packer"])
        # bug fix: the old code called StaticField.__setstate__(attrs),
        # passing the attrs dict as `self`; the instance being restored
        # must be passed explicitly.
        return StaticField.__setstate__(self, attrs)

    def _parse(self, stream, context):
        try:
            return self.packer.unpack(_read_stream(stream, self.length))[0]
        except Exception as ex:
            raise FieldError(ex)

    def _build(self, obj, stream, context):
        try:
            _write_stream(stream, self.length, self.packer.pack(obj))
        except Exception as ex:
            raise FieldError(ex)
def write_op2_header(model: OP2, op2_file, fop2_ascii, struct_3i: Struct,
                     post: int=-1, endian: bytes=b'<'):
    """writes the op2 header

    Writes the FORTRAN tape-id preamble, the Nastran version marker and the
    closing markers for post=-1; for post=-2 only the [2, 4] markers are
    written.
    """
    is_nx = model.is_nx
    is_msc = model.is_msc
    #is_nasa95 = model.is_nasa95
    is_optistruct = model.is_optistruct

    # sentinel "unset" date -> stamp with today's date as (month, day, year)
    if model.date == (1, 1, 2000):  # (7, 24, 2020)
        today = datetime.datetime.today()
        model.date = (today.month, today.day, today.year)

    if post == -1:
        #_write_markers(op2_file, op2_ascii, [3, 0, 7])
        op2_file.write(struct_3i.pack(*[4, 3, 4,]))
        tape_code = b'NASTRAN FORT TAPE ID CODE - '
        if is_nx:
            op2_file.write(pack(endian + b'7i 28s i', *[4, 1, 4,
                                                        4, 7, 4,
                                                        28, tape_code, 28]))
            nastran_version = b'NX8.5 '
        elif is_msc or is_optistruct:
            # NOTE(review): model.date is assigned as (month, day, year)
            # a few lines above, but it is unpacked here as day, month, year
            # — the orderings disagree; confirm the intended field order
            # against the OP2 spec before touching this.
            day, month, year = model.date
            op2_file.write(pack(endian + b'9i 28s i',
                                *[12, day, month, year - 2000,
                                  12, 4, 7, 4,
                                  28, tape_code, 28]))
            nastran_version = b'XXXXXXXX'
        else:
            raise NotImplementedError(model._nastran_format)
        # version record (8-char string)
        op2_file.write(pack(endian + b'4i 8s i', *[4, 2, 4,
                                                   #4, 2, 4,
                                                   #4, 1, 4,
                                                   #4, 8, 4,
                                                   8, nastran_version, 8]))
        # closing markers
        op2_file.write(pack(endian + b'6i', *[4, -1, 4,
                                              4, 0, 4,]))
    elif post == -2:
        _write_markers(op2_file, fop2_ascii, [2, 4])
    else:
        raise RuntimeError(f'post = {post:d}; use -1 or -2')
def _write_mats1(model, name, mids, nmaterials, op2, op2_ascii, endian):
    """writes the MATS1"""
    key = (503, 5, 90)
    nfields = 11
    packer = Struct(endian + b'3ifiiff3i')
    nbytes = write_header(name, nfields, nmaterials, key, op2, op2_ascii)

    # MATS1 nonlinearity type -> integer code
    type_codes = {'NLELAST': 1, 'PLASTIC': 2, 'PLSTRN': 3}

    for mid in sorted(mids):
        mat = model.MATS1[mid]
        # record layout:
        # (mid, tid, Type, h, yf, hr, limit1, limit2, a, bmat, c)
        if mat.Type not in type_codes:
            raise RuntimeError(
                f'Invalid Type: Type={mat.Type}; must be 1=NLELAST '
                '2=PLASTIC or 3=PLSTRN')
        Type = type_codes[mat.Type]

        data = [
            mid,
            0 if mat.tid is None else mat.tid,
            Type,
            0.0 if mat.h is None else mat.h,
            0 if mat.yf is None else mat.yf,
            0 if mat.hr is None else mat.hr,
            0.0 if mat.limit1 is None else mat.limit1,
            0.0 if mat.limit2 is None else mat.limit2,
            0, 0, 0,  # a, bmat, c: unused trailing words
        ]
        assert None not in data, 'MATS1 %s' % data
        assert len(data) == nfields
        op2_ascii.write(' mid=%s data=%s\n' % (mid, data[1:]))
        op2.write(packer.pack(*data))
    return nbytes
class Bytes(Unit):
    """Fixed-length byte field; a negative length means 'read everything'."""

    def __init__(self, length: int):
        self.length = length
        # only a fixed-size field gets a pre-compiled struct
        if length >= 0:
            self._struct = Struct(f"{length}s")

    def __str__(self):
        return f"{self.__class__.__name__}({self.length})"

    def get_value(self):
        if self.length < 0:
            return (yield from read())
        raw = yield from read_raw_struct(self._struct)
        return raw[0]

    def __call__(self, obj) -> bytes:
        return self._struct.pack(obj) if self.length >= 0 else obj
def enet_stom(string):  # string to mac
    # NOTE Should enet_aton be smarter? e.g. accept 0::0?
    if not isinstance(string, str):
        raise_type('string', str, type(string))
    elif len(string) == 17:
        # TODO Improve this function, it should accept any type of 'splitter'
        byte_packer = Struct('!B')
        for separator in (':', '-', ' '):
            if separator in string:
                packed = b''
                for octet in string.split(separator):
                    packed += byte_packer.pack(int(octet, 16))
                return packed
        # TODO Improve 'malformed' checking
        raise ValueError('string seems to be malformed')
    elif len(string) == 12:
        # bare hex digits, no separators
        return Ethernet.enet_itom(int(string, 16))
    else:
        raise ValueError('string out of supported range')
def _write_mat3(model, name, mids, nmaterials, op2, op2_ascii, endian):
    """writes the MAT3"""
    key = (1403, 14, 122)
    nfields = 16
    packer = Struct(endian + b'i8fi5fi')
    nbytes = write_header(name, nfields, nmaterials, key, op2, op2_ascii)

    for mid in sorted(mids):
        mat = model.materials[mid]
        gzx = 0.0 if mat.gzx is None else mat.gzx
        # record layout:
        # (mid, ex, eth, ez, nuxth, nuthz, nuzx, rho, gzx,
        #  blank, ax, ath, az, tref, ge, blank)
        data = [mid,
                mat.ex, mat.eth, mat.ez,
                mat.nuxth, mat.nuthz, mat.nuzx,
                mat.rho, gzx,
                0,
                mat.ax, mat.ath, mat.az,
                mat.tref, mat.ge,
                0]
        assert None not in data, 'MAT3 %s' % data
        op2_ascii.write(' mid=%s data=%s\n' % (mid, data[1:]))
        op2.write(packer.pack(*data))
    return nbytes
def _write_mat8(model, name, mids, nmaterials, op2, op2_ascii, endian):
    """writes the MAT8"""
    key = (2503, 25, 288)
    nfields = 19
    packer = Struct(endian + b'i18f')
    nbytes = write_header(name, nfields, nmaterials, key, op2, op2_ascii)

    for mid in sorted(mids):
        mat = model.materials[mid]
        # record layout:
        # (mid, E1, E2, nu12, G12, G1z, G2z, rho, a1, a2,
        #  tref, Xt, Xc, Yt, Yc, S, ge, f12, strn)
        data = [mid,
                mat.e11, mat.e22, mat.nu12,
                mat.g12, mat.g1z, mat.g2z,
                mat.rho, mat.a1, mat.a2, mat.tref,
                mat.Xt, mat.Xc, mat.Yt, mat.Yc, mat.S,
                mat.ge, mat.F12, mat.strn]
        op2_ascii.write(' mid=%s data=%s\n' % (mid, data[1:]))
        op2.write(packer.pack(*data))
    return nbytes
class AnyStruct:
    """Binary record codec backed by a namedtuple.

    Each entry of *fields* describes one logical field: its name, its struct
    format, an optional element count (for tuple-valued fields), and optional
    from-binary / to-binary conversion callables.
    """

    def __init__(self, name, fields):
        self.ntuple_cls = namedtuple(name, [field[0] for field in fields])
        self.struct = Struct('<' + ''.join(field[1] for field in fields))
        # per-field element counts and converters (with defaults)
        self.tupling = get_index_of_tuples(fields, 2, 1)
        self.frombin = get_index_of_tuples(fields, 3, noop)
        self.tobin = get_index_of_tuples(fields, 4, noop)

    @property
    def size(self):
        return self.struct.size

    def unpack(self, bs):
        """Decode *bs* into a namedtuple instance."""
        raw_values = iter(self.struct.unpack(bs))
        decoded = []
        for count, from_bin in zip(self.tupling, self.frombin):
            if count == 1:
                item = next(raw_values)
            else:
                # multi-element field: regroup into a tuple
                item = tuple(next(raw_values) for _ in range(count))
            decoded.append(from_bin(item))
        return self.ntuple_cls._make(decoded)

    def funpack(self, f):
        """Read one record from file *f* and decode it."""
        return self.unpack(f.read(self.size))

    def pack(self, *a, **kw):
        """Encode the given field values into bytes."""
        record = self.ntuple_cls(*a, **kw)
        flat = []
        for value, count, to_bin in zip(record, self.tupling, self.tobin):
            value = to_bin(value)
            if count == 1:
                flat.append(value)
            else:
                assert len(value) == count
                flat.extend(value)
        return self.struct.pack(*flat)

    def fpack(self, f, *a, **kw):
        """Encode the given field values and write them to file *f*."""
        return f.write(self.pack(*a, **kw))