def read_frame(f):
    """Read one 'CYCF' frame from a cycloid IFF log stream *f*.

    Returns (ok, framedata): ok is False at EOF or if the next chunk is not
    a CYCF frame; framedata is a dict keyed by decoded sub-chunk content.
    Chunk(..., False, False, True): no alignment, little-endian sizes,
    size field includes the 8-byte chunk header.
    """
    try:
        ck = chunk.Chunk(f, False, False, True)
    except EOFError:
        return False, None
    if ck.getname() != b'CYCF':
        print("Not a cycloid IFF log file (got ", ck.getname(), "?)")
        return False, None
    # read timestamp header: seconds + microseconds -> float seconds
    ts = struct.unpack("=II", ck.read(8))
    framedata = {'tstamp': ts[0] + ts[1] / 1000000.}
    # read all embedded chunks until the frame chunk is exhausted
    while True:
        try:
            ick = chunk.Chunk(ck, False, False, True)
        except EOFError:
            break
        n = ick.getname()
        if n == b'CSta':
            # car state: controls, IMU, servo and wheel encoder readings
            data = struct.unpack("=bbffffffBHHHHHHHH", ick.read())
            throttle, steering = data[0:2]
            accel = np.float32(data[2:5])
            gyro = np.float32(data[5:8])
            servo = data[8]
            wheels = np.uint16(data[9:13])
            periods = np.uint16(data[13:17])
            framedata['carstate'] = (throttle, steering, accel, gyro,
                                     servo, wheels, periods)
        elif n == b'MCL4':
            # monte carlo localization, 4-float state (particles w/ heading)
            framedata['particles'] = np.frombuffer(
                ick.read(), np.float32).reshape((-1, 4))
        elif n == b'aCDF':
            # activation CDF, new thing
            framedata['activations'] = np.frombuffer(ick.read(), np.int32)
        elif n == b'LM01':
            # expected landmark location: first byte is landmark count,
            # remainder splits into two (nP, numL) uint16 arrays c0/c1
            numL, = struct.unpack('B', ick.read(1))
            c0c1 = np.frombuffer(ick.read(), np.uint16).reshape((-1, numL))
            nP = c0c1.shape[0] // 2
            framedata['c0'] = c0c1[:nP]
            framedata['c1'] = c0c1[nP:]
        elif n == b'CTLs':
            # controller state (older 17-float layout)
            framedata['controldata'] = struct.unpack("=17f", ick.read())
        elif n == b'CTL2':
            # controller state (newer 26-float layout; same dict key)
            framedata['controldata'] = struct.unpack("=26f", ick.read())
        elif n == b'Y420':
            # YUV420 frame: leading uint16 is the row width
            w, = struct.unpack('=H', ick.read(2))
            framedata['yuv420'] = np.frombuffer(
                ick.read(), np.uint8).reshape((-1, w))
        else:
            # unknown sub-chunk: skip its payload
            ick.skip()
    return True, framedata
def _read_chunk_internal(parent_chunk: chunk.Chunk, parent: "WavefileChunk"):
    """Recursively read every sub-chunk of *parent_chunk* and append the
    resulting WavefileChunk nodes to parent.children.

    Terminates when chunk.Chunk raises EOFError at the end of the parent's
    payload.
    """
    try:
        while True:
            c = chunk.Chunk(parent_chunk, bigendian=False)
            if c.getname() in [b"RIFF", b"LIST"]:
                # container chunk: first 4 payload bytes are the form/list
                # type, the rest is a nested chunk sequence — recurse
                riff_type = c.read(4)
                wavchunk = WavefileChunk(
                    c.getname(), c.getsize(), riff_type, b"", [])
                WavefileChunk._read_chunk_internal(c, wavchunk)
            else:
                if c.getname() != b"data":
                    # leaf chunk: keep its raw payload bytes
                    wavchunk = WavefileChunk(
                        c.getname(), c.getsize(), b"", c.read(), [])
                else:
                    # 'data' payload is deliberately not loaded into memory
                    wavchunk = WavefileChunk(
                        c.getname(), c.getsize(), b"", b"", [])
                # NOTE(review): close() is only called on the leaf branch;
                # container chunks rely on the recursion having consumed them
                # exactly — confirm odd-sized containers stay aligned.
                c.close()
            parent.children.append(wavchunk)
    except EOFError:
        pass
def parseChunks(fi):
    """Scan an AIFF/AIFC file for OP-1 metadata and sample count.

    :param fi: path to the AIFF/AIFC file
    :return: ``(op1_json, totalsamples)`` where *op1_json* is the decoded
        JSON payload of an ``APPL``/``op-1`` chunk (or None) and
        *totalsamples* is the SSND chunk size / 2; returns ``None`` if the
        file is not a FORM/AIFF container.

    Fixes vs. original: the file handle leaked on the early ``return None``
    paths (now a ``with`` block), and EOF ended the scan with ``break``
    instead of silently retrying the remaining loop iterations.
    """
    ret = None
    totalsamples = 0
    with open(fi, 'rb') as f2:
        if f2.read(4) != b'FORM':
            return None
        f2.read(4)  # FORM size field, unused
        form_type = f2.read(4)
        if form_type != b'AIFF' and form_type != b'AIFC':
            return None
        for _ in range(40):  # cap on chunk count, as in the original
            try:
                c = chunk.Chunk(f2)
            except EOFError:
                break
            print("chunk %s, %d bytes" % (c.getname(), c.getsize()))
            if c.getname() == b'SSND':
                totalsamples = c.getsize() / 2  # for samples, not bytes
            if c.getname() == b'APPL':
                # OP-1 stores its metadata as NUL-padded JSON after 'op-1'
                if c.read(4) == b'op-1':
                    s = c.read(c.getsize() - 4)
                    s = s.decode('utf-8').strip('\0').strip()
                    ret = json.loads(s)
            c.skip()
    return (ret, totalsamples)
def getOP1chunk(fi):
    """Return the decoded OP-1 JSON metadata from an AIFF/AIFC file.

    :param fi: path to the AIFF/AIFC file
    :return: parsed JSON object from the ``APPL``/``op-1`` chunk, or None
        if absent or if the file is not a FORM/AIFF container.

    Fixes vs. original: the file handle leaked on the early ``return None``
    paths (now a ``with`` block), and EOF ends the scan with ``break``
    instead of silently retrying the remaining loop iterations.
    """
    ret = None
    with open(fi, 'rb') as f2:
        if f2.read(4) != b'FORM':
            return None
        f2.read(4)  # FORM size field, unused
        form_type = f2.read(4)
        if form_type != b'AIFF' and form_type != b'AIFC':
            return None
        for _ in range(10):  # cap on chunk count, as in the original
            try:
                c = chunk.Chunk(f2)
            except EOFError:
                break
            print("chunk %s, %d bytes" % (c.getname(), c.getsize()))
            if c.getname() == b'APPL':
                # OP-1 stores its metadata as NUL-padded JSON after 'op-1'
                if c.read(4) == b'op-1':
                    s = c.read(c.getsize() - 4)
                    s = s.decode('utf-8').strip('\0').strip()
                    ret = json.loads(s)
            c.skip()
    return ret
def rmi2mid(rmi_file, mid_file):
    ''' Extract Standard MIDI file from RIFF MIDI (RMID) file.

    :param rmi_file: Input RIFF MIDI (RMID) file path. (.rmi or .mid)
    :type rmi_file: str
    :param mid_file: Output Standard MIDI file path. (.mid)
    :type mid_file: str
    :raises IOError: if the input is already SMF, is not RIFF/RMID, or is
        truncated.
    '''
    with open(rmi_file, 'rb') as frmi:
        root = chunk.Chunk(frmi, bigendian=False)
        chunk_id = root.getname()
        if chunk_id == b'MThd':
            raise IOError("Already a Standard MIDI format file: %s" % rmi_file)
        elif chunk_id != b'RIFF':
            raise IOError("Not an RIFF file: %s" % rmi_file)
        chunk_size = root.getsize()
        chunk_raw = root.read(chunk_size)
        (hdr_id, hdr_data, midi_size) = struct.unpack("<4s4sL", chunk_raw[0:12])
        if hdr_id != b'RMID' or hdr_data != b'data':
            raise IOError("Invalid or unsupported input file: %s" % rmi_file)
        midi_raw = chunk_raw[12:12 + midi_size]
        # Slicing never raises IndexError, so the original try/except was
        # dead code; check the payload length explicitly instead.
        if len(midi_raw) < midi_size:
            raise IOError("Broken input file: %s" % rmi_file)
        root.close()
    with open(mid_file, 'wb') as fmid:
        fmid.write(midi_raw)
def read_lwo(self):
    """Open self.filename, validate the LWO container header, and parse
    every top-level chunk via self.parse_tags().

    Raises Exception if the FORM type is not one of self.file_types.
    """
    self.f = open(self.filename, "rb")
    try:
        header, chunk_size, chunk_name = struct.unpack(">4s1L4s", self.f.read(12))
    except struct.error:
        # Short read / not an IFF container. The original used a bare
        # `except:`, which also swallowed KeyboardInterrupt and SystemExit.
        self.error(f"Error parsing file header! Filename {self.filename}")
        self.f.close()
        return
    if chunk_name not in self.file_types:
        raise Exception(
            f"Incorrect file type: {chunk_name} not in {self.file_types}"
        )
    self.file_type = chunk_name
    self.info(f"Importing LWO: {self.filename}")
    self.info(f"{self.file_type.decode('ascii')} Format")
    while True:
        try:
            self.rootchunk = chunk.Chunk(self.f)
        except EOFError:
            break
        self.parse_tags()
    # close explicitly instead of relying on refcounting after `del`
    self.f.close()
    del self.f
def _load_chunk_real(self, r, c):
    """Load or generate the world chunk at grid row *r*, column *c*.

    Resolution order: in-memory cache, then persistent storage (if a
    storage manager is configured), then procedural generation.
    :param r: chunk row index
    :param c: chunk column index
    :return: the loaded/generated chunk object
    """
    chunk_key = (r, c)
    bx, by = self.rc2xy(r, c)
    # Load from cache: re-enable and return the cached chunk as-is.
    cache_value = self._cache.get(chunk_key)
    if cache_value:
        cache_value.set_enabled(True)
        return cache_value
    # Load from save storage.
    new_chunk = chunk.Chunk(self, bx, by, self._chunk_tile_count, self._chunk_tile_size)
    if self._storage_mgr:
        storage_data = self._storage_mgr.get(str((r, c)))
        if storage_data:
            new_chunk.on_load(self._spawner, storage_data)
            ground_data = new_chunk.get_ground_data()
            assert ground_data
            ground_np = self._ground_geom_util.create_ground_from_data(r, c, ground_data)
            # sleep(0.001): presumably yields to other threads/tasks during
            # bulk loading — TODO confirm intent
            time.sleep(0.001)
            block_body = self._create_block_bodies(r, c)
            new_chunk.set_ground_geom(ground_np, ground_data, block_body)
            return new_chunk
    # Generate tile objects and terrain from scratch.
    time.sleep(0.001)
    block_body = self._create_block_bodies(r, c)
    plane_np, tiles_data = self._ground_geom_util.new_ground_geom(r, c)
    new_chunk.set_ground_geom(plane_np, tiles_data, block_body)
    # Walk every tile of this chunk and spawn its object, if any.
    # TODO optimization: can map_generator's get() be called just once?
    br = r * self._chunk_tile_count
    bc = c * self._chunk_tile_count
    for ir in range(br, br + self._chunk_tile_count):
        time.sleep(0.001)
        for ic in range(bc, bc + self._chunk_tile_count):
            ginfo = self._generator.get(ir, ic)
            if not ginfo:
                continue
            obj_info = ginfo.get('object')
            if obj_info:
                # tile center in world coordinates
                x = (ic + .5) * self._chunk_tile_size
                y = (ir + .5) * self._chunk_tile_size
                new_obj = self._spawner.spawn(x, y, obj_info)
                pos = new_obj.get_pos()
                assert abs(pos.get_x() - x) < 0.01, '%s,%s => %s' % (x, y, pos)
                assert new_obj  # ensure spawner returned a valid object
                ref_count = sys.getrefcount(new_obj)
                # ensure the spawner holds no reference of its own:
                # one ref for new_obj, one for the getrefcount argument
                assert ref_count == 2, ref_count
                new_chunk.add_object(new_obj)
                # ensure the object was actually added to the chunk
                assert sys.getrefcount(new_obj) == 3
    return new_chunk
def _seek_to_data_chunk(f): assert f.read(12) == 'RIFF\x00\x00\x00\x00WAVE' while True: c = chunk.Chunk(f, bigendian=False) if c.getname() == 'data': break else: c.skip()
def fmtEnd(self, bytesIO):
    """Return the stream offset just past the 'fmt ' chunk, i.e. where the
    next RIFF sub-chunk's tag begins.

    Fixes vs. original: the original created one chunk.Chunk before the
    loop and never created another, so getname() kept returning the same
    cached name — an infinite loop whenever the first sub-chunk was not
    'fmt '. A new Chunk is now teed up each iteration.
    """
    bytesIO.seek(Surger.riffSkip)  # skip RIFF tag, size, format fields
    while True:
        cc = chunk.Chunk(bytesIO, bigendian=False)  # next RIFF sub-chunk
        cn = cc.getname()
        cc.close()  # advance past this chunk's data (and pad byte)
        if cn == b'fmt ':
            break
    return bytesIO.tell()  # where the chunk after 'fmt ' begins
def new_generation(parents, size, number_of_best):
    """Breed children from *parents* by bitwise crossover plus mutation.

    Each child "image" pairs two distinct random parents and crosses them
    gene by gene: y/x position bits (width H and W), angle bits, and a
    fresh scale drawn around parent 1's scale. Per-bit mutation
    probability is 0.05. Children are appended until
    len(parents) + len(children) == size - number_of_best, then
    parents + children is returned.

    NOTE(review): relies on module globals H, W, `bits` and `ch`; if
    len(parents) already exceeds size - number_of_best, the equality is
    never reached and the loop does not terminate — confirm callers
    guarantee the sizes.
    """
    generation = []
    mutation_p = 0.05
    while True:
        chunks = []
        len_imgs = len(parents)
        # pick two distinct parent indices
        ind1 = np.random.randint(low=0, high=len_imgs)
        ind2 = np.random.randint(low=0, high=len_imgs)
        while ind1 == ind2:
            ind2 = np.random.randint(low=0, high=len_imgs)
        img1 = parents[ind1]
        img2 = parents[ind2]
        for extra1, extra2 in zip(img1, img2):
            # encode positions as bitfields sized to the image dimensions
            pos_1y, pos_1x = bits.int_to_bitfield(extra1.pos[0], H), bits.int_to_bitfield(
                extra1.pos[1], W)
            pos_2y, pos_2x = bits.int_to_bitfield(extra2.pos[0], H), bits.int_to_bitfield(
                extra2.pos[1], W)
            # single-point crossover, independent cut for y and x
            cross = np.random.randint(low=1, high=len(pos_1y) - 1)
            pos_chy = pos_1y[:cross] + pos_2y[cross:]
            cross = np.random.randint(low=1, high=len(pos_1x) - 1)
            pos_chx = pos_1x[:cross] + pos_2x[cross:]
            # flip individual position bits with probability mutation_p
            mutation_ps = np.random.random(len(pos_chx) + len(pos_chy))
            for i, (x, y) in enumerate(zip(pos_chx, pos_chy)):
                if mutation_ps[i] < mutation_p:
                    pos_chy[i] = not y
                if mutation_ps[i + len(pos_chy)] < mutation_p:
                    pos_chx[i] = not x
            # crossover + mutation for the angle bits
            angle_1 = bits.int_to_bitfield(int(extra1.angle))
            angle_2 = bits.int_to_bitfield(int(extra2.angle))
            cross = np.random.randint(low=1, high=len(angle_1) - 1)
            angle_ch = angle_1[:cross] + angle_2[cross:]
            mutation_ps = np.random.random(len(angle_ch))
            for i, a in enumerate(angle_ch):
                if mutation_ps[i] < mutation_p:
                    angle_ch[i] = not a
            # scale: gaussian jitter around parent 1 only (no crossover)
            scale_chy = np.abs(np.random.normal(loc=extra1.scale[0], scale=1))
            scale_chx = np.abs(np.random.normal(loc=extra1.scale[1], scale=1))
            chunks.append(
                ch.Chunk(extra1.img, [scale_chy, scale_chx],
                         bits.bitfield_to_int(angle_ch), [
                             bits.bitfield_to_int(pos_chy) % H,
                             bits.bitfield_to_int(pos_chx) % W
                         ]))
        generation.append(chunks)
        if len(parents) + len(generation) == size - number_of_best:
            break
    return parents + generation
def convert_to_file_with_gaps_between_chunks(src_file, gap_size, bigendian):
    """Copy a WAVE/AIFF file to '<name>.gap<ext>', padding each non-audio
    chunk with *gap_size* bytes of b'0' filler (size fields adjusted), to
    exercise parsers that must honor chunk sizes.

    :param src_file: path of the input RIFF/FORM file
    :param gap_size: filler bytes to append after each non-audio chunk
    :param bigendian: True for AIFF (FORM), False for WAVE (RIFF) sizes

    Fix vs. original: the loop assigned 0 to the *gap_size* parameter when
    it hit the 'data'/'SSND' chunk, silently disabling gaps for every
    chunk that followed; a per-chunk variable is used now.
    """
    name, ext = os.path.splitext(src_file)
    dst_file = name + '.gap' + ext
    int_format = '>L' if bigendian else '<L'
    with open(src_file, 'rb') as fdin:
        with open(dst_file, 'wb') as fdout:
            riff_or_form_chunk = chunk.Chunk(fdin, bigendian=bigendian)
            if riff_or_form_chunk.getname() == b'RIFF':
                format = riff_or_form_chunk.read(4)
                if format != b'WAVE':
                    raise Exception('not a WAVE file')
            elif riff_or_form_chunk.getname() == b'FORM':
                format = riff_or_form_chunk.read(4)
                if format not in (b'AIFF', b'AIFC'):
                    raise Exception('not an AIFF file')
            else:
                raise Exception('file does not start with RIFF or FORM id')
            fdout.write(riff_or_form_chunk.getname())
            fdout.write(b'xxxx')  # placeholder, patched at the end
            fdout.write(format)
            while True:
                try:
                    subchunk = chunk.Chunk(riff_or_form_chunk,
                                           bigendian=bigendian)
                except EOFError:
                    break
                chunkname = subchunk.getname()
                # never pad the audio payload chunk itself
                chunk_gap = 0 if chunkname in (b'data', b'SSND') else gap_size
                fdout.write(chunkname)
                fdout.write(
                    struct.pack(int_format, subchunk.getsize() + chunk_gap))
                fdout.write(subchunk.read())
                # gap filler, rounded up to even length for IFF alignment
                for i in range(chunk_gap + (chunk_gap % 2)):
                    fdout.write(b'0')
                subchunk.close()
            # back-patch the container size now that the total is known
            riff_or_form_chunk_size = fdout.tell() - 8
            fdout.seek(4)
            fdout.write(struct.pack(int_format, riff_or_form_chunk_size))
def process_file(self):
    """Extract WAMD metadata from a .WAV file as a dict"""
    path = self.filename
    with open(path, 'rb') as handle:
        riff = chunk.Chunk(handle, bigendian=False)
        if riff.getname() != b'RIFF':
            raise Exception('%s is not a RIFF file!' % path)
        if riff.read(4) != b'WAVE':
            raise Exception('%s is not a WAVE file!' % path)
        # scan the sub-chunks for the 'wamd' metadata chunk
        wamd_chunk = None
        while wamd_chunk is None:
            try:
                sub = chunk.Chunk(riff, bigendian=False)
            except EOFError:
                break
            if sub.getname() == b'wamd':
                wamd_chunk = sub
            else:
                sub.skip()
        if not wamd_chunk:
            raise ValueError('No wamd data chunk found')
        # payload is a sequence of TLV records: uint16 id, uint32 len, bytes
        metadata = {}
        total = wamd_chunk.getsize()
        payload = wamd_chunk.read(total)
        cursor = 0
        while cursor < total:
            field_id = struct.unpack_from('< H', payload, cursor)[0]
            field_len = struct.unpack_from('< I', payload, cursor + 2)[0]
            raw_val = struct.unpack_from('< %ds' % field_len, payload,
                                         cursor + 6)[0]
            if field_id not in WamdFile.WAMD_DROP_IDS:
                key = WamdFile.WAMD_IDS.get(field_id, field_id)
                coerce = WamdFile.WAMD_COERCE.get(key, WamdFile._parse_text)
                metadata[key] = coerce(raw_val)
            cursor += 6 + field_len
        self.metadata = metadata
        self.initialised = True
def __init__(self, w, h):
    """Create a w-by-h grid of chunks, indexable as self.chunks[x][y].

    Fixes vs. original: the old code pre-filled self.chunks with w*h
    references to one shared ``Chunk()`` instance before appending the
    real rows, leaving bogus aliased entries at the front of the list;
    it also used Python-2-only ``xrange``.
    """
    self.chunks = []  # all chunks within world, one row list per x
    self.addp = []  # particles queued to be added to other chunks
    for x in range(0, w):
        row = []
        for y in range(0, h):
            row.append(chunk.Chunk(x, y))
        self.chunks.append(row)
def new_chunk(self, pkt):
    '''
    creates a new tcp.Chunk for the pkt to live in. Only called if an
    attempt has been made to merge the packet with all existing chunks.
    '''
    # local renamed from `chunk` to avoid shadowing the imported module
    fresh = tcp.Chunk()
    fresh.merge(pkt, self.create_merge_callback(pkt))
    # if this chunk starts at the stream's initial sequence number, it
    # becomes the final data chunk and the arrival bookkeeping is updated
    if self.seq_start and fresh.seq_start == self.seq_start:
        self.final_data_chunk = fresh
        self.final_arrival_pointer = fresh.seq_end
        self.final_arrival_data.insert((pkt.seq, pkt.ts))
    self.chunks.insert(fresh)
def ScanIndex(self):
    """Scan self.f and return the file offset of every CYCF frame chunk.

    Leaves the stream positioned at EOF. Raises Exception on the first
    non-CYCF chunk.

    Fix vs. original: the raise concatenated str + bytes
    ("..." + ck.getname() + "?)"), which raises TypeError on Python 3
    instead of the intended Exception; use %-formatting instead.
    """
    idx = []
    while True:
        idx.append(self.f.tell())
        try:
            # no alignment, little-endian, size field includes the header
            ck = chunk.Chunk(self.f, False, False, True)
        except EOFError:
            return idx[:-1]  # last recorded offset was EOF, drop it
        if ck.getname() != b'CYCF':
            raise Exception(
                "Not a cycloid IFF log file (got %s?)" % ck.getname())
        ck.skip()
def wamd(fname):
    """Extract WAMD metadata from a .WAV file as a dict"""
    with open(fname, 'rb') as f:
        riff = chunk.Chunk(f, bigendian=False)
        if riff.getname() != b'RIFF':
            raise Exception('%s is not a RIFF file!' % fname)
        if riff.read(4) != b'WAVE':
            raise Exception('%s is not a WAVE file!' % fname)
        # locate the 'wamd' sub-chunk, skipping everything else
        wamd_chunk = None
        while wamd_chunk is None:
            try:
                sub = chunk.Chunk(riff, bigendian=False)
            except EOFError:
                break
            if sub.getname() == b'wamd':
                wamd_chunk = sub
            else:
                sub.skip()
        if not wamd_chunk:
            raise Exception('"wamd" WAV chunk not found in file %s' % fname)
        # payload is a sequence of TLV records: uint16 id, uint32 len, bytes
        metadata = {}
        total = wamd_chunk.getsize()
        payload = wamd_chunk.read(total)
        cursor = 0
        while cursor < total:
            field_id = struct.unpack_from('< H', payload, cursor)[0]
            field_len = struct.unpack_from('< I', payload, cursor + 2)[0]
            raw_val = struct.unpack_from('< %ds' % field_len, payload,
                                         cursor + 6)[0]
            if field_id not in WAMD_DROP_IDS:
                key = WAMD_IDS.get(field_id, field_id)
                metadata[key] = WAMD_COERCE.get(key, _parse_text)(raw_val)
            cursor += 6 + field_len
        return stringifyKeys(metadata)
def read_riff(self, filename):
    """Read a RIFF file and put it in a RiffChunk object

    :param filename: The path to the RIFF file
    :return: A RiffChunk Object
    """
    with open(filename, 'rb') as file:
        outer = chunk.Chunk(file, bigendian=self.bigendian)
        name = outer.getname()
        # payload after the 4-byte form-type field
        payload_size = outer.getsize() - CHUNKTYPE_SIZE
        form_type = outer.read(CHUNKTYPE_SIZE)
        children = self.read_chunks(payload_size, file)
        return RiffChunk(name, children, form_type)
def fuzz():
    """Exercise the chunk.Chunk API against the file named in argv[1]."""
    with open(sys.argv[1], "rb") as fd:
        try:
            ck = chunk.Chunk(fd)
            # poke the metadata accessors
            ck.getname()
            ck.getsize()
            ck.tell()
            # drain the payload in 64-byte reads
            while ck.read(64):
                pass
            ck.skip()
            # reads after exhaustion, then close
            while ck.read(64):
                pass
            ck.close()
        except EOFError:
            pass
def read_header(f):
    """Read the optional 'cfg1' header chunk from a cycloid IFF log.

    Returns (ok, header_bytes); header_bytes is None when the stream
    starts directly with a CYCF frame (the stream is rewound to 0 in
    that case) and ok is False at EOF or on an unknown chunk.
    """
    try:
        ck = chunk.Chunk(f, False, False, True)
    except EOFError:
        return False, None
    name = ck.getname()
    if name == b'cfg1':
        hdr = ck.read()
        print("read header: ", struct.unpack("=%dh" % (len(hdr) / 2), hdr))
        return True, hdr
    if name == b'CYCF':
        # no config header: rewind so the frame reader sees this chunk
        f.seek(0)
        return True, None
    print("Not a cycloid IFF log file (got ", name, "?)")
    return False, None
def read_lwob(self):
    """Read version 1 file, LW < 6.

    Dispatches on each top-level chunk name and delegates to the
    module-level read_* helpers; unknown chunks are skipped.
    """
    self.last_pols_count = 0
    print(f"Importing LWO: {self.filename}\nLWO v1 Format")
    while True:
        try:
            rootchunk = chunk.Chunk(self.f)
        except EOFError:
            break
        if rootchunk.chunkname == b"SRFS":
            read_tags(rootchunk.read(), self)
        elif rootchunk.chunkname == b"LAYR":
            read_layr_5(rootchunk.read(), self.layers)
        elif rootchunk.chunkname == b"PNTS":
            if len(self.layers) == 0:
                # LWOB files have no LAYR chunk to set this up.
                nlayer = _obj_layer()
                nlayer.name = "Layer 1"
                self.layers.append(nlayer)
            read_pnts(rootchunk.read(), self.layers)
        elif rootchunk.chunkname == b"POLS":
            self.last_pols_count = read_pols_5(rootchunk.read(), self.layers)
        elif rootchunk.chunkname == b"PCHS":
            # PCHS polygons mark the layer as subdivision geometry
            self.last_pols_count = read_pols_5(rootchunk.read(), self.layers)
            self.layers[-1].has_subds = True
        elif rootchunk.chunkname == b"PTAG":
            (tag_type, ) = struct.unpack("4s", rootchunk.read(4))
            if tag_type == b"SURF":
                # surf-tag reader was commented out; fail loudly instead
                raise Exception("Missing commented out function")
                # read_surf_tags_5(
                #     rootchunk.read(), self.layers, self.last_pols_count
                # )
            else:
                rootchunk.skip()
        elif rootchunk.chunkname == b"SURF":
            read_surf_5(rootchunk.read(), self)
        else:
            # For Debugging \/.
            # if handle_layer: print(f"Skipping Chunk: {rootchunk.chunkname}")
            rootchunk.skip()
def set_block(self, position, number):  # set number to 0 (air) to remove block
    """Place block *number* at world *position*, creating the containing
    chunk on demand and rebuilding the meshes of this chunk and of any
    neighbouring chunk that shares the touched face.
    """
    x, y, z = position
    chunk_position = self.get_chunk_position(position)
    if not chunk_position in self.chunks:  # if no chunks exist at this position, create a new one
        if number == 0:
            return  # no point in creating a whole new chunk if we're not gonna be adding anything
        self.chunks[chunk_position] = chunk.Chunk(self, chunk_position)
    if self.get_block_number(
            position) == number:  # no point updating mesh if the block is the same
        return
    lx, ly, lz = self.get_local_position(position)
    self.chunks[chunk_position].blocks[lx][ly][lz] = number
    self.chunks[chunk_position].modified = True
    self.chunks[chunk_position].update_at_position((x, y, z))
    self.chunks[chunk_position].update_mesh()
    cx, cy, cz = chunk_position

    # a block on a chunk border also changes the visible faces of the
    # adjacent chunk, so that neighbour (if loaded) must be rebuilt too
    def try_update_chunk_at_position(chunk_position, position):
        if chunk_position in self.chunks:
            self.chunks[chunk_position].update_at_position(position)
            self.chunks[chunk_position].update_mesh()

    if lx == chunk.CHUNK_WIDTH - 1:
        try_update_chunk_at_position((cx + 1, cy, cz), (x + 1, y, z))
    if lx == 0:
        try_update_chunk_at_position((cx - 1, cy, cz), (x - 1, y, z))
    if ly == chunk.CHUNK_HEIGHT - 1:
        try_update_chunk_at_position((cx, cy + 1, cz), (x, y + 1, z))
    if ly == 0:
        try_update_chunk_at_position((cx, cy - 1, cz), (x, y - 1, z))
    if lz == chunk.CHUNK_LENGTH - 1:
        try_update_chunk_at_position((cx, cy, cz + 1), (x, y, z + 1))
    if lz == 0:
        try_update_chunk_at_position((cx, cy, cz - 1), (x, y, z - 1))
def read_chunk(self, file):
    """Read a chunk starting at the actual position of the file handler

    :param file: The file handler
    :return: a Chunk (ListChunk for LIST containers, FinalChunk otherwise)
    """
    current = chunk.Chunk(file, bigendian=self.bigendian)
    name = current.getname()
    print(name)
    size = current.getsize()
    if name != b'LIST':
        # leaf chunk: wrap the raw payload
        return FinalChunk(name, Data(current.read(size)))
    # container: 4-byte list type, then nested chunks
    list_type = current.read(CHUNKTYPE_SIZE)
    children = self.read_chunks(size - CHUNKTYPE_SIZE, file)
    return ListChunk(name, children, list_type)
def search_list(file, limit):
    """Parse consecutive RIFF chunks from *file* until *limit* payload
    bytes have been consumed, recursing into LIST containers.

    :param file: binary file positioned at the first chunk header
    :param limit: number of bytes belonging to the enclosing container
    :return: list of ListChunk/LeafChunk nodes
    """
    chunks = list()
    bytes_explored = 0
    while (bytes_explored < limit):
        explored_chunk = None
        try:
            current_chunk = chunk.Chunk(file, bigendian=False)
            current_chunk_name = current_chunk.getname()
            current_chunk_size = current_chunk.getsize()
            # if it's a LIST chunk, call this function recursively
            if current_chunk_name == b'LIST':
                current_chunk_type = current_chunk.read(4)
                explored_chunk = ListChunk(current_chunk_name,
                                           current_chunk_size,
                                           current_chunk_type)
                subchunks = search_list(file, current_chunk_size - 4)
                explored_chunk.add_subchunks(subchunks)
            # if it's a basic chunk that still fits inside this container,
            # collect its fields via utils.parse_chunk_data
            elif bytes_explored + current_chunk_size + 8 <= limit:
                explored_chunk = LeafChunk(current_chunk_name,
                                           current_chunk_size)
                utils.parse_chunk_data(file,
                                       current_chunk_name.decode('utf-8'),
                                       current_chunk, explored_chunk,
                                       current_chunk_size)
            # chunk belongs to the parent container: rewind past the 8-byte
            # header we just consumed so the caller re-reads it, avoiding
            # insertion into the wrong LIST chunk
            else:
                file.seek(-8, 1)
        except EOFError:
            print('eof')
            break  #return
        if explored_chunk is not None:
            # +8 accounts for the chunk's id and size header
            bytes_explored = bytes_explored + explored_chunk.get_size() + 8
            chunks.append(explored_chunk)
        else:
            break
    return chunks
def readLwo2(file, filename, layers, surfs, tags):
    '''
    Read version 2 file, LW 6+.

    Iterates top-level chunks, delegating to the read* helpers; the
    trailing skip() each iteration advances past any unread payload.
    '''
    last_pols_count = 0
    layer = None
    while True:
        try:
            rootchunk = chunk.Chunk(file)
        except EOFError:
            break
        if rootchunk.chunkname == b'TAGS':
            readTags(rootchunk.read(), tags)
        elif rootchunk.chunkname == b'BBOX':
            rootchunk.skip()
        elif rootchunk.chunkname == b'LAYR':
            layer = readLayr2(rootchunk.read(), layers)
        elif rootchunk.chunkname == b'PNTS':
            readPoints(rootchunk.read(), layer)
        elif rootchunk.chunkname == b'POLS':
            sub_type = rootchunk.read(4)
            # PTCH is LW's Subpatches, SUBD is CatmullClark.
            if (sub_type == b'FACE' or sub_type == b'PTCH'
                    or sub_type == b'SUBD'):
                last_pols_count = readPols2(rootchunk.read(), layer)
                # NOTE(review): other readers in this family set has_subds
                # only for non-FACE subtypes; confirm '==' is intended here.
                if sub_type == b'FACE':
                    layer.has_subds = True
            # else:
            #     FreeCAD.Console.PrintMessage(" Skipping POLS.%s\n" % sub_type)
            rootchunk.skip()
        elif rootchunk.chunkname == b'PTAG':
            sub_type, = struct.unpack("4s", rootchunk.read(4))
            if sub_type == b'SURF':
                readSurfTags(rootchunk.read(), layer, last_pols_count)
            # else:
            #     FreeCAD.Console.PrintMessage(" Skipping PTAG.%s\n" % sub_type)
            rootchunk.skip()
        elif rootchunk.chunkname == b'SURF':
            readSurf2(rootchunk.read(), surfs)
        # else:
        #     FreeCAD.Console.PrintMessage(" Skipping %s\n" % rootchunk.chunkname)
        rootchunk.skip()
def import_gadds_frame(self, mask_radius="auto"):
    """Import a two-dimensional X-ray diffraction pattern from a GADDS
    system detector, typically using the .gfrm extension.

    :param mask_radius: "auto" computes the circular mask radius as 95%
        of the smaller image dimension / 2. NOTE(review): any other value
        leaves ``radius`` unbound (NameError), as in the original —
        confirm whether a numeric radius should be accepted.
    :return: numpy masked array of the frame with a circular mask applied.
    """
    filename = self.filename
    with open(filename, 'rb') as f:
        raw_chunk = chunk.Chunk(f)
        # Find out how big the header is
        raw_chunk.seek(152)
        preamble = raw_chunk.read(50)
        assert preamble[0:8] == b"HDRBLKS:"
        hdrblks = int(preamble[9:])
        raw_chunk.seek(0)
        # Read in the header (hdrblks * 512 bytes total, 8 already counted)
        header = raw_chunk.read(hdrblks * 512 - 8)
        # Determine image dimensions from header
        metadata = self._read_header(header)
        frame_shape = shape(rows=int(metadata['nrows']),
                            columns=int(metadata['ncols']))
        data_length = frame_shape.rows * frame_shape.columns
        data_bytes = raw_chunk.read(data_length)
        # leftover = raw_chunk.read()  # Not used
    # np.fromstring is deprecated and removed in NumPy 2.0; frombuffer is
    # the drop-in replacement for binary input.
    data = np.frombuffer(data_bytes, dtype=np.dtype('u1'))
    data = data.reshape(frame_shape)
    # Apply a round mask
    x, y = np.ogrid[:frame_shape.rows, :frame_shape.columns]
    c = Pixel(vertical=frame_shape.rows / 2,
              horizontal=frame_shape.columns / 2)
    # convert cartesian --> polar coordinates
    # NOTE(review): x spans rows but is offset by c.horizontal (and y by
    # c.vertical) — the axes look swapped; preserved as-is, confirm intent.
    dx = x - c.horizontal
    dy = y - c.vertical
    r2 = (dx * dx) + (dy * dy)
    # Determine radius from image dimensions
    if mask_radius == "auto":
        radius = 0.95 * min(frame_shape) / 2
    circmask = np.logical_not(r2 <= radius * radius)
    masked_data = np.ma.array(data, mask=circmask)
    return masked_data
def read_lwob(file, filename, layers, surfs, tags, add_subd_mod):
    """Read version 1 file, LW < 6.

    Dispatches on each top-level chunk name and delegates to the read_*
    helpers; unknown chunks are skipped.
    """
    last_pols_count = 0
    print("Importing LWO: " + filename + "\nLWO v1 Format")
    while True:
        try:
            rootchunk = chunk.Chunk(file)
        except EOFError:
            break
        if rootchunk.chunkname == b'SRFS':
            read_tags(rootchunk.read(), tags)
        elif rootchunk.chunkname == b'LAYR':
            read_layr_5(rootchunk.read(), layers)
        elif rootchunk.chunkname == b'PNTS':
            if len(layers) == 0:
                # LWOB files have no LAYR chunk to set this up.
                nlayer = _obj_layer()
                nlayer.name = "Layer 1"
                layers.append(nlayer)
            read_pnts(rootchunk.read(), layers)
        elif rootchunk.chunkname == b'POLS':
            last_pols_count = read_pols_5(rootchunk.read(), layers)
        elif rootchunk.chunkname == b'PCHS':
            # PCHS polygons mark the layer as subdivision geometry
            last_pols_count = read_pols_5(rootchunk.read(), layers)
            layers[-1].has_subds = True
        elif rootchunk.chunkname == b'PTAG':
            tag_type, = struct.unpack("4s", rootchunk.read(4))
            if tag_type == b'SURF':
                read_surf_tags_5(rootchunk.read(), layers, last_pols_count)
            else:
                rootchunk.skip()
        elif rootchunk.chunkname == b'SURF':
            read_surf_5(rootchunk.read(), surfs)
        else:
            # For Debugging \/.
            #if handle_layer:
            #print("Skipping Chunk: ", rootchunk.chunkname)
            rootchunk.skip()
def readLwob(file, filename, layers, surfs, tags):
    '''
    Read version 1 file, LW < 6.

    Dispatches on each top-level chunk name and delegates to the read*
    helpers; unknown chunks are skipped.
    '''
    last_pols_count = 0
    layer = None
    while True:
        try:
            rootchunk = chunk.Chunk(file)
        except EOFError:
            break
        if rootchunk.chunkname == b'SRFS':
            readTags(rootchunk.read(), tags)
        elif rootchunk.chunkname == b'LAYR':
            layer = readLayr1(rootchunk.read(), layers)
        elif rootchunk.chunkname == b'PNTS':
            if (layer is None):
                # LWOB files have no LAYR chunk to set this up.
                layer = Layer()
                layer.nam = "Layer 1"
                layer.index = len(layers)
                layers[layer.index] = layer
            readPoints(rootchunk.read(), layer)
        elif rootchunk.chunkname == b'POLS':
            last_pols_count = readPols1(rootchunk.read(), layer)
        elif rootchunk.chunkname == b'PCHS':
            # PCHS polygons mark the layer as subdivision geometry
            last_pols_count = readPols1(rootchunk.read(), layer)
            layer.has_subds = True
        elif rootchunk.chunkname == b'PTAG':
            tag_type, = struct.unpack("4s", rootchunk.read(4))
            if tag_type == b'SURF':
                readSurfTags(rootchunk.read(), layer, last_pols_count)
            else:
                rootchunk.skip()
        elif rootchunk.chunkname == b'SURF':
            readSurf1(rootchunk.read(), surfs)
        else:
            rootchunk.skip()
def readLwo2(file, filename, layers, surfs, tags):
    '''
    Read version 2 file, LW 6+.

    Dispatches on each top-level chunk name and delegates to the read*
    helpers; unknown chunks are skipped.
    '''
    last_pols_count = 0
    layer = None
    while True:
        try:
            rootchunk = chunk.Chunk(file)
        except EOFError:
            break
        if rootchunk.chunkname == b'TAGS':
            readTags(rootchunk.read(), tags)
        elif rootchunk.chunkname == b'BBOX':
            rootchunk.skip()
        elif rootchunk.chunkname == b'LAYR':
            layer = readLayr2(rootchunk.read(), layers)
        elif rootchunk.chunkname == b'PNTS':
            readPoints(rootchunk.read(), layer)
        elif rootchunk.chunkname == b'POLS':
            sub_type = rootchunk.read(4)
            # PTCH is LW's Subpatches, SUBD is CatmullClark.
            if (sub_type in (b'FACE', b'PTCH', b'SUBD')):
                last_pols_count = readPols2(rootchunk.read(), layer)
                # NOTE(review): other readers in this family set has_subds
                # only for non-FACE subtypes; confirm '==' is intended here.
                if sub_type == b'FACE':
                    layer.has_subds = True
            rootchunk.skip()
        elif rootchunk.chunkname == b'PTAG':
            sub_type, = UNPACK_NAME(rootchunk.read(4))
            if sub_type == b'SURF':
                readSurfTags(rootchunk.read(), layer, last_pols_count)
            else:
                rootchunk.skip()
        elif rootchunk.chunkname == b'SURF':
            readSurf2(rootchunk.read(), surfs)
        else:
            rootchunk.skip()
def load_chunk(self, chunk_position):
    """Load one chunk from its save file into the world; silently does
    nothing if the file does not exist."""
    # locate and parse the chunk's save file
    path = self.chunk_position_to_path(chunk_position)
    try:
        chunk_blocks = nbt.load(path)["Level"]["Blocks"]
    except FileNotFoundError:
        return
    # create the chunk and copy every block in from the flat NBT array
    new_chunk = chunk.Chunk(self.world, chunk_position)
    self.world.chunks[chunk_position] = new_chunk
    for x in range(chunk.CHUNK_WIDTH):
        for y in range(chunk.CHUNK_HEIGHT):
            for z in range(chunk.CHUNK_LENGTH):
                flat_index = (x * chunk.CHUNK_LENGTH * chunk.CHUNK_HEIGHT
                              + z * chunk.CHUNK_HEIGHT + y)
                new_chunk.blocks[x][y][z] = chunk_blocks[flat_index]
def read_lwo2(file, filename, layers, surfs, tags, add_subd_mod, load_hidden,
              skel_to_arm):
    """Read version 2 file, LW 6+.

    handle_layer gates most branches: read_layr returns False for layers
    the caller asked to skip (e.g. hidden layers with load_hidden off).
    just_read_bones suppresses the SURF tag reader right after a BONE
    polygon chunk.
    """
    handle_layer = True
    last_pols_count = 0
    just_read_bones = False
    print("Importing LWO: " + filename + "\nLWO v2 Format")
    while True:
        try:
            rootchunk = chunk.Chunk(file)
        except EOFError:
            break
        if rootchunk.chunkname == b'TAGS':
            read_tags(rootchunk.read(), tags)
        elif rootchunk.chunkname == b'LAYR':
            handle_layer = read_layr(rootchunk.read(), layers, load_hidden)
        elif rootchunk.chunkname == b'PNTS' and handle_layer:
            read_pnts(rootchunk.read(), layers)
        elif rootchunk.chunkname == b'VMAP' and handle_layer:
            # per-vertex map; first 4 bytes select the map kind
            vmap_type = rootchunk.read(4)
            if vmap_type == b'WGHT':
                read_weightmap(rootchunk.read(), layers)
            elif vmap_type == b'MORF':
                read_morph(rootchunk.read(), layers, False)
            elif vmap_type == b'SPOT':
                read_morph(rootchunk.read(), layers, True)
            elif vmap_type == b'TXUV':
                read_uvmap(rootchunk.read(), layers)
            elif vmap_type == b'RGB ' or vmap_type == b'RGBA':
                read_colmap(rootchunk.read(), layers)
            else:
                rootchunk.skip()
        elif rootchunk.chunkname == b'VMAD' and handle_layer:
            # per-polygon-vertex (discontinuous) map
            vmad_type = rootchunk.read(4)
            if vmad_type == b'TXUV':
                read_uv_vmad(rootchunk.read(), layers, last_pols_count)
            elif vmad_type == b'RGB ' or vmad_type == b'RGBA':
                read_color_vmad(rootchunk.read(), layers, last_pols_count)
            elif vmad_type == b'WGHT':
                # We only read the Edge Weight map if it's there.
                read_weight_vmad(rootchunk.read(), layers)
            else:
                rootchunk.skip()
        elif rootchunk.chunkname == b'POLS' and handle_layer:
            face_type = rootchunk.read(4)
            just_read_bones = False
            # PTCH is LW's Subpatches, SUBD is CatmullClark.
            if (face_type == b'FACE' or face_type == b'PTCH'
                    or face_type == b'SUBD') and handle_layer:
                last_pols_count = read_pols(rootchunk.read(), layers)
                if face_type != b'FACE':
                    layers[-1].has_subds = True
            elif face_type == b'BONE' and handle_layer:
                read_bones(rootchunk.read(), layers)
                just_read_bones = True
            else:
                rootchunk.skip()
        elif rootchunk.chunkname == b'PTAG' and handle_layer:
            tag_type, = struct.unpack("4s", rootchunk.read(4))
            if tag_type == b'SURF' and not just_read_bones:
                # Ignore the surface data if we just read a bones chunk.
                read_surf_tags(rootchunk.read(), layers, last_pols_count)
            elif skel_to_arm:
                if tag_type == b'BNUP':
                    read_bone_tags(rootchunk.read(), layers, tags, 'BNUP')
                elif tag_type == b'BONE':
                    read_bone_tags(rootchunk.read(), layers, tags, 'BONE')
                else:
                    rootchunk.skip()
            else:
                rootchunk.skip()
        elif rootchunk.chunkname == b'SURF':
            read_surf(rootchunk.read(), surfs)
        else:
            #if handle_layer:
            #print("Skipping Chunk:", rootchunk.chunkname)
            rootchunk.skip()