Example #1
    def parse_compressed_list(self, stream, identifier, blocksizes):
        rawsize = dword2py_int(stream.read(4))
        list_identifier = stream.read(4)
        print list_identifier

        # In a compressed stream the size DWORD is an index into the
        # block-sizes table, not the chunk size itself.
        size = blocksizes[rawsize]

        size_field = py_int2dword(size)
        # RIFF chunks are padded to an even number of bytes.
        if size & 1: size += 1

        offset = stream.tell()

        obj = model.RiffList(identifier + size_field + list_identifier)

        while stream.tell() <= offset + size - 8:
            ret = self.parse_comressed_stream(stream, blocksizes)
            if ret is None:
                # A child chunk could not be parsed: fall back to keeping
                # the whole list body as raw bytes.
                stream.seek(offset)
                chunk = stream.read(size - 4)
                return model.RiffUnparsedList(identifier + size_field +
                                              list_identifier + chunk)
            else:
                obj.childs.append(ret)

        return obj
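All of the examples on this page lean on two tiny conversion helpers, dword2py_int and py_int2dword, whose definitions are not shown here. The sketch below is a minimal stand-in, assuming the usual RIFF convention of little-endian 32-bit (DWORD) fields:

import struct

def dword2py_int(data):
    # 4-byte little-endian DWORD string -> Python int
    return struct.unpack('<I', data)[0]

def py_int2dword(value):
    # Python int -> 4-byte little-endian DWORD string
    return struct.pack('<I', value)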
Example #2
 def __init__(self, chunk):
     self.childs = []
     self.chunk = chunk
     self.identifier = 'LIST'
     self.chunk_tag = self.chunk[8:12]
     self.chunk_size = dword2py_int(chunk[4:8])
     self.cache_fields = [(0, 4, 'list identifier'), (4, 4, 'chunk size'),
                          (8, 4, 'chunk tag')]
Example #3
def parse_polygon(obj):
	data = obj.loda.chunk
	offset = 112
	if obj.version == CDR6: offset = 100
	if obj.version == CDR13: offset = 104

	#Polygon angles
	obj.plg_num = dword2py_int(data[offset:offset + 4])
	obj.loda.cache_fields.append((offset, 4, 'num of polygon edges'))
Example #4
	def parse_compressed_object(self, stream, identifier, blocksizes):
		if not identifier[:3].isalnum():
			return None
		rawsize = dword2py_int(stream.read(4))
		# Look up the real chunk size in the block-sizes table.
		size = blocksizes[rawsize]
		size_field = py_int2dword(size)
		# RIFF chunks are padded to an even number of bytes.
		if size & 1: size += 1
		chunk = stream.read(size)
		return model.RiffObject(identifier + size_field + chunk)
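Note that the size DWORD read above is not the chunk size itself but an index into the blocksizes table recovered from the compressed list (see parse_cmpr_list further down). The hypothetical read_chunk below restates that lookup, plus the usual RIFF even-byte padding, as a standalone sketch; it is not part of the original parser:

import io
import struct

def read_chunk(stream, blocksizes):
    # identifier, then a DWORD that indexes the block-sizes table
    identifier = stream.read(4)
    index = struct.unpack('<I', stream.read(4))[0]
    size = blocksizes[index]
    padded = size + (size & 1)        # RIFF word alignment
    return identifier, stream.read(padded)[:size]

# Toy data: one chunk whose payload size comes from blocksizes[1].
blocksizes = [16, 4, 10]
raw = b'vrsn' + struct.pack('<I', 1) + b'\xf4\x01\x00\x00'
ident, payload = read_chunk(io.BytesIO(raw), blocksizes)
# ident == b'vrsn', payload == b'\xf4\x01\x00\x00'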
Example #5
	def __init__(self, chunk):
		self.chunk = chunk
		self.identifier = chunk[:4]
		self.chunk_size = dword2py_int(chunk[4:8])
		self.chunk_tag = '' + self.identifier
		self.cache_fields = [
			(0, 4, 'identifier'),
			(4, 4, 'chunk size'),
		]
Example #6
    def parse_cmpr_list(self, buffer):
        obj = model.RiffCmprList(buffer)
        import StringIO, zlib
        # Size of the zlib-compressed chunk data (cf. RiffCmprList.__init__).
        compressedsize = dword2py_int(buffer[12:16])

        # The compressed chunk data starts at offset 36.
        decomp = zlib.decompressobj()
        uncompresseddata = decomp.decompress(buffer[36:])

        # A second zlib stream follows: the block-sizes table, a flat array
        # of 32-bit sizes indexed by the size fields of the compressed chunks.
        blocksizesdata = zlib.decompress(buffer[36 + compressedsize:])
        blocksizes = []
        for i in range(0, len(blocksizesdata), 4):
            blocksizes.append(dword2py_int(blocksizesdata[i:i + 4]))

        stream = StringIO.StringIO(uncompresseddata)
        while stream.tell() < len(uncompresseddata):
            ret = self.parse_comressed_stream(stream, blocksizes)
            obj.childs.append(ret)

        return obj
Example #7
	def update(self):
		CdrGraphObj.update(self)
		data = self.loda.chunk
		offset = 112
		if self.version == CDR6: offset = 100
		if self.version == CDR13: offset = 104

		#Polygon angles
		self.plg_num = dword2py_int(data[offset:offset + 4])
		self.loda.cache_fields.append((offset, 4, 'num of polygon edges'))
Example #8
	def update(self):

		data = self.chunk[8:]

		#<trfd> chunk header processing
		self.data_num = dword2py_int(data[4:8])
		self.cache_fields.append((12, 4, 'number of data'))

		self.data_start = dword2py_int(data[8:12])
		self.cache_fields.append((16, 4, 'data start'))

		self.data_type_start = dword2py_int(data[12:16])
		self.cache_fields.append((20, 4, 'data types start'))

		#transformation matrix processing
		start = 32 + 8
		if self.version == CDR13: start += 8
		data = self.chunk[start:start + 48]
		self.cache_fields.append((start, 48, 'trafo matrix'))
		self.trafo = parse_matrix(data)
Example #9
	def parse_compressed_object(self, stream, identifier, blocksizes):
		if not identifier[:3].isalnum():
			return None
		rawsize = dword2py_int(stream.read(4))
		size = blocksizes[rawsize]
		size_field = py_int2dword(size)
		if size & 1: size += 1
		chunk = stream.read(size)
		self.report_stream_position(stream.tell())
		class_ = self.get_class(identifier)
		return class_(identifier + size_field + chunk)
Example #10
	def do_update(self, presenter):
		RiffList.do_update(self, presenter)
		self.obj_type = None

		lgob_chunk = find_chunk(self.childs, 'lgob')
		self.loda = find_chunk(lgob_chunk.childs, 'loda')
		# The object type is stored at offset 0x18 in the <loda> chunk.
		self.obj_type = dword2py_int(self.loda.chunk[0x18:0x1c])

		if self.obj_type is not None and self.obj_type in obj_parse:
			parse_trafo(self)
			obj_parse[self.obj_type][0](self)
Example #11
	def __init__(self, chunk):
		RiffObject.__init__(self, chunk)

		data = self.chunk

		#<loda> chunk header processing
		self.data_num = dword2py_int(data[12:16])
		self.cache_fields.append((12, 4, 'number of data'))

		self.data_start = dword2py_int(data[16:20])
		self.cache_fields.append((16, 4, 'data start'))

		self.data_type_start = dword2py_int(data[20:24])
		self.cache_fields.append((20, 4, 'data types start'))

		self.object_type = dword2py_int(data[24:28])
		self.cache_fields.append((24, 4, 'object type'))

		self.data_list = []

		num = self.data_num
		start = self.data_start + 8
		start_t = self.data_type_start + 8

		self.cache_fields.append((start, 4 * num, 'data offsets'))
		self.cache_fields.append((start_t, 4 * num, 'data type offsets'))

		for i in range(self.data_num):
			offset = dword2py_int(data[start + i * 4:start + i * 4 + 4])
			# Data type offsets are stored in reverse order.
			pos = start_t + (num - 1 - i) * 4
			argtype = dword2py_int(data[pos:pos + 4])
			self.data_list.append([argtype, offset])
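The data_list built above pairs an argument type with an offset inside the <loda> chunk; the curve parser further down walks it inline to locate the DATA_COORDS block. A hypothetical helper doing the same lookup (not in the original code) might look like this:

def find_loda_arg(loda, argtype):
    # Return the absolute chunk offset of the first <loda> argument of the
    # given type, or None. Offsets in data_list are relative to the chunk
    # payload, hence the +8 for the RIFF header.
    for item_type, item_offset in loda.data_list:
        if item_type == argtype:
            return item_offset + 8
    return None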
Example #12
	def do_update(self, presenter):
		obj_type = None

		lgob_chunk = find_chunk(self.childs, 'lgob')
		loda_chunk = find_chunk(lgob_chunk.childs, 'loda')
		# The object type is stored at offset 0x18 in the <loda> chunk.
		obj_type = dword2py_int(loda_chunk.chunk[0x18:0x1c])

		if obj_type is not None and obj_type in obj_dict:
			new_obj = obj_dict[obj_type](self.chunk)
			new_obj.parent = self.parent
			new_obj.version = self.version
			new_obj.childs = self.childs
			new_obj.loda = loda_chunk

			index = self.parent.childs.index(self)
			self.parent.childs.insert(index, new_obj)
			self.parent.childs.remove(self)
			new_obj.do_update(presenter)
		else:
			RiffList.do_update(self, presenter)
Example #13
	def __init__(self, chunk):
		RiffList.__init__(self, chunk)

		self.compressedsize = dword2py_int(chunk[12:16])
		self.uncompressedsize = dword2py_int(chunk[16:20])
		self.blocksizessize = dword2py_int(chunk[20:24])
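Read together with parse_cmpr_list earlier on this page, the <cmpr> list appears to carry three size fields at offsets 12, 16 and 20, followed by two back-to-back zlib streams starting at offset 36: the chunk data and then the block-sizes table. A rough sketch of that split, with field and function names of my own choosing:

import struct
import zlib

def split_cmpr_list(buf):
    compressed_size = struct.unpack('<I', buf[12:16])[0]
    # The uncompressed size (16:20) and block-sizes size (20:24) are read
    # by RiffCmprList.__init__ but are not needed for the split itself.
    data = zlib.decompress(buf[36:36 + compressed_size])
    table = zlib.decompress(buf[36 + compressed_size:])
    blocksizes = [struct.unpack('<I', table[i:i + 4])[0]
                  for i in range(0, len(table), 4)]
    return data, blocksizes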
Example #14
	def update(self):
		CdrGraphObj.update(self)
		data = self.loda.chunk
		offset = 108

		# Locate the coordinates block via the <loda> data list.
		for item in self.loda.data_list:
			if item[0] == const.DATA_COORDS:
				offset = item[1] + 8

		self.paths = []
		path = []
		points = []
		point1 = []
		point2 = []

		pointnum = dword2py_int(data[offset:offset + 4])
		self.num_of_points = pointnum
		self.loda.cache_fields.append((offset, 4, 'num of points'))
		self.loda.cache_fields.append((offset + 4, 8 * pointnum, 'curve points'))
		self.loda.cache_fields.append((offset + 4 + pointnum * 8, pointnum, 'point flags'))

		for i in range(pointnum):
			x = parse_size_value(data[offset + 4 + i * 8:offset + 8 + i * 8])
			y = parse_size_value(data[offset + 8 + i * 8:offset + 12 + i * 8])

			# One flag byte per point follows the coordinate array.
			# Bits 0x10/0x20 select the node type (cusp/smooth/symmetrical).
			point_type = ord(data[offset + 4 + pointnum * 8 + i])
			if point_type & 0x10 == 0 and point_type & 0x20 == 0:
				marker = NODE_CUSP
			if point_type & 0x10 == 0x10:
				marker = NODE_SMOOTH
			if point_type & 0x20 == 0x20:
				marker = NODE_SYMMETRICAL

			# Bits 0x40/0x80 encode the point's role in the path.
			if point_type & 0x40 == 0 and point_type & 0x80 == 0:
				# Neither bit set: the point starts a new path.
				if path:
					path.append(deepcopy(points))
					path.append(CURVE_OPENED)
					self.paths.append(deepcopy(path))
				path = []
				points = []
				point1 = []
				point2 = []
				path.append([x, y])
			if point_type & 0x40 == 0x40 and point_type & 0x80 == 0:
				# 0x40 only: a straight line point.
				points.append([x, y])
				point1 = []
				point2 = []
			if point_type & 0x40 == 0 and point_type & 0x80 == 0x80:
				# 0x80 only: a curve end point; point1/point2 hold its controls.
				points.append(deepcopy([point1, point2, [x, y], marker]))
				point1 = []
				point2 = []
			if point_type & 0x40 == 0x40 and point_type & 0x80 == 0x80:
				# Both bits set: a control point for the next curve end point.
				if point1:
					point2 = [x, y]
				else:
					point1 = [x, y]
			if point_type & 8 == 8:
				# Bit 0x08 closes the current path.
				if path and points:
					path.append(deepcopy(points))
					path.append(CURVE_CLOSED)
					self.paths.append(deepcopy(path))
					path = []
					points = []
		# Flush a trailing open path, if any.
		if path:
			path.append(deepcopy(points))
			path.append(CURVE_OPENED)
			self.paths.append(deepcopy(path))
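The flag handling above packs three things into each per-point byte: bits 0x10/0x20 select the node type, bits 0x40/0x80 the point's role, and bit 0x08 closes the path. The decoder below restates that logic in isolation; the bit meanings are read off the conditionals above, not taken from a format specification:

def describe_point_flag(flag):
    # Node type; the strings stand in for the NODE_CUSP/NODE_SMOOTH/
    # NODE_SYMMETRICAL constants used above.
    marker = 'cusp'
    if flag & 0x10:
        marker = 'smooth'
    if flag & 0x20:
        marker = 'symmetrical'
    # Role of the point within the path.
    role = {
        (False, False): 'start of a new path',
        (True, False): 'straight line point',
        (False, True): 'curve end point',
        (True, True): 'control point',
    }[(bool(flag & 0x40), bool(flag & 0x80))]
    closes_path = bool(flag & 0x08)
    return marker, role, closes_path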