Example #1
    def testSimpleSerialization(self):
        from cStringIO import StringIO
        basic_turtle = """@prefix dc: <http://purl.org/dc/terms/> .
        @prefix example: <http://example.com/> .

        example:foo dc:title "Foo" .
        example:bar dc:title "Bar" .
        example:baz dc:subject example:foo ."""

        graph = self.turtle_parser.parse(basic_turtle)
        f = StringIO()
        self.profile.setPrefix('ex', self.primitives.NamedNode('http://example.com/'))
        self.profile.setPrefix('dc', self.primitives.NamedNode('http://purl.org/dc/terms/'))
        self.serialize_turtle(graph = graph, f = f, profile = self.profile)
        f.seek(0)
        graph2 = self.turtle_parser.parse(f.read())
        f.seek(0)
        self.assertEqual(f.read().strip(), """@prefix ex: <http://example.com/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix dc: <http://purl.org/dc/terms/> .
ex:bar dc:title "Bar" ;
       .

ex:baz dc:subject ex:foo ;
       .

ex:foo dc:title "Foo" ;
       .""")
Example #2
    def testMultiplePredicates(self):
        from cStringIO import StringIO
        basic_turtle = """@prefix dc: <http://purl.org/dc/terms/> .
        @prefix example: <http://example.com/> .

        example:foo dc:title "Foo" ;
                    dc:author "Bar" ;
                    dc:subject example:yesfootoo .
        
        example:garply dc:title "Garply" ;
                    dc:author "Baz" ;
                    dc:subject example:thegarply ."""

        graph = self.turtle_parser.parse(basic_turtle)
        f = StringIO()
        self.profile.setPrefix('ex', self.primitives.NamedNode('http://example.com/'))
        self.profile.setPrefix('dc', self.primitives.NamedNode('http://purl.org/dc/terms/'))
        self.serialize_turtle(graph = graph, f = f, profile = self.profile)
        f.seek(0)
        graph2 = self.turtle_parser.parse(f.read())
        f.seek(0)
        self.assertEqual(f.read().strip(), """@prefix ex: <http://example.com/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix dc: <http://purl.org/dc/terms/> .
ex:foo dc:author "Bar" ;
       dc:subject ex:yesfootoo ;
       dc:title "Foo" ;
       .

ex:garply dc:author "Baz" ;
          dc:subject ex:thegarply ;
          dc:title "Garply" ;
          .""")
Example #3
	def save(self):
		if self.img_principal:
			image = Image.open(self.img_principal)
			if image.mode not in ('L', 'RGB'):
				image = image.convert('RGB')

			# Resize and re-save the main image as PNG.
			temp_handle = StringIO()
			img_tmp = image.resize(IMAGE_SIZES['img_principal'], Image.ANTIALIAS)
			img_tmp.save(temp_handle, 'png')
			temp_handle.seek(0)
			suf = SimpleUploadedFile(os.path.split(self.img_principal.name)[-1], temp_handle.read(), content_type='image/png')
			self.img_principal.save(suf.name, suf, save=False)

			# Large thumbnail.
			img_tmp = image.resize(IMAGE_SIZES['thumb_g'], Image.ANTIALIAS)
			temp_handle = StringIO()
			img_tmp.save(temp_handle, 'png')
			temp_handle.seek(0)
			suf = SimpleUploadedFile(os.path.split(self.img_principal.name)[-1], temp_handle.read(), content_type='image/png')
			self.thumb_g.save(suf.name, suf, save=False)

			# Small thumbnail.
			img_tmp = image.resize(IMAGE_SIZES['thumb_p'], Image.ANTIALIAS)
			temp_handle = StringIO()
			img_tmp.save(temp_handle, 'png')
			temp_handle.seek(0)
			suf = SimpleUploadedFile(os.path.split(self.img_principal.name)[-1], temp_handle.read(), content_type='image/png')
			self.thumb_p.save(suf.name, suf, save=False)
		super(Peca, self).save()
Example #4
  def create_thumbnail(self):
    # Original code: http://snipt.net/danfreak/generate-thumbnails-in-django-with-pil/
    # And http://www.yilmazhuseyin.com/blog/dev/create-thumbnails-imagefield-django/

    converter = Converter(self.imageFile)
    image = converter.reduceSize()

    # Save the thumbnail
    temp_handle = StringIO()
    image.save(temp_handle, converter.PIL_TYPE)
    temp_handle.seek(0)
    suf = SimpleUploadedFile(os.path.split(self.imageFile.name)[-1],
                             temp_handle.read(), content_type=converter.DJANGO_TYPE)
    # Save SimpleUploadedFile into image field
    self.imageThumbnailFile.save('%s_thumbnail.%s' % (os.path.splitext(suf.name)[0], converter.FILE_EXTENSION), suf, save=False)

    temp_handle = StringIO()
    squaredImage = converter.createCenteredSquare()
    squaredImage.save(temp_handle, converter.PIL_TYPE)
    temp_handle.seek(0)
    suf = SimpleUploadedFile(os.path.split(self.imageFile.name)[-1],
                             temp_handle.read(), content_type=converter.DJANGO_TYPE)
    self.imageThumbnailSquaredFile.save('%s_thumbnail_squared.%s' % (os.path.splitext(suf.name)[0], converter.FILE_EXTENSION), suf, save=False)

    bwImage = squaredImage.convert("L")
    temp_handle = StringIO()

    bwImage.save(temp_handle, converter.PIL_TYPE)
    temp_handle.seek(0)
    suf = SimpleUploadedFile(os.path.split(self.imageFile.name)[-1],
                             temp_handle.read(), content_type=converter.DJANGO_TYPE)
    self.imageThumbnailBWFile.save('%s_thumbnail_bw.%s' % (os.path.splitext(suf.name)[0], converter.FILE_EXTENSION), suf, save=False)
Example #5
    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):

        try:
            if self.image.file is not None:
                img = Image.open(self.image.file)

                thumbnail = img.resize((settings.GUDFUD_USER_THUMBNAIL_WIDTH, settings.GUDFUD_USER_THUMBNAIL_HEIGHT),
                                       Image.ANTIALIAS)

                temp_handle_img = StringIO()
                img.save(temp_handle_img, 'jpeg')
                temp_handle_img.seek(0)

                temp_handle_thumbnail = StringIO()
                thumbnail.save(temp_handle_thumbnail, 'jpeg')
                temp_handle_thumbnail.seek(0)

                fname_thumbnail = str(self.id) + ".jpeg"
                suf_thumbnail = SimpleUploadedFile(fname_thumbnail, temp_handle_thumbnail.read(),
                                                   content_type='image/jpeg')
                fname_img = str(self.id) + ".jpeg"
                suf_img = SimpleUploadedFile(fname_img, temp_handle_img.read(), content_type='image/jpeg')

                self.thumbnail_image.save(fname_thumbnail, suf_thumbnail, save=False)
                self.image.save(fname_img, suf_img, save=False)
        except ValueError:
            pass

        super(BaseUser, self).save(force_insert, force_update, using, update_fields)
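Examples #3, #4, and #5 all repeat the same pattern: resize with PIL, serialize into a StringIO, rewind, wrap in a SimpleUploadedFile, and store on a field with save=False. A minimal sketch of that pattern as a reusable helper (the helper name and signature are hypothetical, not from any of the projects above):

import os
from StringIO import StringIO
from PIL import Image
from django.core.files.uploadedfile import SimpleUploadedFile

def save_resized(image, size, field, source_name, fmt='png'):
    # Resize a PIL image and attach it to a Django file field without
    # triggering a model save (save=False, as in the examples above).
    resized = image.resize(size, Image.ANTIALIAS)
    temp_handle = StringIO()
    resized.save(temp_handle, fmt)
    temp_handle.seek(0)
    suf = SimpleUploadedFile(os.path.split(source_name)[-1],
                             temp_handle.read(),
                             content_type='image/%s' % fmt)
    field.save(suf.name, suf, save=False)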
Example #6
    def deserialize_tx_payload(self, data):
        msg = {}
        if type(data) is str:
            data = StringIO(data)

        msg['version'] = struct.unpack("<I", data.read(4))[0]

        msg['tx_in_count'] = self.deserialize_int(data)
        msg['tx_in'] = []
        for _ in xrange(msg['tx_in_count']):
            tx_in = self.deserialize_tx_in(data)
            msg['tx_in'].append(tx_in)

        msg['tx_out_count'] = self.deserialize_int(data)
        msg['tx_out'] = []
        for _ in xrange(msg['tx_out_count']):
            tx_out = self.deserialize_tx_out(data)
            msg['tx_out'].append(tx_out)

        msg['lock_time'] = struct.unpack("<I", data.read(4))[0]

        # Calculate hash from the entire payload
        payload = self.serialize_tx_payload(msg)
        msg['tx_hash'] = hexlify(sha256(sha256(payload))[::-1])

        return msg
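Examples #6 and #29 nest calls as sha256(sha256(payload)), which only works if sha256 is a module-level helper returning the raw digest rather than a hashlib object. A plausible definition, stated here as an assumption:

import hashlib

def sha256(data):
    # Return the raw 32-byte digest so calls can be nested for
    # Bitcoin-style double-SHA256.
    return hashlib.sha256(data).digest()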
Example #7
def extract( filename, dir ):
    """ Thank you very much! 
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/465649
    """
    zf = zipfile.ZipFile( filename )

    # make base
    pushd = os.getcwd()
    if not os.path.isdir( dir ):
        os.mkdir( dir )
    os.chdir( dir )

    # extract files
    for fn in zf.namelist():
        fdir = os.path.dirname(fn)
        if fdir and not os.path.exists(fdir):
            os.makedirs(fdir)
        if fn.endswith('/'):
            continue
        try:
            out = open( fn, 'wb' )
            buffer = StringIO( zf.read( fn ))
            buflen = 2 ** 20
            datum = buffer.read( buflen )
            while datum:
                out.write( datum )
                datum = buffer.read( buflen )
            out.close()
        finally:
            pass #print fn
    os.chdir( pushd )
Example #8
def store_item(key, val, stream):

    """ The MANIFEST specification limits the width of individual
    lines to 72 bytes (including the terminating newlines). Any key
    and value pair that would be longer must be split up over multiple
    continuing lines"""

    key = key or ""
    val = val or ""

    if not (0 < len(key) < 69):
        raise Exception("Invalid key length: %i" % len(key))

    if len(key) + len(val) > 68:
        kvbuffer = StringIO(": ".join((key, val)))

        # first grab 70 (which is 72 after the trailing newline)
        stream.write(kvbuffer.read(70))

        # now only 69 at a time, because we need a leading space and a
        # trailing \n
        part = kvbuffer.read(69)
        while part:
            stream.write("\n ")
            stream.write(part)
            part = kvbuffer.read(69)
        kvbuffer.close()

    else:
        stream.write(key)
        stream.write(": ")
        stream.write(val)

    stream.write("\n")
Example #9
class TBufferedTransport(TTransportBase,CReadableTransport):

  """Class that wraps another transport and buffers its I/O.

  The implementation uses a (configurable) fixed-size read buffer
  but buffers all writes until a flush is performed.
  """

  DEFAULT_BUFFER = 4096

  def __init__(self, trans, rbuf_size = DEFAULT_BUFFER):
    self.__trans = trans
    self.__wbuf = StringIO()
    self.__rbuf = StringIO("")
    self.__rbuf_size = rbuf_size

  def isOpen(self):
    return self.__trans.isOpen()

  def open(self):
    return self.__trans.open()

  def close(self):
    return self.__trans.close()

  def read(self, sz):
    ret = self.__rbuf.read(sz)
    if len(ret) != 0:
      return ret

    self.__rbuf = StringIO(self.__trans.read(max(sz, self.__rbuf_size)))
    return self.__rbuf.read(sz)

  def write(self, buf):
    self.__wbuf.write(buf)

  def flush(self):
    out = self.__wbuf.getvalue()
    # reset wbuf before write/flush to preserve state on underlying failure
    self.__wbuf = StringIO()
    self.__trans.write(out)
    self.__trans.flush()

  # Implement the CReadableTransport interface.
  @property
  def cstringio_buf(self):
    return self.__rbuf

  def cstringio_refill(self, partialread, reqlen):
    retstring = partialread
    if reqlen < self.__rbuf_size:
      # try to make a read of as much as we can.
      retstring += self.__trans.read(self.__rbuf_size)

    # but make sure we do read reqlen bytes.
    if len(retstring) < reqlen:
      retstring += self.__trans.readAll(reqlen - len(retstring))

    self.__rbuf = StringIO(retstring)
    return self.__rbuf
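Typical usage wraps a lower-level Thrift transport such as a socket; a minimal sketch assuming the standard thrift package and a service listening on port 9090:

from thrift.transport import TSocket

sock = TSocket.TSocket('localhost', 9090)
transport = TBufferedTransport(sock, rbuf_size=8192)
transport.open()
transport.write('ping')
transport.flush()          # writes are buffered until flush()
reply = transport.read(4)  # reads come from the fixed-size read buffer
transport.close()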
Example #10
 def extract(self, filename, dir):
     zf = zipfile.ZipFile( filename )
     namelist = zf.namelist()
     dirlist = filter( lambda x: x.endswith( '/' ), namelist )
     filelist = filter( lambda x: not x.endswith( '/' ), namelist )
     # make base
     pushd = os.getcwd()
     if not os.path.isdir( dir ):
         os.mkdir( dir )
     os.chdir( dir )
     # create directory structure
     dirlist.sort()
     for dirs in dirlist:
         dirs = dirs.split( '/' )
         prefix = ''
         for dir in dirs:
             dirname = os.path.join( prefix, dir )
             if dir and not os.path.isdir( dirname ):
                 os.mkdir( dirname )
             prefix = dirname
     # extract files
     for fn in filelist:
         try:
             out = open( fn, 'wb' )
             buffer = StringIO( zf.read( fn ))
             buflen = 2 ** 20
             datum = buffer.read( buflen )
             while datum:
                 out.write( datum )
                 datum = buffer.read( buflen )
             out.close()
         finally:
             print fn
     os.chdir( pushd )
Example #11
def deploy_project(args):
    conn = _get_zerocloud_conn(args)
    conn.authenticate()
    ui_auth_version = conn.auth_version

    # We can now reset the auth for the web UI, if needed
    if args.no_ui_auth:
        ui_auth_version = '0.0'

    auth = _prepare_auth(ui_auth_version, args, conn)
    auth_opts = jinja2.Markup(json.dumps(auth))

    deploy_index = _deploy_zapp(conn, args.target, args.zapp, auth_opts,
                                force=args.force)

    print('app deployed to\n  %s/%s' % (conn.url, deploy_index))

    if args.execute:
        # for compatibility with the option name in 'zpm execute'
        args.container = args.target
        resp_body_buffer = BytesIO()
        resp = execute(args, response_body_buffer=resp_body_buffer)
        resp_body_buffer.seek(0)

        if resp['status'] < 200 or resp['status'] >= 300:
            raise zpmlib.ZPMException(resp_body_buffer.read())

        if args.summary:
            total_time, exec_table = _get_exec_table(resp)
            print('Execution summary:')
            print(exec_table)
            print('Total time: %s' % total_time)

        sys.stdout.write(resp_body_buffer.read())
Example #12
    def info(self):
        '''
        Return a string describing the loaded database version.

        @returns    English text string, or None if database is ancient.
        '''

        fp = StringIO(self.cache)
        fp.seek(-3, os.SEEK_END)

        hasStructureInfo = False

        # first get past the database structure information
        for i in range(STRUCTURE_INFO_MAX_SIZE):
            if fp.read(3) == '\xFF\xFF\xFF':
                hasStructureInfo = True
                break

            fp.seek(-4, os.SEEK_CUR)

        if hasStructureInfo:
            fp.seek(-6, os.SEEK_CUR)
        else:
            # no structure info, must be pre Sep 2002 database, go back to end.
            fp.seek(-3, os.SEEK_END)

        for i in range(DATABASE_INFO_MAX_SIZE):
            if fp.read(3) == '\0\0\0':
                return fp.read(i)

            fp.seek(-4, os.SEEK_CUR)
Example #13
	def _open(self):
		header = StringIO(self.fp.read(24))
		magic = header.read(4)
		if magic != "FTEX":
			raise ValueError("not a FTEX file")
		
		version = unpack("i", header.read(4))
		self.size = unpack("ii", header.read(8))
		linesize = (self.size[0] + 3) / 4 * 8
		mipmap_count, format_count = unpack("ii", header.read(8))
		self.mode = "RGB"
		
		self.tile = []
		for i in range(format_count):
			format, where = unpack("ii", self.fp.read(8))
			
			if format == 0:
				data = []
				self.fp.seek(where)
				size, = unpack("i", self.fp.read(4))
				for yb in xrange((self.size[1] + 3) / 4):
					decoded = dxtc.decodeDXT1(self.fp.read(linesize))
					for d in decoded:
						data.append(d)
				
				data = "".join(data[:self.size[1]])
				self.im = Image.core.new(self.mode, self.size)
				return self.fromstring(data)
			
			elif format == 1: # Uncompressed RGB
				self.tile.append(("raw", (0, 0) + self.size, where+4, (self.mode, 0, 1)))
			
			else:
				raise ValueError("Invalid texture format (expected 0 or 1, got %i)" % (format))
Example #14
    def write_AttenData_to_redis(self,OriAttenData,TagNum,SerialID):
        try:
            redispool = redis.ConnectionPool(host=self.redataIP,port=self.redataPort,db=self.redataDB)
            redata = redis.Redis(connection_pool=redispool)
            redpipe = redata.pipeline()

            OriAttenDataBuf = StringIO(OriAttenData)
            while TagNum != 0:
                # Obtain Tag Serial ID
                # Serial Number Has Two Ways Of Express
                # (1) String 24 , Ascii Code
                OriTagSerialID = OriAttenDataBuf.read(12)
                TagSerialID = binascii.b2a_hex(OriTagSerialID)
                # (2) String 8 , Decimal System
                # OriAttenDataBuf.read(8)
                # OriTagSerialID = OriAttenDataBuf.read(4)
                # TagSerialID = struct.unpack('<i', OriTagSerialID)[0]
                
                # Obtain Atten Stamp Time
                OriTimeStamp = OriAttenDataBuf.read(4)
                TimeStamp = struct.unpack('<i', OriTimeStamp)[0]
                
                redpipe.rpush("OriAttenDataList","%s-%s-%s"%(SerialID,TimeStamp,TagSerialID))

                self.logger.debug("%s - %s - %s - %s"%(TagNum,TimeStamp,time.ctime(TimeStamp),TagSerialID))

                TagNum = TagNum - 1
            redpipe.execute()
        except Exception,e:
            self.logger.error("Write Atten Data To Redis Exception,Error is %s,But Main Program Not Quit......"%e)
Example #15
    def from_data(self, fields, rows):
        fp = StringIO()
        writer = csv.writer(fp, quoting=csv.QUOTE_ALL)

        writer.writerow([name.encode("utf-8") for name in fields])

        for data in rows:
            row = []
            for d in data:
                if isinstance(d, basestring):
                    d = d.replace("\n", " ").replace("\t", " ")
                    try:
                        d = d.encode("utf-8")
                    except UnicodeError:
                        pass
                if d is False:
                    d = None
                row.append(d)
            writer.writerow(row)

            if fp.tell() >= 1250:
                fp.seek(0)
                data = fp.read()
                yield data
                fp.seek(0)
                fp.truncate()
                row = []

        fp.seek(0)  # Flush the final data
        data = fp.read()
        fp.close()
        yield data
        return
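Hypothetical usage, streaming the generated CSV chunks straight to disk (Exporter stands in for whatever class owns from_data, and rows is any iterable of row sequences):

exporter = Exporter()
with open('dump.csv', 'wb') as out:
    for chunk in exporter.from_data([u'id', u'name'], rows):
        out.write(chunk)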
Example #16
    def read(self, path):
        self._path = path

        with open(path) as fh:
            log.debug("Reading %r of size %d bytes.", path, os.path.getsize(path))

            data = StringIO(fh.read())

            self._header = Header.decode(data)

            self._text = data.read(self._header.text)
            self._data = data.read(self._header.data)
            text_reloc = data.read(self._header.trsize)
            data_reloc = data.read(self._header.drsize)
            symbols = data.read(self._header.syms)
            str_size = struct.unpack(">I", data.read(4))[0]
            strings = data.read()

            if str_size != len(strings):
                log.warn("Wrong size of string table!")

            self._strings = StringTable.decode(strings)

            for i in range(0, len(symbols), 12):
                self._symbols.append(SymbolInfo.decode(symbols[i : i + 12]))

            for i in range(0, len(text_reloc), 8):
                self._text_relocs.append(RelocInfo.decode(text_reloc[i : i + 8]))

            for i in range(0, len(data_reloc), 8):
                self._data_relocs.append(RelocInfo.decode(data_reloc[i : i + 8]))
Example #17
File: bmp.py Project: exscape/BMP
	def rgb_merge(self, r, g, b):
		""" (Re)combine the red, green and blue color channels from three separate pictures of identical size into one. """
		if not self.empty:
			warn("Running rgb_merge() on a non-empty BMP; the existing data will be overwritten!")

		# Ugh...
		if len(set((r.width, g.width, b.width))) != 1 or len(set((r.height, g.height, b.height))) != 1 or len(set((r.bpp, g.bpp, b.bpp))) != 1:
			die("The dimensions and/or bpp differs between the input images to rgb_merge()!")

		rf = StringIO(r.bitmap_data)
		gf = StringIO(g.bitmap_data)
		bf = StringIO(b.bitmap_data)

		out_bitmap_data = ""

		for row_num in xrange(0, b.height):
			for pix in xrange(0, b.width):
				red_pixel = struct.unpack("3B", rf.read(3))[2]
				green_pixel = struct.unpack("3B", gf.read(3))[1]
				blue_pixel = struct.unpack("3B", bf.read(3))[0]

				out_bitmap_data += "".join( (chr(blue_pixel), chr(green_pixel), chr(red_pixel)) )

			out_bitmap_data += chr(0x00) * r.padding_size

			rf.seek(r.padding_size, 1)
			gf.seek(g.padding_size, 1)
			bf.seek(b.padding_size, 1)

		return BMP(r.all_headers + out_bitmap_data, True)
Example #18
    def start_drag_operation(self, event):
        '''Start dragging whatever is selected'''
        fd = StringIO()
        modules_to_save = [m.module_num for m in self.get_selected_modules()]
        self.__pipeline.savetxt(fd, modules_to_save)
        pipeline_data_object = PipelineDataObject()
        fd.seek(0)
        pipeline_data_object.SetData(fd.read())

        text_data_object = wx.TextDataObject()
        fd.seek(0)
        text_data_object.SetData(fd.read())
        
        data_object = wx.DataObjectComposite()
        data_object.Add(pipeline_data_object)
        data_object.Add(text_data_object)
        drop_source = wx.DropSource(self.list_ctrl)
        drop_source.SetData(data_object)
        self.drag_underway = True
        self.drag_start = event.Position
        self.drag_time = time.time()
        selected_module_ids = [m.id for m in self.get_selected_modules()]
        self.__pipeline.start_undoable_action()
        try:
            result = drop_source.DoDragDrop(wx.Drag_AllowMove)
            self.drag_underway = False
            if result == wx.DragMove:
                for id in selected_module_ids:
                    for module in self.__pipeline.modules():
                        if module.id == id:
                            self.__pipeline.remove_module(module.module_num)
                            break
        finally:
            self.__pipeline.stop_undoable_action("Drag and drop")
Example #19
 def save(self, *args, **kwargs):
     image = PILImage.open(self.instance.image.file)
     temp_handle = StringIO()
     greyscale_image = image.convert("L")
     greyscale_image.save(temp_handle, 'png')
     temp_handle.seek(0)
     suf = SimpleUploadedFile(path.split(self.instance.image.name)[-1],
                              temp_handle.read(), content_type='image/png')
     name = suf.name
     if "." in name:
         name = name.split(".")[0]
     self.instance.preprocessed_image.save("%s_l.png" % name, suf,
                                           save=False)
     if self.cleaned_data["process"] == PROCESSES_DICT["HANDWRITTEN"]:
         factor = [5, 2, 0.25]
         temp_handle = StringIO()
         raw_mask = extract_handwritten_text(image, factor=factor)
         regions_mask = draw_regions(raw_mask, raw_mask,
                                     outline=["white", "white", None],
                                     fill=["white", "white", None])
         handwritten_mask = remove_noise(regions_mask).convert("1")
         handwritten_mask.save(temp_handle, 'png')
         temp_handle.seek(0)
         suf = SimpleUploadedFile(path.split(self.instance.image.name)[-1],
                                  temp_handle.read(),
                                  content_type='image/png')
         name = suf.name
         if "." in name:
             name = name.split(".")[0]
         self.instance.handwritten_mask.save("%s_h.png" % name, suf,
                                            save=False)
     return super(InitialImageForm, self).save(*args, **kwargs)
Example #20
class TFramedTransport(TTransportBase, CReadableTransport):
  """Class that wraps another transport and frames its I/O when writing."""

  def __init__(self, trans,):
    self.__trans = trans
    self.__rbuf = StringIO()
    self.__wbuf = StringIO()

  def isOpen(self):
    return self.__trans.isOpen()

  def open(self):
    return self.__trans.open()

  def close(self):
    return self.__trans.close()

  def read(self, sz):
    ret = self.__rbuf.read(sz)
    if len(ret) != 0:
      return ret

    self.readFrame()
    return self.__rbuf.read(sz)

  def readFrame(self):
    buff = self.__trans.readAll(4)
    sz, = unpack('!i', buff)
    self.__rbuf = StringIO(self.__trans.readAll(sz))

  def write(self, buf):
    self.__wbuf.write(buf)

  def flush(self):
    wout = self.__wbuf.getvalue()
    wsz = len(wout)
    # reset wbuf before write/flush to preserve state on underlying failure
    self.__wbuf = StringIO()
    # N.B.: Doing this string concatenation is WAY cheaper than making
    # two separate calls to the underlying socket object. Socket writes in
    # Python turn out to be REALLY expensive, but it seems to do a pretty
    # good job of managing string buffer operations without excessive copies
    buf = pack("!i", wsz) + wout
    self.__trans.write(buf)
    self.__trans.flush()

  # Implement the CReadableTransport interface.
  @property
  def cstringio_buf(self):
    return self.__rbuf

  def cstringio_refill(self, prefix, reqlen):
    # self.__rbuf will already be empty here because fastbinary doesn't
    # ask for a refill until the previous buffer is empty.  Therefore,
    # we can start reading new frames immediately.
    while len(prefix) < reqlen:
      self.readFrame()
      prefix += self.__rbuf.getvalue()
    self.__rbuf = StringIO(prefix)
    return self.__rbuf
Example #21
def extractPIL(quad):
	fop = StringIO(quad['source'].get())

	strmarker=fop.read(4)
	marker=unpack('<4B',strmarker)
	if strmarker in ('KCDC','KCD2'):
		raise CompressedError,'Section is compressed'
	if marker not in ((1,0,3,3),(1,0,5,5)):	raise LoadError('Bad marker: %s' % (marker,))
	junk,x,y,w,h,filesize=unpack('<6l',fop.read(24))
	w-=x
	h-=y
	linelengths=unpack('<%iH' % (h),fop.read(2*h))
	surf = Image.new('P', (w,h))
	
	surf.putpalette(PALETTE)
	for line in range(h):
		if linelengths[line]!=0:
			linepos=0
			xpos=0
			while linepos<linelengths[line]:
				skip,linesize=unpack('<2B',fop.read(2))
				xpos+=skip
				linepos+=2
				lineimage = Image.frombytes('P',(linesize,1),fop.read(linesize))
				surf.paste(lineimage, (xpos,line))
				linepos+=linesize
	return surf
Example #22
def decrypt(buf, passphrase):
    '''Decrypt *buf'''

    fh = StringIO(buf)

    len_ = struct.unpack(b'<B', fh.read(struct.calcsize(b'<B')))[0]
    nonce = fh.read(len_)

    key = sha256(passphrase + nonce)
    cipher = aes.AES(key) #IGNORE:E1102
    hmac_ = hmac.new(key, digestmod=hashlib.sha256)

    # Read (encrypted) hmac
    hash_ = fh.read(HMAC_SIZE)

    buf = fh.read()
    buf = cipher.process(buf)
    hmac_.update(buf)

    hash_ = cipher.process(hash_)

    if hash_ != hmac_.digest():
        raise ChecksumError('HMAC mismatch')

    return buf
Example #23
def tokenize(query):
    if isinstance(query, basestring):
        query = StringIO(query)

    quoted = None
    sdata = []

    while True:
        c = query.read(1)
        if not c: break

        if quoted:
            if c == quoted:
                yield TOKEN_STRING, ''.join(sdata)
                sdata = []
                quoted = None
            else:
                if c == '\\':
                    c = query.read(1)
                    if not c: break
                sdata.append(c)
            continue

        if c in SYMBOLS_QUOTE:
            quoted = c
            continue

        for sym_token, sym in TOKEN_SYMBOLS_TABLE.iteritems():
            if c in sym:
                stoken = _sdata_to_token(sdata)
                sdata = []
                if stoken is not None: yield stoken
                if sym_token is not None: yield sym_token, c
                break
        else:
            if c in '><=!':
                stoken = _sdata_to_token(sdata)
                sdata = []
                if stoken is not None: yield stoken

                nc = query.read(1)
                if not nc: break

                if nc == '=':
                    yield TOKEN_OPERATOR, c + nc
                elif (c == '<' and nc == '<') or (c == '>' and nc == '>'):
                    yield TOKEN_OPERATOR, c + nc
                else:
                    if c in '=!': c += '='
                    yield TOKEN_OPERATOR, c
                    if nc not in SYMBOLS_SPACE:
                        sdata.append(nc)
            else:
                sdata.append(c)

    if quoted:
        raise Exception("Missing end quote")

    stoken = _sdata_to_token(sdata)
    if stoken is not None: yield stoken
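Hypothetical usage, assuming the module-level TOKEN_* constants and _sdata_to_token yield (type, value) pairs:

for token_type, value in tokenize('name = "foo" and count >= 10'):
    print token_type, value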
Example #24
class UserEnvFile(object):
    """
    Behaves like a file object, but instead of being directly mapped to a file
    it writes to that file from inside of a UserEnv.
    """

    def __init__(self, userenv, filename):
        self.stringio = StringIO()
        self.userenv = userenv
        self.filename = filename

    def read(self, *args, **kwargs):
        return self.stringio.read(*args, **kwargs)

    def readlines(self, *args, **kwargs):
        return self.stringio.readlines(*args, **kwargs)

    def write(self, *args, **kwargs):
        self.stringio.write(*args, **kwargs)

    def writelines(self, *args, **kwargs):
        self.stringio.writelines(*args, **kwargs)

    def seek(self, *args, **kwargs):
        self.stringio.seek(*args, **kwargs)

    def close(self):
        self.userenv.write_string_to_file(self.stringio.getvalue(), self.filename)
        self.userenv = None
        self.stringio.close()
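A sketch of the intended usage, assuming a userenv object exposing the write_string_to_file(data, filename) method called in close():

env_file = UserEnvFile(userenv, '.profile')
env_file.write('export EDITOR=vim\n')
env_file.writelines(['export PAGER=less\n'])
env_file.close()  # only now is the buffered content written into the UserEnv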
Example #25
def generate_card(IMG_FILE,IMG_URL,TEXT_FRONT,TEXT_INSIDE):
    """Main function to generate PDF and print to stdout. Designed to be
    run as a CGI application."""
    if not (IMG_FILE or IMG_URL):
        print 'You must upload an image file or provide a URL to one'
        return

    tmp = StringIO()

    #Canvas Setup
    c = canvas.Canvas(tmp, pagesize=letter, pageCompression=0)
    #If output size is important set pageCompression=1, but remember that, compressed documents
    #will be smaller, but slower to generate. Note that images are always compressed, and this option will only
    #save space if you have a very large amount of text and vector graphics on each page.
    #Do I need 'MacRomanEncoding' for Macs?
    #Note: 0,0 coordinate in canvas is bottom left.
    c.setAuthor('Utility Mill - utilitymill.com/utility/greeting_card_generator')
    c.setTitle('Beautiful, expensive greeting card created at utilitymill.com')

    #add_fold_lines(c) #early feedback says lines aren't necessary. Uncomment here to put them in
    put_in_front_picture(c,IMG_FILE,IMG_URL)
    write_text(c,TEXT_FRONT.replace('\r',''),TEXT_INSIDE.replace('\r',''))

    #The showPage method saves the current page of the canvas
    c.showPage()
    #The save method stores the file and closes the canvas.
    c.save()

    tmp.seek(0)
    print 'Content-Type: application/pdf'
    print  # blank line ends the CGI headers
    print tmp.read()
Example #26
def decrypt_block_pw(password, crypted_data, compress='zlib', compress_options=[]):
    """ Decrypts a file using AES (CBC mode) with the given arbitrary password.
    compress_options:
        passed to the decompression function you chose via '*compress_options'.
        Even though we are decompressing here, the parameter keeps the same
        name as in encrypt_block_pw() for symmetry.
    """
    ##################
    # prepare
    key = getHashKey(password) # make 32byte key for AES
    assert len(crypted_data) > 0
    infile = StringIO(crypted_data)
    ##################
    # aux stuff
    # - get original compressed size
    origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]
    iv = infile.read(16)
    ##################
    # decrypt
    decryptor = AES.new(key, AES.MODE_CBC, iv)
    data = decryptor.decrypt(infile.read())
    ##################
    # decompress
    if compress is None:
        rawdata = data[:origsize]
    elif compress == 'zlib':
        # level=1 is very fast, and good enough to get rid of long repeats
        rawdata = zlib.decompress(data[:origsize], *compress_options)
    else:
        raise ValueError("unknown decompression '%s'" % compress)
    # result
    return rawdata
Example #27
def get_brickname(dev):
    counterout = random.randint(1, 200)
    sout = StringIO()
    sout.write(struct.pack('<H', counterout))
    sout.write(struct.pack('B', DIRECT_COMMAND_REPLY))
    sout.write(struct.pack('B', 0x06))  # 00001100 two variables: see lms2012/lms2012/doc/html/directcommands.html
    sout.write(struct.pack('B', 0x00))  # 00000000
    sout.write(struct.pack('B', 0xD3))  # opCOM_GET
    sout.write(struct.pack('B', 13))    # GET_BRICKNAME
    sout.write(struct.pack('B', 0x06))  # LC0(6) Local Constant
    sout.write(struct.pack('B', 0x60))  # GV0(0) Global Variable for response
    message = sout.getvalue()
    write_message(dev, message)
    ret = dev.read(EP_IN, 1024, 0, 100)

    sin = StringIO(ret)
    cmd_size = struct.unpack('<H', sin.read(2))[0]
    counterin = struct.unpack('<H', sin.read(2))[0]
    cmd_type = struct.unpack('B', sin.read(1))[0]
    brickname = []

    value = sin.read(1)
    while value != '\x00':
        brickname.append(value)
        value = sin.read(1)

    return ''.join(brickname)
Example #28
class Response(object):
    def __init__(self, http_response, spool_size=MAX_MEM_FILE_SIZE):
        # We have to read the full response, even if we don't expect a body,
        # otherwise, the next request fails. The user may choose to do this,
        # use the automatic disc/memory buffering.
        chunk_size =  max(spool_size, MAX_MEM_FILE_SIZE) // 2
        self.http_response = http_response
        if spool_size > 0:
            # automatic disc/mem buffering of response
            if int(http_response.getheader('Content-Length') or 0) > spool_size:
                self.body = tempfile.TemporaryFile(bufsize=chunk_size)
            else:
                self.body = StringIO()
            while True:
                chunk = http_response.read(chunk_size)
                if not chunk:
                    break
                self.body.write(chunk)
            self.body.seek(0)
        else:
            self.body = http_response

        if http_response.status >= 300:
            # read the error message into a StringIO (it should be small)
            if (spool_size <= 0 or
                    int(http_response.getheader('Content-Length') or 0) > spool_size):
                self.body = StringIO(self.body.read())
            self.message = self.body.read()
            self.body.seek(0)
        else:
            self.message = "%03d %s" % (http_response.status,
                    http_response.reason)
Example #29
    def deserialize_msg(self, data):
        msg = {}

        data_len = len(data)
        if data_len < HEADER_LEN:
            raise HeaderTooShortError("got {} of {} bytes".format(
                data_len, HEADER_LEN))

        data = StringIO(data)
        header = data.read(HEADER_LEN)
        msg.update(self.deserialize_header(header))

        if (data_len - HEADER_LEN) < msg['length']:
            self.required_len = HEADER_LEN + msg['length']
            raise PayloadTooShortError("got {} of {} bytes".format(
                data_len, HEADER_LEN + msg['length']))

        payload = data.read(msg['length'])
        computed_checksum = sha256(sha256(payload))[:4]
        if computed_checksum != msg['checksum']:
            raise InvalidPayloadChecksum("{} != {}".format(
                binascii.hexlify(computed_checksum),
                binascii.hexlify(msg['checksum'])))

        if msg['command'] == "version":
            msg.update(self.deserialize_version_payload(payload))
        elif msg['command'] == "addr":
            msg.update(self.deserialize_addr_payload(payload))
        elif msg['command'] == "inv":
            msg.update(self.deserialize_inv_payload(payload))

        return (msg, data.read())
Example #30
	def add_packet(self, new_packet):
		"""
			Adds the packet to the unfinished object. Updates all things appropriately

			:param string newPacket: The incoming packet
		"""
		packet = unpack(new_packet)
		reader = StringIO(packet[1]) # Get the actual packet data as a StringIO
		packet_number = _bin_unpack(reader.read(4)) # Read the 4-byte number of the packet

		# Try to write the info to the correct place
		self.info_buffer.seek(_UnfinishedPacket.MAX_PACKET_SIZE * packet_number)
		self.info_buffer.write(reader.read())
		reader.close()
		self.info_buffer.seek(0)
		# Update incoming packet tracking data
		if packet_number > self.last_received:
			# Everything between the previous highest packet and this one is missing
			for num in range(self.last_received + 1, packet_number):
				self.missing_packets.append(num)
			self.last_received = packet_number
		else:
			self.missing_packets.remove(packet_number)

		# See if all packets are received.
		if self.last_received == self.num_packets - 1 and not self.missing_packets:
			self.done = True
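The receiver above expects each packet body to carry a 4-byte packet number followed by the packet data. A hypothetical sender-side sketch, producing the number-prefixed bodies that add_packet reads after its outer unpack() (struct is used for illustration; the real number encoding is whatever the project-specific _bin_unpack() expects):

import struct
from StringIO import StringIO

def split_into_packets(data, max_size):
	# Prefix each chunk with its 4-byte sequence number.
	reader = StringIO(data)
	number = 0
	while True:
		chunk = reader.read(max_size)
		if not chunk:
			break
		yield struct.pack('!I', number) + chunk
		number += 1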
Example #31
def _excel_data_getter_for_xlwt(name,
                                excel_data,
                                sheet2name=None,
                                sheet2data=None):
    """
        在本地生成excel临时文件, 返回路径待上传
        sheet2name & sheet2data 用来记录报表查询的条件
    """
    workbook = xlwt.Workbook(encoding='utf-8')
    worksheet = workbook.add_sheet(name)

    # header style
    header_style = xlwt.easyxf(
        'pattern: pattern solid;font: name Source Han Sans CN;')

    header_font = xlwt.Font()
    header_font.name = u'Source Han Sans CN'  # use the "Source Han Sans CN" font
    header_font.height = 200
    header_style.font = header_font

    borders = xlwt.Borders()
    borders.bottom = xlwt.Borders.THIN
    header_style.borders = borders

    header_style.pattern.pattern_fore_colour = 0x16

    # normal style
    normal_style = xlwt.XFStyle()

    normal_font = xlwt.Font()
    normal_font.name = u'Source Han Sans CN'  # use the "Source Han Sans CN" font
    normal_font.height = 200

    normal_style.font = normal_font

    # group style
    group_style = xlwt.XFStyle()  # xlwt.easyxf('pattern: pattern solid;')

    group_font = xlwt.Font()
    group_font.name = u'Source Han Sans CN'  # use the "Source Han Sans CN" font
    group_font.height = 200  # font size
    # group_font.colour_index = 0x0C
    # group_font.bold = True
    # group_font.italic = True

    group_style.font = group_font

    # group_style.pattern.pattern_fore_colour = 0x16

    borders = xlwt.Borders()
    borders.bottom = xlwt.Borders.THIN
    group_style.borders = borders

    # For group rows, do not apply the group style to the leading blank columns
    for r in range(len(excel_data)):  # row
        style = normal_style
        flag = 'normal'  # normal, group_no_start, group_start
        if r >= 1:
            flag = 'group_no_start'
        elif r == 0:
            style = header_style

        for c in range(len(excel_data[r])):  # column

            if flag == 'group_no_start':

                if excel_data[r][c] is not None:
                    flag = 'group_start'
                    style = group_style

            if excel_data[r][c] is None:
                worksheet.write(r, c, None, style)
            elif isinstance(excel_data[r][c], (int, long, float, complex)):
                worksheet.write(r, c, excel_data[r][c], style)
            else:
                worksheet.write(r, c, '%s' % excel_data[r][c], style)

    if sheet2name and sheet2data:
        # Add sheet2 to record the query conditions inside the workbook
        worksheet2 = workbook.add_sheet(sheet2name)
        for r in range(len(sheet2data)):  # row
            style = normal_style
            flag = 'normal'  # normal, group_no_start, group_start
            if r >= 1:
                flag = 'group_no_start'
            elif r == 0:
                style = header_style

            for c in range(len(sheet2data[r])):  # column

                if flag == 'group_no_start':

                    if sheet2data[r][c] is not None:
                        flag = 'group_start'
                        style = group_style

                if sheet2data[r][c] is None:
                    worksheet2.write(r, c, None, style)
                elif isinstance(sheet2data[r][c], (int, long, float, complex)):
                    worksheet2.write(r, c, sheet2data[r][c], style)
                else:
                    worksheet2.write(r, c, '%s' % sheet2data[r][c], style)

    fp = StringIO()
    workbook.save(fp)
    fp.seek(0)
    data = fp.read()
    fp.close()
    return data
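Hypothetical usage (the first row is the header, None cells are written blank, and a row whose leading cells are None is styled as a group row):

data = [
    [u'Region', u'Sales'],
    [u'North', 1024],
    [None, 512],
]
xls_bytes = _excel_data_getter_for_xlwt(u'report', data)
open('report.xls', 'wb').write(xls_bytes)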
Example #32
    def load_course(self, course_dir, tracker):
        """
        Load a course into this module store
        course_path: Course directory name

        returns a CourseDescriptor for the course
        """
        log.debug(
            '========> Starting course import from {0}'.format(course_dir))

        with open(self.data_dir / course_dir / "course.xml") as course_file:

            # VS[compat]
            # TODO (cpennington): Remove this once all fall 2012 courses have
            # been imported into the cms from xml
            course_file = StringIO(
                clean_out_mako_templating(course_file.read()))

            course_data = etree.parse(course_file,
                                      parser=edx_xml_parser).getroot()

            org = course_data.get('org')

            if org is None:
                msg = ("No 'org' attribute set for course in {dir}. "
                       "Using default 'edx'".format(dir=course_dir))
                log.warning(msg)
                tracker(msg)
                org = 'edx'

            course = course_data.get('course')

            if course is None:
                msg = ("No 'course' attribute set for course in {dir}."
                       " Using default '{default}'".format(dir=course_dir,
                                                           default=course_dir))
                log.warning(msg)
                tracker(msg)
                course = course_dir

            url_name = course_data.get('url_name', course_data.get('slug'))
            policy_dir = None
            if url_name:
                policy_dir = self.data_dir / course_dir / 'policies' / url_name
                policy_path = policy_dir / 'policy.json'

                policy = self.load_policy(policy_path, tracker)

                # VS[compat]: remove once courses use the policy dirs.
                if policy == {}:
                    old_policy_path = self.data_dir / course_dir / 'policies' / '{0}.json'.format(
                        url_name)
                    policy = self.load_policy(old_policy_path, tracker)
            else:
                policy = {}
                # VS[compat] : 'name' is deprecated, but support it for now...
                if course_data.get('name'):
                    url_name = Location.clean(course_data.get('name'))
                    tracker("'name' is deprecated for module xml.  Please use "
                            "display_name and url_name.")
                else:
                    raise ValueError(
                        "Can't load a course without a 'url_name' "
                        "(or 'name') set.  Set url_name.")

            course_id = CourseDescriptor.make_id(org, course, url_name)
            system = ImportSystem(
                self,
                course_id,
                course_dir,
                policy,
                tracker,
                self.parent_trackers[course_id],
                self.load_error_modules,
            )

            course_descriptor = system.process_xml(
                etree.tostring(course_data, encoding='unicode'))

            # If we fail to load the course, then skip the rest of the loading steps
            if isinstance(course_descriptor, ErrorDescriptor):
                return course_descriptor

            # NOTE: The descriptors end up loading somewhat bottom up, which
            # breaks metadata inheritance via get_children().  Instead
            # (actually, in addition to, for now), we do a final inheritance pass
            # after we have the course descriptor.
            compute_inherited_metadata(course_descriptor)

            # now import all pieces of course_info which is expected to be stored
            # in <content_dir>/info or <content_dir>/info/<url_name>
            self.load_extra_content(system, course_descriptor, 'course_info',
                                    self.data_dir / course_dir / 'info',
                                    course_dir, url_name)

            # now import all static tabs which are expected to be stored in
            # in <content_dir>/tabs or <content_dir>/tabs/<url_name>
            self.load_extra_content(system, course_descriptor, 'static_tab',
                                    self.data_dir / course_dir / 'tabs',
                                    course_dir, url_name)

            self.load_extra_content(system, course_descriptor,
                                    'custom_tag_template',
                                    self.data_dir / course_dir / 'custom_tags',
                                    course_dir, url_name)

            self.load_extra_content(system, course_descriptor, 'about',
                                    self.data_dir / course_dir / 'about',
                                    course_dir, url_name)

            log.debug('========> Done with course import from {0}'.format(
                course_dir))
            return course_descriptor
Example #33
class GenericExprVisitor(object):
    """ A DWARF expression is a sequence of instructions encoded in a block
        of bytes. This class decodes the sequence into discrete instructions
        with their arguments and allows generic "visiting" to process them.

        Usage: subclass this class, and override the needed methods. The
        easiest way would be to just override _after_visit, which gets passed
        each decoded instruction (with its arguments) in order. Clients of
        the visitor then just execute process_expr. The subclass can keep
        its own internal information updated in _after_visit and provide
        methods to extract it. For a good example of this usage, see the
        ExprDumper class in the descriptions module.

        A more complex usage could be to override visiting methods for
        specific instructions, by placing them into the dispatch table.
    """
    def __init__(self, structs):
        self.structs = structs
        self._init_dispatch_table()
        self.stream = None
        self._cur_opcode = None
        self._cur_opcode_name = None
        self._cur_args = []

    def process_expr(self, expr):
        """ Process (visit) a DWARF expression. expr should be a list of
            (integer) byte values.
        """
        self.stream = StringIO(bytelist2string(expr))

        while True:
            # Get the next opcode from the stream. If nothing is left in the
            # stream, we're done.
            byte = self.stream.read(1)
            if len(byte) == 0:
                break

            # Decode the opcode and its name
            self._cur_opcode = ord(byte)
            self._cur_opcode_name = DW_OP_opcode2name.get(
                self._cur_opcode, 'OP:0x%x' % self._cur_opcode)
            # Will be filled in by visitors
            self._cur_args = [] 

            # Dispatch to a visitor function
            visitor = self._dispatch_table.get(
                    self._cur_opcode,
                    self._default_visitor)
            visitor(self._cur_opcode, self._cur_opcode_name)

            # Finally call the post-visit function
            self._after_visit(
                    self._cur_opcode, self._cur_opcode_name, self._cur_args)

    def _after_visit(self, opcode, opcode_name, args):
        pass
        
    def _default_visitor(self, opcode, opcode_name):
        pass
        
    def _visit_OP_with_no_args(self, opcode, opcode_name):
        self._cur_args = []

    def _visit_OP_addr(self, opcode, opcode_name):
        self._cur_args = [
                struct_parse(self.structs.Dwarf_target_addr(''), self.stream)]

    def _make_visitor_arg_struct(self, struct_arg):
        """ Create a visitor method for an opcode that that accepts a single
            argument, specified by a struct.
        """
        def visitor(opcode, opcode_name):
            self._cur_args = [struct_parse(struct_arg, self.stream)]
        return visitor

    def _make_visitor_arg_struct2(self, struct_arg1, struct_arg2):
        """ Create a visitor method for an opcode that that accepts two
            arguments, specified by structs.
        """
        def visitor(opcode, opcode_name):
            self._cur_args = [
                struct_parse(struct_arg1, self.stream),
                struct_parse(struct_arg2, self.stream)]
        return visitor

    def _init_dispatch_table(self):
        self._dispatch_table = {}
        def add(opcode_name, func):
            self._dispatch_table[DW_OP_name2opcode[opcode_name]] = func
            
        add('DW_OP_addr', self._visit_OP_addr)
        add('DW_OP_const1u', 
            self._make_visitor_arg_struct(self.structs.Dwarf_uint8('')))
        add('DW_OP_const1s', 
            self._make_visitor_arg_struct(self.structs.Dwarf_int8('')))
        add('DW_OP_const2u', 
            self._make_visitor_arg_struct(self.structs.Dwarf_uint16('')))
        add('DW_OP_const2s', 
            self._make_visitor_arg_struct(self.structs.Dwarf_int16('')))
        add('DW_OP_const4u', 
            self._make_visitor_arg_struct(self.structs.Dwarf_uint32('')))
        add('DW_OP_const4s', 
            self._make_visitor_arg_struct(self.structs.Dwarf_int32('')))
        add('DW_OP_const8u', 
            self._make_visitor_arg_struct2(
                self.structs.Dwarf_uint32(''),
                self.structs.Dwarf_uint32('')))
        add('DW_OP_const8s', 
            self._make_visitor_arg_struct2(
                self.structs.Dwarf_int32(''),
                self.structs.Dwarf_int32('')))
        add('DW_OP_constu',
            self._make_visitor_arg_struct(self.structs.Dwarf_uleb128('')))
        add('DW_OP_consts',
            self._make_visitor_arg_struct(self.structs.Dwarf_sleb128('')))
        add('DW_OP_pick',
            self._make_visitor_arg_struct(self.structs.Dwarf_uint8('')))
        add('DW_OP_plus_uconst',
            self._make_visitor_arg_struct(self.structs.Dwarf_uleb128('')))
        add('DW_OP_bra', 
            self._make_visitor_arg_struct(self.structs.Dwarf_int16('')))
        add('DW_OP_skip', 
            self._make_visitor_arg_struct(self.structs.Dwarf_int16('')))

        for opname in [ 'DW_OP_deref', 'DW_OP_dup', 'DW_OP_drop', 'DW_OP_over',
                        'DW_OP_swap', 'DW_OP_swap', 'DW_OP_rot', 'DW_OP_xderef',
                        'DW_OP_abs', 'DW_OP_and', 'DW_OP_div', 'DW_OP_minus',
                        'DW_OP_mod', 'DW_OP_mul', 'DW_OP_neg', 'DW_OP_not',
                        'DW_OP_plus', 'DW_OP_shl', 'DW_OP_shr', 'DW_OP_shra',
                        'DW_OP_xor', 'DW_OP_eq', 'DW_OP_ge', 'DW_OP_gt',
                        'DW_OP_le', 'DW_OP_lt', 'DW_OP_ne', 'DW_OP_nop',
                        'DW_OP_push_object_address', 'DW_OP_form_tls_address',
                        'DW_OP_call_frame_cfa']:
            add(opname, self._visit_OP_with_no_args)

        for n in range(0, 32):
            add('DW_OP_lit%s' % n, self._visit_OP_with_no_args)
            add('DW_OP_reg%s' % n, self._visit_OP_with_no_args)
            add('DW_OP_breg%s' % n, 
                self._make_visitor_arg_struct(self.structs.Dwarf_sleb128('')))

        add('DW_OP_fbreg',
            self._make_visitor_arg_struct(self.structs.Dwarf_sleb128('')))
        add('DW_OP_regx',
            self._make_visitor_arg_struct(self.structs.Dwarf_uleb128('')))
        add('DW_OP_bregx',
            self._make_visitor_arg_struct2(
                self.structs.Dwarf_uleb128(''),
                self.structs.Dwarf_sleb128('')))
        add('DW_OP_piece',
            self._make_visitor_arg_struct(self.structs.Dwarf_uleb128('')))
        add('DW_OP_bit_piece',
            self._make_visitor_arg_struct2(
                self.structs.Dwarf_uleb128(''),
                self.structs.Dwarf_uleb128('')))
        add('DW_OP_deref_size',
            self._make_visitor_arg_struct(self.structs.Dwarf_int8('')))
        add('DW_OP_xderef_size',
            self._make_visitor_arg_struct(self.structs.Dwarf_int8('')))
        add('DW_OP_call2',
            self._make_visitor_arg_struct(self.structs.Dwarf_uint16('')))
        add('DW_OP_call4',
            self._make_visitor_arg_struct(self.structs.Dwarf_uint32('')))
        add('DW_OP_call_ref',
            self._make_visitor_arg_struct(self.structs.Dwarf_offset('')))
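The docstring's suggested usage, as a minimal sketch: subclass and override _after_visit to collect each decoded instruction (structs is the DWARF structs object the expression was parsed with, and expr is a list of integer byte values):

class ExprCollector(GenericExprVisitor):
    def __init__(self, structs):
        super(ExprCollector, self).__init__(structs)
        self.instructions = []

    def _after_visit(self, opcode, opcode_name, args):
        # Record each decoded instruction in order
        self.instructions.append((opcode_name, args))

collector = ExprCollector(structs)
collector.process_expr(expr)
print collector.instructions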
Example #34
def readDSF(path, netdefs, terrains, bbox=None, bytype=None):
    wantoverlay = not terrains
    wantmesh = not wantoverlay
    baddsf=(0, "Invalid DSF file", path)

    if __debug__: print path.encode(getfilesystemencoding() or 'utf-8')
    h=file(path, 'rb')
    sig=h.read(8)
    if sig.startswith('7z\xBC\xAF\x27\x1C'):	# X-Plane 10 compressed
        if __debug__: clock=time.clock()
        if Archive7z:
            h.seek(0)
            data=Archive7z(h).getmember(basename(path)).read()
            h.close()
            h=StringIO(data)
        else:
            h.close()
            cmds=exists('/usr/bin/7zr') and '/usr/bin/7zr' or '/usr/bin/7za'
            cmds='%s e "%s" -o"%s" -y' % (cmds, path, gettempdir())
            (i,o,e)=popen3(cmds)
            i.close()
            err=o.read()
            err+=e.read()
            o.close()
            e.close()
            h=file(join(gettempdir(), basename(path)), 'rb')
        if __debug__: print "%6.3f time in decompression" % (time.clock()-clock)
        sig=h.read(8)
    if sig!='XPLNEDSF' or unpack('<I',h.read(4))!=(1,):
        raise IOError, baddsf

    # scan for contents
    table={}
    h.seek(-16,SEEK_END)	# stop at MD5 checksum
    end=h.tell()
    p=12
    while p<end:
        h.seek(p)
        d=h.read(8)
        (c,l)=unpack('<4sI', d)
        table[c]=p+4
        p+=l
    if __debug__: print table
    if not 'DAEH' in table or not 'NFED' in table or not 'DOEG' in table or not 'SDMC' in table:
        raise IOError, baddsf

    # header
    h.seek(table['DAEH'])
    (l,)=unpack('<I', h.read(4))
    headend=h.tell()+l-8
    if h.read(4)!='PORP':
        raise IOError, baddsf
    (l,)=unpack('<I', h.read(4))
    placements=[]
    nets = defaultdict(list)
    mesh = defaultdict(list)
    c=h.read(l-9).split('\0')
    h.read(1)
    overlay=0
    for i in range(0, len(c)-1, 2):
        if c[i]=='sim/overlay': overlay=int(c[i+1])
        elif c[i]=='sim/south': south=int(c[i+1])
        elif c[i]=='sim/west': west=int(c[i+1])
        elif c[i] in Exclude.NAMES:
            if ',' in c[i+1]:	# Fix for FS2XPlane 0.99
                v=[float(x) for x in c[i+1].split(',')]
            else:
                v=[float(x) for x in c[i+1].split('/')]
            placements.append(Exclude(Exclude.NAMES[c[i]], 0, [[Node([v[0],v[1]]), Node([v[2],v[1]]), Node([v[2],v[3]]), Node([v[0],v[3]])]]))
    if wantoverlay and not overlay and not bbox:
        # Not an Overlay DSF - bail early
        h.close()
        raise IOError (0, "%s is not an overlay." % basename(path))
    if overlay and (bbox or wantmesh):
        # only interested in mesh data - bail early
        h.close()
        return (south, west, placements, nets, mesh)
        
    h.seek(headend)

    # Definitions Atom
    h.seek(table['NFED'])
    (l,)=unpack('<I', h.read(4))
    defnend=h.tell()+l-8
    terrain=objects=polygons=networks=rasternames=[]
    while h.tell()<defnend:
        c=h.read(4)
        (l,)=unpack('<I', h.read(4))
        if l==8:
            pass	# empty
        elif c=='TRET':
            terrain=h.read(l-9).replace('\\','/').replace(':','/').split('\0')
            h.read(1)
        elif c=='TJBO':
            objects=[x.decode() for x in h.read(l-9).replace('\\','/').replace(':','/').split('\0')]	# X-Plane only supports ASCII
            h.read(1)
        elif c=='YLOP':
            polygons=[x.decode() for x in h.read(l-9).replace('\\','/').replace(':','/').split('\0')]	# X-Plane only supports ASCII
            h.read(1)
        elif c=='WTEN':
            networks=h.read(l-9).replace('\\','/').replace(':','/').split('\0')
            h.read(1)
        elif c=='NMED':
            rasternames=h.read(l-9).replace('\\','/').replace(':','/').split('\0')
            h.read(1)
        else:
            h.seek(l-8, 1)

    # We only understand a limited set of v10-style networks
    if networks and networks!=[NetworkDef.DEFAULTFILE]:
        if wantoverlay and not bbox:
            raise IOError, (0, 'Unsupported network: %s' % ', '.join(networks))
        else:
            skipnetworks = True
    else:
        skipnetworks = False

    # Geodata Atom
    if __debug__: clock=time.clock()	# Processor time
    h.seek(table['DOEG'])
    (l,)=unpack('<I', h.read(4))
    geodend=h.tell()+l-8
    pool=[]
    scal=[]
    po32=[]
    sc32=[]
    while h.tell()<geodend:
        c=h.read(4)
        (l,)=unpack('<I', h.read(4))
        if skipnetworks and c in ['23OP','23CS']:
            h.seek(l-8, 1)	# Skip network data
        elif c in ['LOOP','23OP']:
            if c=='LOOP':
                poolkind=pool
                fmt='<H'
                ifmt=uint16
                size=2
            else:
                poolkind=po32
                fmt='<I'
                ifmt=uint32
                size=4
            (n,p)=unpack('<IB', h.read(5))
            #if __debug__: print c,n,p
            thispool = empty((n,p), ifmt)
            # Pool data is supplied in column order (by "plane"), so use numpy slicing to assign
            for i in range(p):
                (e,)=unpack('<B', h.read(1))	# encoding type - default DSFs use e=3
                if e&2:		# RLE
                    offset = 0
                    while offset<n:
                        (r,)=unpack('<B', h.read(1))
                        if (r&128):	# repeat
                            (d,)=unpack(fmt, h.read(size))
                            thispool[offset:offset+(r&127),i] = d
                            offset += (r&127)
                        else:		# non-repeat
                            thispool[offset:offset+r,i] = fromstring(h.read(r*size), fmt)
                            offset += r
                else:		# raw
                    thispool[:,i] = fromstring(h.read(n*size), fmt)
                if e&1:		# differenced
                    thispool[:,i] = cumsum(thispool[:,i], dtype=ifmt)
            poolkind.append(thispool)
        elif c=='LACS':
            scal.append(fromstring(h.read(l-8), '<f').reshape(-1,2))
            #if __debug__: print c,scal[-1]
        elif c=='23CS':
            sc32.append(fromstring(h.read(l-8), '<f').reshape(-1,2))
            #if __debug__: print c,sc32[-1]
        else:
            h.seek(l-8, 1)
    if __debug__: print "%6.3f time in GEOD atom" % (time.clock()-clock)
    
    # Rescale pools
    if __debug__: clock=time.clock()			# Processor time
    for i in range(len(pool)):				# number of pools
        curpool = pool[i]
        curscale= scal[i]
        newpool = empty(curpool.shape, float)		# need double precision for placements
        for plane in range(len(curscale)):		# number of planes in this pool
            (scale,offset) = curscale[plane]
            if scale:
                newpool[:,plane] = curpool[:,plane] * (scale/0xffff) + float(offset)
            else:
                newpool[:,plane] = curpool[:,plane] + float(offset)
        # numpy doesn't work efficiently skipping around the variable sized pools, so don't consolidate
        pool[i] = newpool

    # if __debug__:	# Dump pools
    #     for p in pool:
    #         for x in p:
    #             for y in x:
    #                 print "%.5f" % y,
    #             print
    #         print

    # Rescale network pool
    while po32 and not len(po32[-1]): po32.pop()	# v10 DSFs have a bogus zero-dimensioned pool at the end
    if po32:
        if len(po32)!=1 or sc32[0].shape!=(4,2):
            raise IOError, baddsf			# code below is optimized for one big pool
        if wantoverlay:
            newpool = empty((len(po32[0]),3), float)	# Drop junction IDs. Need double precision for placements
            for plane in range(3):
                (scale,offset) = sc32[0][plane]
                newpool[:,plane] = po32[0][:,plane] * (scale/0xffffffffL) + float(offset)
            po32 = newpool
        else:
            # convert to local coords if we just want network lines. Do calculations in double, store result as single.
            centrelat = south+0.5
            centrelon = west+0.5
            newpool = empty((len(po32[0]),6), float32)	# drop junction IDs, add space for color
            lat = po32[0][:,1] * (sc32[0][1][0]/0xffffffffL) + float(sc32[0][1][1])	# double
            newpool[:,0] =(po32[0][:,0] * onedeg*(sc32[0][0][0]/0xffffffffL) + onedeg*(sc32[0][0][1] - centrelon)) * numpy.cos(numpy.radians(lat))	# lon -> x
            newpool[:,1] = po32[0][:,2] * (sc32[0][2][0]/0xffffffffL) + float(sc32[0][2][1])	# y
            newpool[:,2] = onedeg*centrelat - onedeg*lat	# lat -> z
            if __debug__:
                assert not sc32[0][3].any()		# Junction IDs are unscaled
                newpool[:,3] = po32[0][:,3]		# Junction ID for splitting (will be overwritten at consolidation stage)
            po32 = newpool

    if __debug__:
        print "%6.3f time in rescale" % (time.clock()-clock)
        total = 0
        longest = 0
        for p in pool:
            total += len(p)
            longest = max(longest, len(p))
        print 'pool:', len(pool), 'Avg:', total/(len(pool) or 1), 'Max:', longest
        print 'po32:', len(po32)

    # X-Plane 10 raster data
    raster={}
    elev=elevwidth=elevheight=None
    if 'SMED' in table:
        if __debug__: clock=time.clock()
        h.seek(table['SMED'])
        (l,)=unpack('<I', h.read(4))
        demsend=h.tell()+l-8
        layerno=0
        while h.tell()<demsend:
            if h.read(4)!='IMED': raise IOError, baddsf
            (l,)=unpack('<I', h.read(4))
            (ver,bpp,flags,width,height,scale,offset)=unpack('<BBHIIff', h.read(20))
            if __debug__: print 'IMED', ver, bpp, flags, width, height, scale, offset, rasternames[layerno]
            if h.read(4)!='DMED': raise IOError, baddsf
            (l,)=unpack('<I', h.read(4))
            assert l==8+bpp*width*height
            if flags&3==0:	# float
                fmt='f'
                assert bpp==4
            elif flags&3==3:
                raise IOError, baddsf
            else:		# signed
                if bpp==1:
                    fmt='b'
                elif bpp==2:
                    fmt='h'
                elif bpp==4:
                    fmt='i'
                else:
                    raise IOError, baddsf
                if flags&3==2:	# unsigned
                    fmt=fmt.upper()
            data = fromstring(h.read(bpp*width*height), '<'+fmt).reshape(width,height)
            raster[rasternames[layerno]]=data
            if rasternames[layerno]=='elevation':	# we're only interested in elevation
                assert flags&4				# algorithm below assumes post-centric data
                assert scale==1.0 and offset==0		# we don't handle other cases
                elev=raster['elevation']
                elevwidth=width-1
                elevheight=height-1
            layerno+=1
        if __debug__: print "%6.3f time in DEMS atom" % (time.clock()-clock)

    # Commands Atom
    if __debug__: clock=time.clock()	# Processor time
    h.seek(table['SDMC'])
    (l,)=unpack('<I', h.read(4))
    cmdsend=h.tell()+l-8
    curpool=0
    netbase=0
    netcolor = COL_NETWORK
    netname = '#000' + NetworkDef.NETWORK
    idx=0
    near=0
    far=-1
    flags=0	# 1=physical, 2=overlay
    roadtype=0
    curter='terrain_Water'
    curpatch=[]
    tercache={'terrain_Water':(join('Resources','Sea01.png'), True, 0, 0.001,0.001)}
    stripindices = MakeStripIndices()
    fanindices   = MakeFanIndices()

    if __debug__: cmds = defaultdict(int)
    while h.tell()<cmdsend:
        (c,)=unpack('<B', h.read(1))
        if __debug__: cmds[c] += 1
        #if __debug__: print "%08x %d" % (h.tell()-1, c)

        # Commands in rough order of frequency of use
        if c==10:	# Network Chain Range (used by g2xpl and MeshTool)
            (first,last)=unpack('<HH', h.read(4))
            #print "\nChain Range %d %d" % (first,last)
            if skipnetworks or last-first<2:
                pass
            elif wantoverlay:
                assert curpool==0, curpool
                placements.append(Network(netname, 0, [[Node(p) for p in po32[netbase+first:netbase+last]]]))
            else:
                assert curpool==0, curpool
                #assert not nodes[1:-2,3].any(), nodes	# Only handle single complete chain
                nets[netcolor].append(po32[netbase+first:netbase+last])

        elif c==9:	# Network Chain (KSEA demo terrain uses this one)
            (l,)=unpack('<B', h.read(1))
            #print "\nChain %d" % l
            if skipnetworks:
                h.read(l*2)
            elif wantoverlay:
                assert curpool==0, curpool
                placements.append(Network(netname, 0, [[Node(p) for p in po32[netbase+fromstring(h.read(l*2), '<H').astype(int)]]]))
            else:
                assert curpool==0, curpool
                #assert not nodes[1:-2,3].any(), nodes	# Only handle single complete chain
                nets[netcolor].append(po32[netbase+fromstring(h.read(l*2), '<H').astype(int)])

        elif c==11:	# Network Chain 32 (KSEA demo terrain uses this one too)
            (l,)=unpack('<B', h.read(1))
            #print "\nChain32 %d" % l
            if skipnetworks:
                h.read(l*4)
            elif wantoverlay:
                assert curpool==0, curpool
                placements.append(Network(netname, 0, [[Node(p) for p in po32[fromstring(h.read(l*4), '<I')]]]))
            else:
                assert curpool==0, curpool
                #assert not nodes[1:-2,3].any(), nodes	# Only handle single complete chain
                nets[netcolor].append(po32[fromstring(h.read(l*4), '<I')])

        elif c==13:	# Polygon Range (DSF2Text uses this one)
            (param,first,last)=unpack('<HHH', h.read(6))
            if not wantoverlay or last-first<2: continue
            winding=[]
            for d in range(first, last):
                p=pool[curpool][d]
                winding.append(p.tolist())
            placements.append(Polygon.factory(polygons[idx], param, [winding]))

        elif c==15:	# Nested Polygon Range (DSF2Text uses this one too)
            (param,n)=unpack('<HB', h.read(3))
            i=[]
            for j in range(n+1):
                (l,)=unpack('<H', h.read(2))
                i.append(l)
            if not wantoverlay: continue
            windings=[]
            for j in range(n):
                winding=[]
                for d in range(i[j],i[j+1]):
                    p=pool[curpool][d]
                    winding.append(p.tolist())
                windings.append(winding)
            placements.append(Polygon.factory(polygons[idx], param, windings))

        elif c==27:	# Patch Triangle Strip - cross-pool (KSEA demo terrain uses this one)
            (l,)=unpack('<B', h.read(1))
            #if __debug__: print '27: Triangle strip %d' % l
            if flags&1 and wantmesh:
                curpatch.append(array([pool[p][d] for (p,d) in fromstring(h.read(l*4), '<H').reshape(-1,2)])[stripindices[l]])
                assert len(curpatch[-1]) == 3*(l-2), len(curpatch[-1])
            else:
                h.seek(l*4, 1)

        elif c==28:	# Patch Triangle Strip Range (KSEA demo terrain uses this one too)
            (first,last)=unpack('<HH', h.read(4))
            #if __debug__: print '28: Triangle strip %d' % (last-first)
            if flags&1 and wantmesh:
                curpatch.append(pool[curpool][first:][stripindices[last-first]])
                assert len(curpatch[-1]) == 3*(last-first-2), len(curpatch[-1])

        elif c==1:	# Coordinate Pool Select
            (curpool,)=unpack('<H', h.read(2))
            
        elif c==2:	# Junction Offset Select
            (netbase,)=unpack('<I', h.read(4))
            #print "\nJunction Offset %d" % netbase
            
        elif c==3:	# Set Definition
            (idx,)=unpack('<B', h.read(1))
            
        elif c==4:	# Set Definition
            (idx,)=unpack('<H', h.read(2))
            
        elif c==5:	# Set Definition
            (idx,)=unpack('<I', h.read(4))
            
        elif c==6:	# Set Road Subtype
            (roadtype,)=unpack('<B', h.read(1))
            netcolor = roadtype in netdefs and netdefs[roadtype].color or COL_NETWORK
            netname  = roadtype in netdefs and netdefs[roadtype].name or '#%03d%s' % (roadtype, NetworkDef.NETWORK)
            #print "\nRoad type %d" % roadtype
            
        elif c==7:	# Object
            (d,)=unpack('<H', h.read(2))
            p=pool[curpool][d]
            if wantoverlay:
                placements.append(Object.factory(objects[idx], p[1],p[0], round(p[2],1)))
                
        elif c==8:	# Object Range
            (first,last)=unpack('<HH', h.read(4))
            if wantoverlay:
                for d in range(first, last):
                    p=pool[curpool][d]
                    placements.append(Object.factory(objects[idx], p[1],p[0], round(p[2],1)))

        elif c==12:	# Polygon
            (param,l)=unpack('<HB', h.read(3))
            if not wantoverlay or l<2:
                h.read(l*2)
                continue
            winding=[]
            for i in range(l):
                (d,)=unpack('<H', h.read(2))
                p=pool[curpool][d]
                winding.append(p.tolist())
            placements.append(Polygon.factory(polygons[idx], param, [winding]))
            
        elif c==14:	# Nested Polygon
            (param,n)=unpack('<HB', h.read(3))
            windings=[]
            for i in range(n):
                (l,)=unpack('<B', h.read(1))
                winding=[]
                for j in range(l):
                    (d,)=unpack('<H', h.read(2))
                    p=pool[curpool][d]
                    winding.append(p.tolist())
                windings.append(winding)
            if wantoverlay and n>0 and len(windings[0])>=2:
                placements.append(Polygon.factory(polygons[idx], param, windings))
                
        elif c==16:	# Terrain Patch
            makemesh(mesh,path,curter,curpatch,south,west,elev,elevwidth,elevheight,terrains,tercache)
            #if __debug__: print '\n16: Patch, flags=%d' % flags
            curter=terrain[idx]
            curpatch=[]
            
        elif c==17:	# Terrain Patch w/ flags
            makemesh(mesh,path,curter,curpatch,south,west,elev,elevwidth,elevheight,terrains,tercache)
            (flags,)=unpack('<B', h.read(1))
            #if __debug__: print '\n17: Patch, flags=%d' % flags
            curter=terrain[idx]
            curpatch=[]
            
        elif c==18:	# Terrain Patch w/ flags & LOD
            makemesh(mesh,path,curter,curpatch,south,west,elev,elevwidth,elevheight,terrains,tercache)
            (flags,near,far)=unpack('<Bff', h.read(9))
            #if __debug__: print '18: Patch, flags=%d, lod=%d,%d' % (flags, near,far)
            assert near==0	# We don't currently handle LOD
            curter=terrain[idx]
            curpatch=[]

        elif c==23:	# Patch Triangle
            (l,)=unpack('<B', h.read(1))
            #if __debug__: print '23: Triangles %d' % l
            if flags&1 and wantmesh:
                curpatch.append(pool[curpool][fromstring(h.read(l*2), '<H')])
                assert len(curpatch[-1]) == l, len(curpatch[-1])
            else:
                h.seek(l*2, 1)

        elif c==24:	# Patch Triangle - cross-pool
            (l,)=unpack('<B', h.read(1))
            #if __debug__: print '24: Triangles %d' % l
            if flags&1 and wantmesh:
                curpatch.append(array([pool[p][d] for (p,d) in fromstring(h.read(l*4), '<H').reshape(-1,2)]))
                assert len(curpatch[-1]) == l, len(curpatch[-1])
            else:
                h.seek(l*4, 1)

        elif c==25:	# Patch Triangle Range
            (first,last)=unpack('<HH', h.read(4))
            #if __debug__: print '25: Triangles %d' % (last-first)
            if flags&1 and wantmesh:
                curpatch.append(pool[curpool][first:last])
                assert len(curpatch[-1]) == last-first, len(curpatch[-1])

        elif c==26:	# Patch Triangle Strip (used by g2xpl and MeshTool)
            (l,)=unpack('<B', h.read(1))
            #if __debug__: print '26: Triangle strip %d' % l
            if flags&1 and wantmesh:
                curpatch.append(pool[curpool][fromstring(h.read(l*2), '<H')[stripindices[l]]])
                assert len(curpatch[-1]) == 3*(l-2), len(curpatch[-1])
            else:
                h.seek(l*2, 1)

        elif c==29:	# Patch Triangle Fan
            (l,)=unpack('<B', h.read(1))
            #if __debug__: print '29: Triangle fan %d' % l
            if flags&1 and wantmesh:
                curpatch.append(pool[curpool][fromstring(h.read(l*2), '<H')[fanindices[l]]])
                assert len(curpatch[-1]) == 3*(l-2), len(curpatch[-1])
            else:
                h.seek(l*2, 1)

        elif c==30:	# Patch Triangle Fan - cross-pool
            (l,)=unpack('<B', h.read(1))
            #if __debug__: print '30: Triangle fan %d' % l
            if flags&1 and wantmesh:
                curpatch.append(array([pool[p][d] for (p,d) in fromstring(h.read(l*4), '<H').reshape(-1,2)])[fanindices[l]])
                assert len(curpatch[-1]) == 3*(l-2), len(curpatch[-1])
            else:
                h.seek(l*4, 1)

        elif c==31:	# Patch Triangle Fan Range
            (first,last)=unpack('<HH', h.read(4))
            #if __debug__: print '31: Triangle fan %d' % (last-first)
            if flags&1 and wantmesh:
                curpatch.append(pool[curpool][first:][fanindices[last-first]])
                assert len(curpatch[-1]) == 3*(last-first-2), len(curpatch[-1])

        elif c==32:	# Comment
            (l,)=unpack('<B', h.read(1))
            h.read(l)
            
        elif c==33:	# Comment
            (l,)=unpack('<H', h.read(2))
            h.read(l)
            
        elif c==34:	# Comment
            (l,)=unpack('<I', h.read(4))
            h.read(l)
            
        else:
            if __debug__: print "Unrecognised command (%d) at %x" % (c, h.tell()-1)
            raise IOError, (c, "Unrecognised command (%d)" % c, path)

    # Last one
    makemesh(mesh,path,curter,curpatch,south,west,elev,elevwidth,elevheight,terrains,tercache)

    if __debug__:
        print "%6.3f time in CMDS atom" % (time.clock()-clock)
        print 'Stats:'
        for cmd in sorted(cmds.keys()): print cmd, cmds[cmd]
        if not wantoverlay: print "%d patches, avg subsize %s" % (makemesh.count, makemesh.total/makemesh.count)
    h.close()

    # consolidate mesh
    for k,v in mesh.iteritems():
        mesh[k] = concatenate(v)

    if len(terrain)>1 and 'g2xpl' in terrain[1]:
        # Post-processing for g2xpl-generated meshes. This is slow so only do it if a g2xpl texture is used.
        if __debug__: clock=time.clock()
        for k,v in mesh.iteritems():
            # sort vertices of each triangle
            dtype = [('x1',float32), ('y1',float32), ('z1',float32), ('u1',float32), ('v1',float32),
                     ('x2',float32), ('y2',float32), ('z2',float32), ('u2',float32), ('v2',float32),
                     ('x3',float32), ('y3',float32), ('z3',float32), ('u3',float32), ('v3',float32)]
            v = v.reshape((-1,15))
            v1 = v.view(dtype)
            v2 = roll(v, -5, axis=1).view(dtype)
            v3 = roll(v, -10, axis=1).view(dtype)
            v12= where(logical_or(v2['x1'] > v1['x1'], logical_and(v2['x1'] == v1['x1'], v2['z1'] > v1['z1'])), v2, v1)
            v  = where(logical_or(v3['x1'] >v12['x1'], logical_and(v3['x1'] ==v12['x1'], v3['z1'] >v12['z1'])), v3, v12)

            # remove negatives - calculate cross product at middle point p2
            # http://paulbourke.net/geometry/polygonmesh/ "... vertices ordered clockwise or counterclockwise"
            v = v[(v['x2']-v['x1']) * (v['z3']-v['z2']) - (v['z2']-v['z1']) * (v['x3']-v['x2']) > 0]

            # Remove dupes. numpy.unique() only works on 1D arrays -
            # http://mail.scipy.org/pipermail/numpy-discussion/2010-September/052877.html
            v = unique(v)
            mesh[k] = v.view(float32).reshape((-1,5))
        if __debug__: print "%6.3f time in g2xpl post-processing" % (time.clock()-clock)

    # apply colors to network points, consolidate and create indices for drawing
    # FIXME: speed this up
    if nets:
        counts = []	# points in each chain
        newnets = []
        for color, cnets in nets.iteritems():
            counts.extend([len(chain) for chain in cnets])
            cnets = vstack(cnets)
            cnets[:,3:6] = color	# apply color across all points
            newnets.append(cnets)
        newnets = vstack(newnets)
        counts = array(counts, int)
        start  = cumsum(concatenate((zeros((1,), int), counts)))[:-1]
        end    = start + counts - 1
        indices= concatenate([repeat(arange(start[i],end[i],1,GLuint), 2) for i in range(len(counts))])
        indices[1::2] += 1
        assert (len(indices) == (sum(counts)-len(counts))*2)
        nets = (newnets, indices)
    else:
        nets = None

    if bbox:	# filter to bounding box
        if bytype is Object:
            placements = [p for p in placements if p.inside(bbox) and (isinstance(p, Object) or isinstance(p, AutoGenBlock) or isinstance(p, AutoGenString))]	# filter by type, including AutoGenPoints
        elif bytype:
            placements = [p for p in placements if p.inside(bbox) and p.__class__ is bytype]	# filter by type, excluding derived
    else:
        if bytype is Object:
            placements = [p for p in placements if isinstance(p, Object) or isinstance(p, AutoGenBlock) or isinstance(p, AutoGenString)]	# filter by type, including AutoGenPoints
        elif bytype:
            placements = [p for p in placements if p.__class__ is bytype]	# filter by type, excluding derived

    return (south, west, placements, nets, mesh)
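
A minimal standalone sketch (not from the original source) of the per-plane decoding
used in the GEOD atom above: optional run-length encoding (bit 2 of the per-plane
encoding byte), optional differencing (bit 1), with the linear rescale applied
afterwards as plane * (scale/0xffff) + offset. The names and the single-uint16-plane
assumption are illustrative.

from struct import unpack
from numpy import cumsum, empty, fromstring, uint16

def decode_plane(h, n, e):
    """Decode one plane of n uint16 values from file-like h; e is the encoding byte."""
    plane = empty(n, uint16)
    if e & 2:                           # RLE
        offset = 0
        while offset < n:
            (r,) = unpack('<B', h.read(1))
            if r & 128:                 # repeat run: one value, r&127 copies
                (d,) = unpack('<H', h.read(2))
                plane[offset:offset + (r & 127)] = d
                offset += r & 127
            else:                       # literal run: r raw values
                plane[offset:offset + r] = fromstring(h.read(r * 2), '<H')
                offset += r
    else:                               # raw: n values back to back
        plane[:] = fromstring(h.read(n * 2), '<H')
    if e & 1:                           # differenced: stored values are deltas
        plane = cumsum(plane, dtype=uint16)
    return plane
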
Example #35
0
    def on_graph(self, graph_response):
        """ TODO: clean this up using templates """

        if graph_response.error is not None:
            print "Got error from API server"
            graph_response.rethrow()
        if graph_response.code != 200:
            print "Got response code %s from API server" % graph_response.code
            self.send_error(graph_response.code)

        # TODO: figure out the interface for this

        # TODO: if there are too many nodes, we need to prune the view
        # One idea: do a Dijkstra from all running nodes. Hide all nodes
        # with distance >= 50.
        tasks = json.loads(graph_response.body)["response"]

        graphviz = pygraphviz.AGraph(directed=True, size=12)
        n_nodes = 0
        for task, p in tasks.iteritems():
            selector = p['status']

            if selector == PENDING and not p['workers']:
                selector = 'BROKEN'

            colors = {
                PENDING: ('white', 'black'),
                DONE: ('green', 'white'),
                FAILED: ('red', 'white'),
                RUNNING: ('blue', 'white'),
                'BROKEN': ('orange', 'black'),  # external task, can't run
            }
            fillcolor = colors[selector][0]
            fontcolor = colors[selector][1]
            shape = 'box'
            label = task.replace('(', '\\n(').replace(
                ',', ',\\n')  # force GraphViz to break lines
            # TODO: if the ( or , is a part of the argument we shouldn't really break it

            # TODO: FIXME: encoding strings is not compatible with newer pygraphviz
            graphviz.add_node(task.encode('utf-8'),
                              label=label.encode('utf-8'),
                              style='filled',
                              fillcolor=fillcolor,
                              fontcolor=fontcolor,
                              shape=shape,
                              fontname='Helvetica',
                              fontsize=11)
            n_nodes += 1

        for task, p in tasks.iteritems():
            for dep in p['deps']:
                graphviz.add_edge(dep, task)

        if n_nodes < 200:
            graphviz.layout('dot')
        else:
            # stupid workaround...
            graphviz.layout('fdp')

        s = StringIO()
        graphviz.draw(s, format='svg')
        s.seek(0)
        svg = s.read()
        # TODO: this code definitely should not live here:
        html_header = pkg_resources.resource_string(__name__,
                                                    'static/header.html')

        pattern = r'(<svg.*?)(<g id="graph1".*?)(</svg>)'
        mo = re.search(pattern, svg, re.S)

        self.write(''.join([
            html_header,
            mo.group(1), '<g id="viewport">',
            mo.group(2), '</g>',
            mo.group(3), "</body></html>"
        ]))

        self.finish()
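
The handler above relies on pygraphviz drawing into any file-like object. A minimal
sketch of that pattern, assuming pygraphviz and Graphviz are installed (the graph
contents here are illustrative):

import pygraphviz
from cStringIO import StringIO

g = pygraphviz.AGraph(directed=True)
g.add_node('a', shape='box', style='filled', fillcolor='green')
g.add_edge('a', 'b')
g.layout('dot')                 # compute positions before drawing
s = StringIO()
g.draw(s, format='svg')         # draw() accepts file-like objects
assert s.getvalue().lstrip().startswith('<?xml')
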
Example #36
0
    def print_report(self):
        self.ensure_one()
        print('ensure_one = ', self.ensure_one())
        context = dict(self._context or {})
        # Header Section
        date_from = self.date_from
        date_to = self.date_to
        now = datetime.now()
        date_now = now.strftime('%d/%b/%Y')
        time_now = now.strftime('%H:%M')
        year = now.strftime('%Y')
        company_name = self.company_id.name
        all_employee = self.env['hr.employee'].search([('company_id', '=',
                                                        self.company_id.id)])
        print('all_employee = ', all_employee)
        company_name = company_name.upper()
        dhm = 'Day - Hour:Minute'
        chm = 'Count - Hour:Minute'
        ##########################

        # Leave Section
        all_leave = self.env['hr.holidays.status'].search([])
        all_leave_name = []
        for leave in self.holiday_status_ids:
            if leave.name not in all_leave_name:
                all_leave_name.append(leave.name)

        ##########################

        workbook = xlwt.Workbook()
        header1 = xlwt.easyxf(
            'font: bold on, color black, name Arial; align: wrap yes, vert bottom, horz centre'
        )
        title1 = xlwt.easyxf(
            'font: color black, name Arial; align: wrap yes, vert centre, horz centre'
        )
        title_total = xlwt.easyxf(
            'font: color black, name Arial; align: wrap yes, horz centre; pattern: pattern solid, fore_color gray40'
        )
        name = xlwt.easyxf(
            'font: color black, name Arial; align: wrap yes, vert centre, horz left'
        )
        for dept in self.dept_ids:
            j = 0
            i = 0
            worksheet = workbook.add_sheet(dept.name)
            worksheet.write(0, j + 5, company_name, header1)
            worksheet.write(0, j + 10, date_now, title1)
            worksheet.write(1, j + 10, time_now, title1)
            worksheet.write(
                2, j + 5,
                str(date_from) + ' - ' + str(date_to) + ' Year ' + str(year),
                title1)
            worksheet.write(4, j + 2, dhm, title1)
            worksheet.write(4, j + 3, chm, title1)
            worksheet.write(4, j + 4, chm, title1)

            worksheet.col(0 + j).width = 11000
            worksheet.col(1 + j).width = 8000
            worksheet.col(2 + j).width = 8000
            worksheet.col(3 + j).width = 8000
            worksheet.col(4 + j).width = 8000

            worksheet.row(5).height = 600
            worksheet.write(5, j + 1, 'Join Date', title1)
            worksheet.write(5, j + 2, 'Absent/Not Clock', title1)
            worksheet.write(5, j + 3, 'Late', title1)
            worksheet.write(5, j + 4, 'Leave Early', title1)
            j = 5
            all_leave_name.sort()
            print('all_leave_name = ', all_leave_name)
            for l in all_leave_name:
                if j == 5:
                    worksheet.col(j).width = 11000
                else:
                    worksheet.col(j).width = 8000
                worksheet.write(4, j, dhm, title1)
                worksheet.write(5, j, l, title1)
                j += 1
            worksheet.col(j + 0).width = 8000
            worksheet.col(j + 1).width = 10000
            worksheet.col(j + 2).width = 8000
            worksheet.col(j + 3).width = 8000
            worksheet.write(4, j + 0, 'Hour', title1)
            worksheet.write(4, j + 1, 'Hours', title1)
            worksheet.write(5, j + 0, 'Working Hours', title1)
            worksheet.write(
                5, j + 1,
                'Absent + late + leave early + all leaves excl. annual leave',
                title1)
            worksheet.write(5, j + 2, '% Loss of worktime', title1)
            worksheet.write(5, j + 3, '% Remaining of worktime', title1)
            i = 7
            k = 0
            print('j = ', j)
            # Count Section
            absent = 0
            late_total = 0
            late_length_total = 0.00
            early_total = 0
            early_length_total = 0.00
            for employee in all_employee.filtered(
                    lambda x: x.department_id == dept):
                count_employee_absent = self.count_employee_absent(
                    employee, date_from, date_to)
                absent += count_employee_absent

                emp_late_early = self.count_emp_late_early(
                    employee, date_from, date_to)
                late_total += emp_late_early['total_late']
                late_length_total += emp_late_early['length_late']
                early_total += emp_late_early['total_leave_early']
                early_length_total += emp_late_early['length_leave_early']

                late = emp_late_early['late']
                early = emp_late_early['leave_early']
                worksheet.write(i, k + 0,
                                employee.employee_no + ' ' + employee.name,
                                name)
                worksheet.write(i, k + 1, employee.join_date, title1)
                worksheet.write(i, k + 2,
                                str(count_employee_absent) + ' - 00:00',
                                title1)
                worksheet.write(i, k + 3, late, title1)
                worksheet.write(i, k + 4, early, title1)
                all_leave_name.sort()
                ls = 5
                for p in all_leave_name:
                    holiday_type_id = self.env['hr.holidays.status'].search([
                        ('name', '=', p)
                    ])
                    count_holiday = self.count_leave(employee, date_from,
                                                     date_to,
                                                     holiday_type_id.id)
                    worksheet.write(i, ls, count_holiday, title1)
                    ls += 1
                i += 1

            absent_to_display = str(absent) + ' - 00:00'
            late_to_display = self.converse_late_early(late_total,
                                                       late_length_total)
            leave_early_to_display = self.converse_late_early(
                early_total, early_length_total)

            for t in range(0, (j + 4), 1):
                if t == 2:
                    worksheet.write(6, t, absent_to_display, title_total)
                elif t == 3:
                    worksheet.write(6, t, late_to_display, title_total)
                elif t == 4:
                    worksheet.write(6, t, leave_early_to_display, title_total)
                else:
                    worksheet.write(6, t, None, title_total)

        fp = StringIO()
        workbook.save(fp)
        fp.seek(0)
        data = fp.read()
        fp.close()
        res = base64.encodestring(data)
        mod_rec = self.env['sum.att.xls.report'].create({
            'name': 'Summary Attendance Report.xls',
            'file': res
        })
        return {
            'name': _('Summary Attendance Report'),
            'res_id': mod_rec.id,
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'sum.att.xls.report',
            'type': 'ir.actions.act_window',
            'target': 'new',
        }
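
The report above builds styles with xlwt easyxf strings and saves the workbook into
a StringIO buffer before base64-encoding it. A minimal sketch of that save-to-memory
pattern (sheet and cell contents are illustrative):

import xlwt
from cStringIO import StringIO

wb = xlwt.Workbook()
ws = wb.add_sheet('Demo')
bold = xlwt.easyxf('font: bold on, name Arial; align: horz centre')
ws.write(0, 0, 'Total', bold)
fp = StringIO()
wb.save(fp)                     # xlwt saves to any file-like object
assert fp.getvalue().startswith('\xd0\xcf\x11\xe0')    # OLE2 compound file magic
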
Example #37
0
class MARCReader(Reader):
    """
    An iterator class for reading a file of MARC21 records. 

    Simple usage:

        from pymarc import MARCReader

        ## pass in a file object
        reader = MARCReader(file('file.dat'))
        for record in reader:
            ...

        ## pass in marc in transmission format 
        reader = MARCReader(rawmarc)
        for record in reader:
            ...

    If you would like to have your Record object contain unicode strings
    use the to_unicode parameter:

        reader = MARCReader(file('file.dat'), to_unicode=True)

    This will decode from MARC-8 or UTF-8 depending on the value in the 
    MARC leader at position 9. 
    
    If you find yourself in the unfortunate position of having data that 
    is utf-8 encoded without the leader set appropriately you can use 
    the force_utf8 parameter:

        reader = MARCReader(file('file.dat'), to_unicode=True,
            force_utf8=True)
    
    If you find yourself in the unfortunate position of having data that is 
    mostly utf-8 encoded but with a few non-utf-8 characters, you can also use
    the utf8_handling parameter, which takes the same values ('strict', 
    'replace', and 'ignore') as the Python Unicode codecs (see 
    http://docs.python.org/library/codecs.html for more info).

    """
    def __init__(self,
                 marc_target,
                 to_unicode=False,
                 force_utf8=False,
                 hide_utf8_warnings=False,
                 utf8_handling='strict'):
        """
        The constructor to which you can pass either raw marc or a file-like
        object. Basically the argument you pass in should be raw MARC in 
        transmission format or an object that responds to read().
        """
        super(MARCReader, self).__init__()
        self.to_unicode = to_unicode
        self.force_utf8 = force_utf8
        self.hide_utf8_warnings = hide_utf8_warnings
        self.utf8_handling = utf8_handling
        if (hasattr(marc_target, "read") and callable(marc_target.read)):
            self.file_handle = marc_target
        else:
            self.file_handle = StringIO(marc_target)

    def next(self):
        """
        To support iteration. 
        """
        first5 = self.file_handle.read(5)
        if not first5:
            raise StopIteration
        if len(first5) < 5:
            raise RecordLengthInvalid

        length = int(first5)
        chunk = self.file_handle.read(length - 5)
        chunk = first5 + chunk
        record = Record(chunk,
                        to_unicode=self.to_unicode,
                        force_utf8=self.force_utf8,
                        hide_utf8_warnings=self.hide_utf8_warnings,
                        utf8_handling=self.utf8_handling)
        return record
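
next() above depends on the MARC21 transmission format, in which every record starts
with a 5-digit, zero-padded ASCII length counting the whole record, length field
included. A tiny sketch of just that framing (the bytes are illustrative, not a
valid record):

raw = '00026' + 'x' * 21        # 5-byte length field + 21 remaining bytes
assert int(raw[:5]) == len(raw) # the length covers the entire record
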
Example #38
0
class Buffer(object):
    def __init__(self, fp):
        if not hasattr(fp, 'read'):
            self.fp = StringIO(fp)
        else:
            self.fp = fp

    @property
    def pos(self):
        return self.fp.tell()

    @pos.setter
    def pos(self, pos):
        self.fp.seek(pos)

    def __len__(self):
        temp = self.pos
        self.fp.seek(0, 2)
        ret = self.pos
        self.pos = temp
        return ret

    def __iadd__(self, o):
        self.pos += o
        return self

    def __isub__(self, o):
        self.pos -= o
        return self

    def read(self, count):
        return self.fp.read(count)

    def uint(self, count=None):
        if count is None:
            return unpack('<I', self.read(4))[0]
        return unpack('<' + 'I' * count, self.read(4 * count))

    def int(self, count=None):
        if count is None:
            return unpack('<i', self.read(4))[0]
        return unpack('<' + 'i' * count, self.read(4 * count))

    def ushort(self, count=None):
        if count is None:
            return unpack('<H', self.read(2))[0]
        return unpack('<' + 'H' * count, self.read(2 * count))

    def short(self, count=None):
        if count is None:
            return unpack('<h', self.read(2))[0]
        return unpack('<' + 'h' * count, self.read(2 * count))

    def uchar(self, count=None):
        if count is None:
            return unpack('<B', self.read(1))[0]
        return unpack('<' + 'B' * count, self.read(count))

    def char(self, count=None):
        if count is None:
            return unpack('<b', self.read(1))[0]
        return unpack('<' + 'b' * count, self.read(count))

    def float(self, count=None):
        if count is None:
            return unpack('<f', self.read(4))[0]
        return unpack('<' + 'f' * count, self.read(4 * count))

    def vec3(self, count=None):
        if count is None:
            return unpack('<fff', self.read(12))
        return tuple(unpack('<fff', self.read(12)) for i in xrange(count))
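
A short usage sketch for the Buffer class above, feeding it a packed byte string
(the constructor wraps anything without a read() method in StringIO):

from struct import pack

buf = Buffer(pack('<IhfffH', 7, -2, 1.0, 2.0, 3.0, 0xbeef))
assert buf.uint() == 7                  # 4 bytes, '<I'
assert buf.short() == -2                # 2 bytes, '<h'
assert buf.vec3() == (1.0, 2.0, 3.0)    # 12 bytes, '<fff'
assert buf.ushort() == 0xbeef           # 2 bytes, '<H'
assert len(buf) == 20 and buf.pos == 20
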
Example #39
0
def patches(a, bins):
    if not bins:
        return a

    plens = [len(x) for x in bins]
    pl = sum(plens)
    bl = len(a) + pl
    tl = bl + bl + pl  # enough for the patches and two working texts
    b1, b2 = 0, bl

    if not tl:
        return a

    m = StringIO()

    def move(dest, src, count):
        """move count bytes from src to dest

        The file pointer is left at the end of dest.
        """
        m.seek(src)
        buf = m.read(count)
        m.seek(dest)
        m.write(buf)

    # load our original text
    m.write(a)
    frags = [(len(a), b1)]

    # copy all the patches into our segment so we can memmove from them
    pos = b2 + bl
    m.seek(pos)
    for p in bins:
        m.write(p)

    def pull(dst, src, l):  # pull l bytes from src
        while l:
            f = src.pop()
            if f[0] > l:  # do we need to split?
                src.append((f[0] - l, f[1] + l))
                dst.append((l, f[1]))
                return
            dst.append(f)
            l -= f[0]

    def collect(buf, list):
        start = buf
        for l, p in reversed(list):
            move(buf, p, l)
            buf += l
        return (buf - start, start)

    for plen in plens:
        # if our list gets too long, execute it
        if len(frags) > 128:
            b2, b1 = b1, b2
            frags = [collect(b1, frags)]

        new = []
        end = pos + plen
        last = 0
        while pos < end:
            m.seek(pos)
            p1, p2, l = struct.unpack(">lll", m.read(12))
            pull(new, frags, p1 - last)  # what didn't change
            pull([], frags, p2 - p1)  # what got deleted
            new.append((l, pos + 12))  # what got added
            pos += l + 12
            last = p2
        frags.extend(reversed(new))  # what was left at the end

    t = collect(b2, frags)

    m.seek(t[1])
    return m.read(t[0])
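
Each hunk consumed by patches() is a big-endian (start, end, length) header packed
as '>lll', followed by length bytes of replacement data. A hedged demonstration
building one hunk and applying it (the text is illustrative):

import struct

a = 'the quick brown fox'
hunk = struct.pack('>lll', 4, 9, 3) + 'bat'     # replace a[4:9] with 'bat'
assert patches(a, [hunk]) == 'the bat brown fox'
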
def get_all_objects(text, beginning=r'{', debug=False):
    """ Zealous obtainer of mappings from a text, e.g. in javascript
    or JSON or whatever. Anything between '{' and '}'

    The monstrous advanced version.

    Not performant.

    Requires pyyaml.

    >>> st = 'a str with var stuff = {a: [{"v": 12}]} and such'
    >>> next(get_all_objects(st))
    {'a': [{'v': 12}]}
    """

    def _dbg_actual(st, *ar):
        print "D: ", st % ar

    _dbg = _dbg_actual if debug else (lambda *ar: None)

    import yaml

    # Allow any escape to be treated as the character itself.
    class ddd(dict):
        def __getitem__(self, key):
            try:
                return dict.__getitem__(self, key)
            except KeyError:
                self.__setitem__(key, key)
                return key

    class TheLoader(yaml.SafeLoader):
        ESCAPE_REPLACEMENTS = ddd(yaml.SafeLoader.ESCAPE_REPLACEMENTS)

    from cStringIO import StringIO
    # optimised slicing
    if isinstance(text, unicode):
        _dbg("encoding")
        text = text.encode('utf-8')
    _dbg("Length: %r", len(text))
    beginnings = list(indexall_re(text, beginning))
    _dbg("Beginnings amount: %r", len(beginnings))
    _dbg("Beginnings list: %r", beginnings[:15] + (beginnings[15:] and ['...']))
    text = StringIO(text)
    for from_ in beginnings:
        current_pos = text.tell()
        _dbg("At %r", current_pos)
#         if from_ < current_pos:
#             _dbg("Skipping the beginning %r" % (from_,))
#             # NOTE: this will skip the recursed structures.
#             # Which is quite helpful.
#             continue
        text.seek(from_)
        loader = TheLoader(text)
        try:
            part_res = loader.get_data()
        except Exception as exc:
            _dbg("Nope: %r / %s / %r", exc, exc, exc.args)
            text.seek(from_)
            _dbg("Stuff was: %r", repr(text.read(50)).decode('string-escape'))
            continue
        assert isinstance(part_res, dict)
        yield part_res
    def __str__(self):
        from cStringIO import StringIO
        s = StringIO()
        self.show(out=s)
        s.seek(0)
        return s.read()
Example #42
0
class AppendedFiles():
    """
    Two WormHoleFiles presented one after another.
    Takes 1 or 2 dicts as arguments; they're passed to the WormHoleFile
    constructors at init.

    This is aimed at merging the TOC track starting at LBA45000 with
    the last one, to mimic one big track at LBA0 with the files at the
    same LBAs as on the GD-ROM.
    """
    def __init__(self, wormfile1, wormfile2=None, *args, **kwargs):

        self._f1 = WormHoleFile(**wormfile1)

        self._f1.seek(0, 2)
        self._f1_len = self._f1.tell()
        self._f1.seek(0, 0)

        self._f2_len = 0
        if wormfile2:
            self._f2 = WormHoleFile(**wormfile2)

            self._f2.seek(0, 2)
            self._f2_len = self._f2.tell()
            self._f2.seek(0, 0)
        else:
            # So the rest of the code works for one or 2 files.
            self._f2 = StringIO('')

        self.seek(0, 0)

    def seek(self, a, b=0):
        if b == 0:
            self.MetaPointer = a
        if b == 1:
            self.MetaPointer += a
        if b == 2:
            self.MetaPointer = self._f1_len + self._f2_len - a

        if self.MetaPointer >= self._f1_len:
            self._f1.seek(0, 2)
            self._f2.seek(self.MetaPointer - self._f1_len, 0)
        else:
            self._f1.seek(self.MetaPointer, 0)
            self._f2.seek(0, 0)

    def read(self, length=None):
        if length is None:
            length = self._f1_len + self._f2_len - self.MetaPointer
        tmp = self.MetaPointer
        FutureOffset = self.MetaPointer + length
        if FutureOffset < self._f1_len:  # Read inside file1
            data = self._f1.read(length)
        elif tmp > self._f1_len:  # Read inside file2
            data = self._f2.read(length)
        else:  # Read end of file1 and start of file2
            data = self._f1.read(self._f1_len - tmp)
            data += self._f2.read(FutureOffset - self._f1_len)

        self.seek(FutureOffset)  # It might be enough to just update
        # self.MetaPointer, but this is safer.
        return data

    def tell(self):
        return self.MetaPointer

    def __enter__(self):
        return self

    def __exit__(self, type=None, value=None, traceback=None):
        # This is required to close files properly when using the with
        # statement. Which isn't required by ISO9660 anymore, but could
        # be useful for other uses so it stays!
        self._f1.__exit__()
        if self._f2_len:
            self._f2.__exit__()
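
A simplified, hedged illustration of the three read() cases above, using two plain
StringIO objects in place of WormHoleFiles (which aren't shown here); the boundary
comparisons mirror the original:

from cStringIO import StringIO

f1, f2 = StringIO('AAAA'), StringIO('BBBB')
F1_LEN = 4

def read_span(pos, length):
    end = pos + length
    if end < F1_LEN:                    # entirely inside file 1
        f1.seek(pos)
        return f1.read(length)
    elif pos > F1_LEN:                  # entirely inside file 2
        f2.seek(pos - F1_LEN)
        return f2.read(length)
    else:                               # straddles the boundary
        f1.seek(pos)
        f2.seek(0)
        return f1.read(F1_LEN - pos) + f2.read(end - F1_LEN)

assert read_span(2, 4) == 'AABB'
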
Example #43
0
class HttpRequest(object):
    """A basic HTTP request."""

    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    _upload_handlers = []

    def __init__(self):
        self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
        self.path = ''
        self.path_info = ''
        self.method = None

    def __repr__(self):
        return '<HttpRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
            (pformat(self.GET), pformat(self.POST), pformat(self.COOKIES),
            pformat(self.META))

    def get_host(self):
        """Returns the HTTP host using the environment or request headers."""
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and (
            'HTTP_X_FORWARDED_HOST' in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = str(self.META['SERVER_PORT'])
            if server_port != (self.is_secure() and '443' or '80'):
                host = '%s:%s' % (host, server_port)
        return host

    def get_full_path(self):
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '')

    def build_absolute_uri(self, location=None):
        """
        Builds an absolute URI from the location and the variables available in
        this request. If no location is specified, the absolute URI is built on
        ``request.get_full_path()``.
        """
        if not location:
            location = self.get_full_path()
        if not absolute_http_url_re.match(location):
            current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http',
                                         self.get_host(), self.path)
            location = urljoin(current_uri, location)
        return iri_to_uri(location)

    def is_secure(self):
        return os.environ.get("HTTPS") == "on"

    def is_ajax(self):
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'

    def _set_encoding(self, val):
        """
        Sets the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, it is removed and recreated on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, '_get'):
            del self._get
        if hasattr(self, '_post'):
            del self._post

    def _get_encoding(self):
        return self._encoding

    encoding = property(_get_encoding, _set_encoding)

    def _initialize_handlers(self):
        self._upload_handlers = [uploadhandler.load_handler(handler, self)
                                 for handler in settings.FILE_UPLOAD_HANDLERS]

    def _set_upload_handlers(self, upload_handlers):
        if hasattr(self, '_files'):
            raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
        self._upload_handlers = upload_handlers

    def _get_upload_handlers(self):
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers

    upload_handlers = property(_get_upload_handlers, _set_upload_handlers)

    def parse_file_upload(self, META, post_data):
        """Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning = "You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()

    def _get_raw_post_data(self):
        if not hasattr(self, '_raw_post_data'):
            if self._read_started:
                raise Exception("You cannot access raw_post_data after reading from request's data stream")
            try:
                content_length = int(self.META.get('CONTENT_LENGTH', 0))
            except (ValueError, TypeError):
                # If CONTENT_LENGTH was empty string or not an integer, don't
                # error out. We've also seen None passed in here (against all
                # specs, but see ticket #8259), so we handle TypeError as well.
                content_length = 0
            if content_length:
                self._raw_post_data = self.read(content_length)
            else:
                self._raw_post_data = self.read()
            self._stream = StringIO(self._raw_post_data)
        return self._raw_post_data
    raw_post_data = property(_get_raw_post_data)

    def _mark_post_parse_error(self):
        self._post = QueryDict('')
        self._files = MultiValueDict()
        self._post_parse_error = True

    def _load_post_and_files(self):
        # Populates self._post and self._files
        if self.method != 'POST':
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_raw_post_data'):
            self._mark_post_parse_error()
            return

        if self.META.get('CONTENT_TYPE', '').startswith('multipart'):
            if hasattr(self, '_raw_post_data'):
                # Use already read data
                data = StringIO(self._raw_post_data)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except:
                # An error occurred while parsing POST data. Since the request
                # handler might access self.POST when formatting the error,
                # set self._post and self._files to prevent attempts to parse
                # POST data again.
                # Mark that an error occurred. This allows self.__repr__ to
                # be explicit about it instead of simply representing an
                # empty POST.
                self._mark_post_parse_error()
                raise
        else:
            self._post, self._files = QueryDict(self.raw_post_data, encoding=self._encoding), MultiValueDict()

    ## File-like and iterator interface.
    ##
    ## Expects self._stream to be set to an appropriate source of bytes by
    ## a corresponding request subclass (WSGIRequest or ModPythonRequest).
    ## Also when request data has already been read by request.POST or
    ## request.raw_post_data, self._stream points to a StringIO instance
    ## containing that data.

    def read(self, *args, **kwargs):
        self._read_started = True
        return self._stream.read(*args, **kwargs)

    def readline(self, *args, **kwargs):
        self._read_started = True
        return self._stream.readline(*args, **kwargs)

    def xreadlines(self):
        while True:
            buf = self.readline()
            if not buf:
                break
            yield buf
    __iter__ = xreadlines

    def readlines(self):
        return list(iter(self))
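
The fallback branch of get_host() above reconstructs the host from SERVER_NAME and
SERVER_PORT per PEP 333, appending the port only when it differs from the scheme's
default. A standalone sketch of just that logic (the helper name is hypothetical):

def reconstruct_host(meta, is_secure=False):
    host = meta['SERVER_NAME']
    server_port = str(meta['SERVER_PORT'])
    if server_port != (is_secure and '443' or '80'):    # keep non-default ports
        host = '%s:%s' % (host, server_port)
    return host

assert reconstruct_host({'SERVER_NAME': 'example.com', 'SERVER_PORT': 8000}) == 'example.com:8000'
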
Example #44
0
def start_export(request):
    if request.method == 'POST':
        #We retrieve all selected exporters
        listExporter = request.POST.getlist('my_exporters')
        instances = request.session['instancesExplore']
        listId = request.session['listIdToExport']
        xmlResults = []
        #Creation of ZIP file
        in_memory = StringIO()
        zip = zipfile.ZipFile(in_memory, "a")
        is_many_inst = len(instances) > 1
        for instance in instances:
            #Retrieve data
            sessionName = "resultsExplore" + eval(instance)['name']
            results = request.session[sessionName]
            if (len(results) > 0):
                for result in results:
                    if result['id'] in listId:
                        xmlResults.append(result)

            #Convert the collected results with each exporter
            if len(xmlResults) > 0:
                #Init the folder name
                folder_name = None
                if is_many_inst:
                    folder_name = eval(instance)['name']
                #Check whether the XSLT converter was requested. If so, start with it because it needs specific treatment
                listXslt = request.POST.getlist('my_xslts')
                #Get the content of the file
                if len(listXslt) > 0:
                    exporter = XSLTExporter()
                    for xslt in listXslt:
                        xslt = ExporterXslt.objects.get(pk=xslt)
                        exporter._setXslt(xslt.content)
                        if folder_name is None:
                            exporter._transformAndZip(xslt.name, xmlResults,
                                                      zip)
                        else:
                            exporter._transformAndZip(
                                folder_name + "/" + xslt.name, xmlResults, zip)

                #Export with the remaining exporters
                for exporter in listExporter:
                    exporter = get_exporter(exporter)
                    exporter._transformAndZip(folder_name, xmlResults, zip)

        zip.close()

        #ZIP file to be downloaded
        in_memory.seek(0)
        response = HttpResponse(in_memory.read())
        response["Content-Disposition"] = "attachment; filename=Results.zip"
        response['Content-Type'] = 'application/x-zip'
        request.session['listIdToExport'] = ''

        return response
    else:
        # We retrieve the result_id for each file the user wants to export
        listId = request.GET.getlist('listId[]')
        request.session['listIdToExport'] = listId

        # Get all schemaId from the listId
        listSchemas = XMLdata.getByIDsAndDistinctBy(listId, "schema")
        # XMLdata.objects(pk__in=listId).distinct(field="schema")

        export_form = ExportForm(listSchemas)

        upload_xslt_Form = UploadXSLTForm(listSchemas)
        template = loader.get_template('explore/export_start.html')
        context = Context({
            'export_form': export_form,
            'upload_xslt_Form': upload_xslt_Form,
            'nb_elts_exp': len(export_form.EXPORT_OPTIONS),
            'nb_elts_xslt': len(upload_xslt_Form.EXPORT_OPTIONS)
        })

        return HttpResponse(json.dumps({'template': template.render(context)}),
                            content_type='application/javascript')
Example #45
0
def download(request):
    """ Preps the download from the form post and delivers it

    Uses the JSON encoded grids and loops over them building the css, js and
    images required for all to work correctly

    Args:
        request
    """
    # redirect if it's not a post
    if request.method != 'POST':
        return HttpResponseRedirect('/')

    # Ensure our formatting is solid
    try:
        grids = json.loads(request.POST['grids'])
    except ValueError:  # Raised if grid is poorly formatted
        raise Http404

    max_cols = 0

    # Set up a string buffer for the zip (we'll serve it from memory)
    zip_buff = StringIO()
    zip_dl = ZipFile(zip_buff, 'w')
    """ Create a PNG for each grid
    ----------------------------------------------------------------------------
    """
    for g in grids:
        # Work out which grid has the highest number of cols
        if g['col_num'] > max_cols:
            max_cols = g['col_num']
        # Instantiate a Grid model object
        grid = Grid(
            min_width=g['min_width'],
            col_num=g['col_num'],
            padding_width=g['padding_width'],
            padding_type=g['padding_type'],
            gutter_width=g['gutter_width'],
            gutter_type=g['gutter_type'],
            upper=g['upper'],
        )
        # Draw the image into a string io buffer
        im_buff = grid.create_image()
        # If upper is false, set a better name
        if g['upper'] is False:
            upper_name = 'infinity'
        else:
            upper_name = g['upper']
        # Save the grid to the zip with a decent name
        im_name = "grid-%s_to_%s.png" % (g['min_width'], upper_name)
        zip_dl.writestr(im_name, im_buff.getvalue())
    """ Build the zip file in memory and serve it up
    ----------------------------------------------------------------------------
    """
    # A list of the templates we want to render and add to our zip
    templates = [
        'grids/downloads/gridpak.css',
        'grids/downloads/gridpak.js',
        'grids/downloads/gridpak.less',
        'grids/downloads/gridpak.scss',
        'grids/downloads/README.md',
    ]

    # Loop the templates list
    for template in templates:
        buff = StringIO()
        # Read the templates into string buffers
        buff.write(
            render_to_string(template, {
                'grids': grids,
                'max_cols': max_cols,
            }).encode('ascii', 'ignore'))

        zip_dl.writestr(template.replace('grids/downloads/', ''),
                        buff.getvalue())

    # Now add the demo directory as is
    path = os.path.join(settings.CUR_DIR, 'templates/grids/downloads/demo')
    for root, dirs, files in os.walk(path):
        for filename in files:
            # Writes the file to the zip inside one dir called demo
            zip_dl.write(os.path.join(root, filename),
                         os.path.join('demo', filename))

    zip_dl.close()

    response = HttpResponse(mimetype='application/zip')
    response['Content-Disposition'] = 'attachment; filename=gridpak.zip'

    zip_buff.seek(0)
    response.write(zip_buff.read())

    return response
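The pattern above is worth isolating: the zip is assembled entirely in memory and the buffer's contents become the response body. A self-contained Python 2 sketch under the same assumptions as the view (cStringIO and zipfile only, no Django pieces; names are illustrative):

    from cStringIO import StringIO
    from zipfile import ZipFile

    def build_zip(named_blobs):
        """Write (name, bytes) pairs into a zip held entirely in memory."""
        buff = StringIO()
        archive = ZipFile(buff, 'w')
        for name, blob in named_blobs:
            archive.writestr(name, blob)
        archive.close()  # finalises the zip's central directory
        buff.seek(0)     # rewind before reading the payload back out
        return buff.read()

    payload = build_zip([('README.md', 'hello'), ('demo/x.txt', 'world')])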
Example #46
0
    def print_sg_leave_summary_report_wizard(self):
        cr, uid, context = self.env.args
        if context is None:
            context = {}
        context = dict(context)
        user_obj = self.env['res.users'].search([])
        ids = self.env['hr.employee'].search([('user_id', '=', uid)])
        data = self.read()[0]
        if 'all_employee' in data or 'all_leave' in data:
            context.update({
                'all_employee': data['all_employee'],
                'all_leave': data['all_leave']
            })
        context.update({
            'from_date': data['from_date'],
            'to_date': data['to_date'],
            'leave_type_id': data['leave_type_id'],
            'employee_id': data['employee_id']
        })
        workbook = xlwt.Workbook()
        worksheet = workbook.add_sheet('Sheet 1')
        borders = xlwt.Borders()
        border_style = xlwt.XFStyle()  # Create Style
        font = xlwt.Font()
        font.bold = True
        table_border = xlwt.easyxf('font: bold 1, height 200; align: wrap on;')
        table_border1 = xlwt.easyxf('font: height 200; align: wrap on;')
        table_border_center = xlwt.easyxf(
            "font: bold 0; align: wrap on, horiz centre;")
        table_border1_center = xlwt.easyxf("align: wrap on, horiz centre;")
        borders.top = xlwt.Borders.MEDIUM
        borders.bottom = xlwt.Borders.MEDIUM
        borders.left = xlwt.Borders.MEDIUM
        borders.right = xlwt.Borders.MEDIUM
        table_border.borders = borders
        table_border1.borders = borders
        table_border_center.borders = borders
        table_border1_center.borders = borders
        header = xlwt.easyxf('font: bold 1, height 300; align: horiz center')
        header2 = xlwt.easyxf('font: bold 1, height 200; align: horiz left')
        header3 = xlwt.easyxf('font: bold 1, height 250; align: horiz left')
        header1 = xlwt.easyxf("align: wrap on;")
        borders.top = xlwt.Borders.MEDIUM
        borders.bottom = xlwt.Borders.MEDIUM
        borders.left = xlwt.Borders.MEDIUM
        borders.right = xlwt.Borders.MEDIUM
        header1.borders = borders

        worksheet.col(0).width = 7000
        worksheet.col(1).width = 7000
        worksheet.col(2).width = 7000
        worksheet.col(3).width = 7000
        worksheet.col(4).width = 7000
        worksheet.col(5).width = 6000
        worksheet.col(6).width = 6000
        worksheet.col(7).width = 6000
        worksheet.col(8).width = 6000
        worksheet.col(9).width = 7000
        worksheet.row(0).height = 600
        worksheet.row(1).height = 300
        worksheet.row(2).height = 400
        worksheet.row(3).height = 300
        worksheet.row(4).height = 300
        worksheet.row(5).height = 400
        worksheet.row(6).height = 300
        worksheet.row(7).height = 300
        worksheet.row(8).height = 400
        worksheet.row(9).height = 300

        #        path = os.path.abspath(os.path.dirname(__file__))
        #        path += '/../static/img/abced.bmp'
        #        worksheet.insert_bitmap(path,0,2,1)

        worksheet.write_merge(0, 0, 3, 5, ids.company_id.name, header)

        emp_obj = self.env['hr.employee']
        if context["employee_id"]:
            emp_record = self.env['hr.employee'].browse(
                context["employee_id"][0])
            department = emp_record.department_id and emp_record.department_id.name or ''
            emp_no = emp_record.identification_id or ''
            emp_title = emp_record.job_id and emp_record.job_id.name or ''
            joined_year = emp_record.joined_year or ''
            leave_structure = emp_record.leave_config_id and emp_record.leave_config_id.name or ''
        else:
            employee_res = self.env['hr.employee'].search([])
        leave_obj = self.env['hr.holidays.status']
        holiday_obj = self.env['hr.holidays']
        if context['all_leave'] == True:
            leave_ids = leave_obj.search([])

        args = [('date_start', '<=', context.get('from_date')),
                ('date_stop', '>=', context.get('to_date'))]
        fiscalyear_id = self.env['hr.year'].search(args)
        if fiscalyear_id:
            fiscalyear_id = fiscalyear_id[0]
        else:
            raise ValidationError(_('You can search only single year records'))

        from_date_date = datetime.strptime(
            context["from_date"] + " 00:00:00",
            DEFAULT_SERVER_DATETIME_FORMAT) - relativedelta(hours=8)
        from_date_str = from_date_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        from_to_date = datetime.strptime(
            context["to_date"] + " 23:59:59",
            DEFAULT_SERVER_DATETIME_FORMAT) - relativedelta(hours=8)
        from_to_str = from_to_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)

        #       WHEN BOTH CHECKBOX IS TRUE
        if context['all_employee'] == True and context['all_leave'] == True:
            row = 2
            col = 0
            self._get_company_info(worksheet, row, header2)
            row = row + 4
            worksheet.row(row + 4).height = 300
            leave_name = ''
            leave_ids = leave_obj.search([])
            for leave in leave_ids:
                row = row + 1
                worksheet.row(row).height = 300
                worksheet.row(row + 2).height = 600
                leave_name = leave.name2 if leave.name2 else leave.name
                worksheet.write_merge(row, row, col, 2, leave_name, header3)
                self._get_employee_header(worksheet, row, table_border)
                row = row + 3
                col = 0
                for emp_record in employee_res:
                    emp_id = emp_record.id
                    leave_id = leave.id
                    #                   DEPARTMENT
                    if emp_record.department_id and emp_record.department_id.name:
                        worksheet.write(row, col + 0,
                                        emp_record.department_id.name,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 0, '', table_border1)
#                   IDENTIFICATION NUMBER
                    if emp_record.identification_id:
                        worksheet.write(row, col + 1,
                                        emp_record.identification_id,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 1, '', table_border1)
#                   EMPLOYEE NAME
                    if emp_record.name:
                        worksheet.write(row, col + 2, emp_record.name,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 2, '', table_border1)
#                   DATE JOINED
                    if emp_record.join_date:
                        emp_j_date = datetime.strptime(
                            emp_record.join_date, DEFAULT_SERVER_DATE_FORMAT)
                        emp_join_dt = str(
                            emp_j_date.strftime('%d')) + '-' + str(
                                emp_j_date.strftime('%m')) + '-' + str(
                                    emp_j_date.strftime('%Y'))
                        worksheet.write(row, col + 3, emp_join_dt,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 3, '', table_border1)
#                   SERVICE YEARS
                    if emp_record.joined_year:
                        worksheet.write(row, col + 4, emp_record.joined_year,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 4, '', table_border1)
#                   LEAVE STRUCTURE
                    if emp_record.leave_config_id and emp_record.leave_config_id.name:
                        worksheet.write(row, col + 5,
                                        emp_record.leave_config_id.name,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 5, '', table_border1)
#                   CARRY FORWARD
                    carry_leave = self._get_carry_leave(
                        emp_id, leave_id, fiscalyear_id)
                    if carry_leave and carry_leave[
                            0] and carry_leave[0] != None:
                        worksheet.write(row, col + 6, int(carry_leave[0]),
                                        table_border1_center)
                    else:
                        worksheet.write(row, col + 6, 0, table_border1_center)
#                   CURRENT YEAR
                    total_leave = self._get_total_leave(
                        emp_id, leave_id, fiscalyear_id)
                    if total_leave != 0:
                        worksheet.write(row, col + 7, int(total_leave),
                                        table_border1_center)
                    else:
                        worksheet.write(row, col + 7, 0, table_border1_center)
#                   PENDING
                    pending_leave = self._get_pending_leave(
                        emp_id, leave_id, from_date_str, from_to_str)
                    if pending_leave and pending_leave[
                            0] and pending_leave[0] != None:
                        worksheet.write(row, col + 8, pending_leave[0],
                                        table_border1_center)
                    else:
                        worksheet.write(row, col + 8, 0, table_border1_center)
#                   TAKEN
                    taken_leave = self._get_taken_leave(
                        emp_id, leave_id, from_date_str, from_to_str)
                    if taken_leave and taken_leave[
                            0] and taken_leave[0] != None:
                        worksheet.write(row, col + 9, taken_leave[0],
                                        table_border1_center)
                    else:
                        worksheet.write(row, col + 9, 0, table_border1_center)
#                   BALANCE YTD
                    if total_leave != 0:
                        if taken_leave and taken_leave[
                                0] and taken_leave[0] != None:
                            after_blc = int(total_leave) - taken_leave[0]
                            worksheet.write(row, col + 10, after_blc or 0,
                                            table_border1_center)
                        else:
                            worksheet.write(row, col + 10, total_leave,
                                            table_border1_center)
                    else:
                        worksheet.write(row, col + 10, 0, table_border1_center)
#                   BALANCE MTD
                    earn_leaves = self._get_earn_leave(emp_id, leave_id,
                                                       context["from_date"],
                                                       context["to_date"],
                                                       fiscalyear_id)
                    if earn_leaves != 0:
                        if taken_leave and taken_leave[
                                0] and taken_leave[0] != None:
                            earn_leaves = earn_leaves - taken_leave[0]
                            worksheet.write(row, col + 11, earn_leaves,
                                            table_border1_center)
                        else:
                            worksheet.write(row, col + 11, earn_leaves,
                                            table_border1_center)
                    else:
                        worksheet.write(row, col + 11, 0, table_border1_center)
                    row = row + 1
            row = row + 1

#       WHEN EMPLOYEE CHECKBOX TRUE
        elif context['all_employee'] == True and context['all_leave'] == False:
            row = 2
            col = 0
            self._get_company_info(worksheet, row, header2)
            row = row + 4
            leave_type = str(
                context["leave_type_id"][1]).upper() + ' LEAVE RECORD'
            worksheet.row(row).height = 400
            worksheet.write_merge(row, row, col, 2, leave_type, header3)
            worksheet.row(row + 2).height = 500
            self._get_employee_header(worksheet, row, table_border)
            row = row + 3
            col = 0
            for emp_record in employee_res:
                emp_id = emp_record.id
                leave_id = context["leave_type_id"][0]
                pending_leave = self._get_pending_leave(
                    emp_id, leave_id, from_date_str, from_to_str)
                taken_leave = self._get_taken_leave(emp_id, leave_id,
                                                    from_date_str, from_to_str)
                total_leave = self._get_total_leave(emp_id, leave_id,
                                                    fiscalyear_id)
                carry_leave = self._get_carry_leave(emp_id, leave_id,
                                                    fiscalyear_id)
                earn_leaves = self._get_earn_leave(emp_id, leave_id,
                                                   context["from_date"],
                                                   context["to_date"],
                                                   fiscalyear_id)
                #               DEPARTMENT
                if emp_record.department_id and emp_record.department_id.name:
                    worksheet.write(row, col + 0,
                                    emp_record.department_id.name,
                                    table_border1)
                else:
                    worksheet.write(row, col + 0, '', table_border1)
#               IDENTIFICATION NUMBER
                if emp_record.identification_id:
                    worksheet.write(row, col + 1, emp_record.identification_id,
                                    table_border1)
                else:
                    worksheet.write(row, col + 1, '', table_border1)
#               EMPLOYEE NAME
                if emp_record.name:
                    worksheet.write(row, col + 2, emp_record.name,
                                    table_border1)
                else:
                    worksheet.write(row, col + 2, emp_record.name,
                                    table_border1)
#               JOIN DATE
                if emp_record.join_date:
                    emp_j_date = datetime.strptime(emp_record.join_date,
                                                   DEFAULT_SERVER_DATE_FORMAT)
                    emp_join_dt = str(emp_j_date.strftime('%d')) + '-' + str(
                        emp_j_date.strftime('%m')) + '-' + str(
                            emp_j_date.strftime('%Y'))
                    worksheet.write(row, col + 3, emp_join_dt, table_border1)
                else:
                    worksheet.write(row, col + 3, '', table_border1)
#               SERVICE YEARS
                if emp_record.joined_year:
                    worksheet.write(row, col + 4, emp_record.joined_year,
                                    table_border1)
                else:
                    worksheet.write(row, col + 4, '', table_border1)
#               LEAVE STRUCTURE
                if emp_record.leave_config_id and emp_record.leave_config_id.name:
                    worksheet.write(row, col + 5,
                                    emp_record.leave_config_id.name,
                                    table_border1)
                else:
                    worksheet.write(row, col + 5, '', table_border1)
#               CARRY FORWARD
                if carry_leave and carry_leave[0] and carry_leave[0] != None:
                    worksheet.write(row, col + 6, int(carry_leave[0]),
                                    table_border1_center)
                else:
                    worksheet.write(row, col + 6, 0, table_border1_center)
#               CURRENT YEAR TOTAL LEAVE
                if total_leave != 0:
                    worksheet.write(row, col + 7, int(total_leave),
                                    table_border1_center)
                else:
                    worksheet.write(row, col + 7, 0, table_border1_center)
#               PENDING
                if pending_leave and pending_leave[
                        0] and pending_leave[0] != None:
                    worksheet.write(row, col + 8, pending_leave[0],
                                    table_border1_center)
                else:
                    worksheet.write(row, col + 8, 0, table_border1_center)
#               TAKEN
                if taken_leave and taken_leave[0] and taken_leave[0] != None:
                    worksheet.write(row, col + 9, taken_leave[0],
                                    table_border1_center)
                else:
                    worksheet.write(row, col + 9, 0, table_border1_center)
#               BALANCE YTD
                if total_leave != 0:
                    if taken_leave and taken_leave[
                            0] and taken_leave[0] != None:
                        after_blc = int(total_leave) - taken_leave[0]
                        worksheet.write(row, col + 10, after_blc or 0,
                                        table_border1_center)
                    else:
                        worksheet.write(row, col + 10, total_leave,
                                        table_border1_center)
                else:
                    worksheet.write(row, col + 10, 0, table_border1_center)
#               BALANCE MTD
                if earn_leaves != 0:
                    if taken_leave and taken_leave[
                            0] and taken_leave[0] != None:
                        earn_leaves = earn_leaves - taken_leave[0]
                        worksheet.write(row, col + 11, earn_leaves,
                                        table_border1_center)
                    else:
                        worksheet.write(row, col + 11, earn_leaves,
                                        table_border1_center)
                else:
                    worksheet.write(row, col + 11, 0, table_border1_center)
                row += 1
            row = row + 1

#         WHEN LEAVE CHECK BOX IS TRUE
        elif context['all_leave'] == True and context['all_employee'] == False:
            row = 2
            col = 0
            self._get_company_info(worksheet, row, header2)
            row = row + 4
            #            worksheet.row(row + 4).height = 500
            leave_name = ''
            if emp_record.leave_config_id and emp_record.leave_config_id.holiday_group_config_line_ids and emp_record.leave_config_id.holiday_group_config_line_ids.ids:
                leave_ids = []
                for leave_config_type in emp_record.leave_config_id.holiday_group_config_line_ids:
                    leave_ids.append(leave_config_type.leave_type_id.id)
                for leave in leave_obj.browse(leave_ids):
                    row = row + 1
                    worksheet.row(row).height = 300
                    worksheet.row(row + 2).height = 500
                    leave_name = leave.name2 if leave.name2 else leave.name
                    worksheet.write_merge(row, row, col, 2, leave_name,
                                          header3)
                    self._get_employee_header(worksheet, row, table_border)
                    row = row + 3
                    col = 0
                    emp_id = emp_record.id
                    leave_id = leave.id
                    pending_leave = self._get_pending_leave(
                        emp_id, leave_id, from_date_str, from_to_str)
                    taken_leave = self._get_taken_leave(
                        emp_id, leave_id, from_date_str, from_to_str)
                    total_leave = self._get_total_leave(
                        emp_id, leave_id, fiscalyear_id)
                    carry_leave = self._get_carry_leave(
                        emp_id, leave_id, fiscalyear_id)
                    earn_leaves = self._get_earn_leave(emp_id, leave_id,
                                                       context["from_date"],
                                                       context["to_date"],
                                                       fiscalyear_id)
                    #                   DEPARTMENT
                    if emp_record.department_id and emp_record.department_id.name:
                        worksheet.write(row, col + 0,
                                        emp_record.department_id.name,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 0, '', table_border1)
    #                   IDENTIFICATION NUMBER
                    if emp_record.identification_id:
                        worksheet.write(row, col + 1,
                                        emp_record.identification_id,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 1, '', table_border1)

                    if emp_record.name:
                        worksheet.write(row, col + 2, emp_record.name,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 2, emp_record.name,
                                        table_border1)

                    if emp_record.join_date:
                        emp_j_date = datetime.strptime(
                            emp_record.join_date, DEFAULT_SERVER_DATE_FORMAT)
                        emp_join_dt = str(
                            emp_j_date.strftime('%d')) + '-' + str(
                                emp_j_date.strftime('%m')) + '-' + str(
                                    emp_j_date.strftime('%Y'))
                        worksheet.write(row, col + 3, emp_join_dt,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 3, '', table_border1)

                    if emp_record.joined_year:
                        worksheet.write(row, col + 4, emp_record.joined_year,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 4, '', table_border1)

                    if emp_record.leave_config_id and emp_record.leave_config_id.name:
                        worksheet.write(row, col + 5,
                                        emp_record.leave_config_id.name,
                                        table_border1)
                    else:
                        worksheet.write(row, col + 5, '', table_border1)

    #                       CARRY FORWARD
                    if carry_leave and carry_leave[
                            0] and carry_leave[0] != None:
                        worksheet.write(row, col + 6, int(carry_leave[0]),
                                        table_border1_center)
                    else:
                        worksheet.write(row, col + 6, 0, table_border1_center)
    #                       CURRENT YEAR TOTAL LEAVE
                    if total_leave != 0:
                        worksheet.write(row, col + 7, int(total_leave),
                                        table_border1_center)
                    else:
                        worksheet.write(row, col + 7, 0, table_border1_center)
    #                       PENDING
                    if pending_leave and pending_leave[
                            0] and pending_leave[0] != None:
                        worksheet.write(row, col + 8, pending_leave[0],
                                        table_border1_center)
                    else:
                        worksheet.write(row, col + 8, 0, table_border1_center)
    #                       TAKEN
                    if taken_leave and taken_leave[
                            0] and taken_leave[0] != None:
                        worksheet.write(row, col + 9, taken_leave[0],
                                        table_border1_center)
                    else:
                        worksheet.write(row, col + 9, 0, table_border1_center)
    #                    BALANCE YTD
                    if total_leave != 0:
                        if taken_leave and taken_leave[
                                0] and taken_leave[0] != None:
                            after_blc = int(total_leave) - taken_leave[0]
                            worksheet.write(row, col + 10, after_blc or 0,
                                            table_border1_center)
                        else:
                            worksheet.write(row, col + 10, total_leave,
                                            table_border1_center)
                    else:
                        worksheet.write(row, col + 10, 0, table_border1_center)

    #                   BALANCE MTD
                    if earn_leaves != 0:
                        if taken_leave and taken_leave[
                                0] and taken_leave[0] != None:
                            earn_leaves = earn_leaves - taken_leave[0]
                            worksheet.write(row, col + 11, earn_leaves,
                                            table_border1_center)
                        else:
                            worksheet.write(row, col + 11, earn_leaves,
                                            table_border1_center)
                    else:
                        worksheet.write(row, col + 11, 0, table_border1_center)
                    row += 1
                row = row + 1

#         WHEN BOTH CHECKBOX IS FALSE
        else:
            row = 2
            col = 0
            self._get_company_info(worksheet, row, header2)
            row = row + 5
            worksheet.row(row).height = 340
            worksheet.write_merge(row, row, col, 2,
                                  context["leave_type_id"][1], header3)
            worksheet.row(row + 2).height = 500
            self._get_employee_header(worksheet, row, table_border)
            row = row + 3
            col = 0
            emp_id = emp_record.id
            leave_id = context["leave_type_id"][0]
            pending_leave = self._get_pending_leave(emp_id, leave_id,
                                                    from_date_str, from_to_str)
            taken_leave = self._get_taken_leave(emp_id, leave_id,
                                                from_date_str, from_to_str)
            total_leave = self._get_total_leave(emp_id, leave_id,
                                                fiscalyear_id)
            carry_leave = self._get_carry_leave(emp_id, leave_id,
                                                fiscalyear_id)
            earn_leaves = self._get_earn_leave(emp_id, leave_id,
                                               context["from_date"],
                                               context["to_date"],
                                               fiscalyear_id)
            #           DEPARTMENT
            if emp_record.department_id and emp_record.department_id.name:
                worksheet.write(row, col + 0, emp_record.department_id.name,
                                table_border1)
            else:
                worksheet.write(row, col + 0, '', table_border1)
#           IDENTIFICATION NUMBER
            if emp_record.identification_id:
                worksheet.write(row, col + 1, emp_record.identification_id,
                                table_border1)
            else:
                worksheet.write(row, col + 1, '', table_border1)
#           EMPLOYEE NAME
            if emp_record.name:
                worksheet.write(row, col + 2, emp_record.name, table_border1)
            else:
                worksheet.write(row, col + 2, emp_record.name, table_border1)
#           DATE JOINED
            if emp_record.join_date:
                emp_j_date = datetime.strptime(emp_record.join_date,
                                               DEFAULT_SERVER_DATE_FORMAT)
                emp_join_dt = str(emp_j_date.strftime('%d')) + '-' + str(
                    emp_j_date.strftime('%m')) + '-' + str(
                        emp_j_date.strftime('%Y'))
                worksheet.write(row, col + 3, emp_join_dt, table_border1)
            else:
                worksheet.write(row, col + 3, '', table_border1)
#           SERVICE YEARS
            if emp_record.joined_year:
                worksheet.write(row, col + 4, emp_record.joined_year,
                                table_border1)
            else:
                worksheet.write(row, col + 4, '', table_border1)
#           LEAVE STRUCTURE
            if emp_record.leave_config_id and emp_record.leave_config_id.name:
                worksheet.write(row, col + 5, emp_record.leave_config_id.name,
                                table_border1)
            else:
                worksheet.write(row, col + 5, '', table_border1)
#           CARRY FORWARD
            if carry_leave and carry_leave[0] and carry_leave[0] != None:
                worksheet.write(row, col + 6, int(carry_leave[0]),
                                table_border1_center)
            else:
                worksheet.write(row, col + 6, 0, table_border1_center)
#           CURRENT YEAR
            if total_leave != 0:
                worksheet.write(row, col + 7, int(total_leave),
                                table_border1_center)
            else:
                worksheet.write(row, col + 7, 0, table_border1_center)
#           PENDING
            if pending_leave and pending_leave[0] and pending_leave[0] != None:
                worksheet.write(row, col + 8, pending_leave[0],
                                table_border1_center)
            else:
                worksheet.write(row, col + 8, 0, table_border1_center)
#           TAKEN
            if taken_leave and taken_leave[0] and taken_leave[0] != None:
                worksheet.write(row, col + 9, taken_leave[0],
                                table_border1_center)
            else:
                worksheet.write(row, col + 9, 0, table_border1_center)
#           BALANCE YTD
            if total_leave != 0:
                if taken_leave and taken_leave[0] and taken_leave[0] != None:
                    after_blc = int(total_leave) - taken_leave[0]
                    worksheet.write(row, col + 10, after_blc or 0,
                                    table_border1_center)
                else:
                    worksheet.write(row, col + 10, total_leave,
                                    table_border1_center)
            else:
                worksheet.write(row, col + 10, 0, table_border1_center)
#           BALANCE MTD
            if earn_leaves != 0:
                if taken_leave and taken_leave[0] and taken_leave[0] != None:
                    earn_leaves = earn_leaves - taken_leave[0]
                    worksheet.write(row, col + 11, earn_leaves,
                                    table_border1_center)
                else:
                    worksheet.write(row, col + 11, earn_leaves,
                                    table_border1_center)
            else:
                worksheet.write(row, col + 11, 0, table_border1_center)
            row += 1
        row = row + 1

        fp = StringIO()
        workbook.save(fp)
        fp.seek(0)
        data = fp.read()
        fp.close()
        res = base64.encodestring(data)
        module_rec = self.env['excel.sg.leave.summary.report'].create({
            'name':
            'Leave summary.xls',
            'file':
            res
        })
        return {
            'name': _('Leave Summary Report'),
            'res_id': module_rec.id,
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'excel.sg.leave.summary.report',
            'type': 'ir.actions.act_window',
            'target': 'new',
            'context': context,
        }
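The spreadsheet delivery at the end of the wizard reduces to one small pattern: save the xlwt workbook into an in-memory buffer, then base64-encode the bytes for storage in a binary field. A minimal Python 2 sketch of just that step (the sheet contents are illustrative):

    import base64
    import xlwt
    from cStringIO import StringIO

    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('Sheet 1')
    sheet.write(0, 0, 'hello')

    fp = StringIO()
    workbook.save(fp)  # xlwt accepts any file-like object here
    encoded = base64.encodestring(fp.getvalue())  # what the binary field stores
    fp.close()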
Example #47
0
def export_query():
    """export from report builder"""
    form_params = get_form_params()
    form_params["limit_page_length"] = None
    form_params["as_list"] = True
    doctype = form_params.doctype
    add_totals_row = None
    file_format_type = form_params["file_format_type"]

    del form_params["doctype"]
    del form_params["file_format_type"]

    if 'add_totals_row' in form_params and form_params['add_totals_row'] == '1':
        add_totals_row = 1
        del form_params["add_totals_row"]

    frappe.permissions.can_export(doctype, raise_exception=True)

    if 'selected_items' in form_params:
        si = json.loads(frappe.form_dict.get('selected_items'))
        form_params["filters"] = {"name": ("in", si)}
        del form_params["selected_items"]

    db_query = DatabaseQuery(doctype)
    ret = db_query.execute(**form_params)

    if add_totals_row:
        ret = append_totals_row(ret)

    data = [['Sr'] + get_labels(db_query.fields, doctype)]
    for i, row in enumerate(ret):
        data.append([i + 1] + list(row))

    if file_format_type == "CSV":

        # convert to csv
        import csv
        from cStringIO import StringIO

        f = StringIO()
        writer = csv.writer(f)
        for r in data:
            # encode only unicode type strings and not int, floats etc.
            writer.writerow(
                map(
                    lambda v: isinstance(v, unicode) and v.encode('utf-8') or
                    v, r))

        f.seek(0)
        frappe.response['result'] = unicode(f.read(), 'utf-8')
        frappe.response['type'] = 'csv'
        frappe.response['doctype'] = doctype

    elif file_format_type == "Excel":

        from frappe.utils.xlsxutils import make_xlsx
        xlsx_file = make_xlsx(data, doctype)

        frappe.response['filename'] = doctype + '.xlsx'
        frappe.response['filecontent'] = xlsx_file.getvalue()
        frappe.response['type'] = 'binary'
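The CSV branch above leans on a standard Python 2 workaround: csv.writer cannot write unicode, so unicode cells are UTF-8-encoded while ints and floats pass through untouched, and the whole buffer is decoded back to unicode at the end. A standalone sketch of that round trip:

    import csv
    from cStringIO import StringIO

    rows = [[u'Sr', u'Name'], [1, u'M\xfcller'], [2, 3.5]]

    f = StringIO()
    writer = csv.writer(f)
    for r in rows:
        # encode only unicode strings, not ints or floats
        writer.writerow([v.encode('utf-8') if isinstance(v, unicode) else v
                         for v in r])
    csv_text = unicode(f.getvalue(), 'utf-8')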
Example #48
0
class PDFContentParser(PSStackParser):

  def __init__(self, streams):
    self.streams = streams
    self.istream = 0
    PSStackParser.__init__(self, None)
    return

  def fillfp(self):
    if not self.fp:
      if self.istream < len(self.streams):
        strm = stream_value(self.streams[self.istream])
        self.istream += 1
      else:
        raise PSEOF('Unexpected EOF, file truncated?')
      self.fp = StringIO(strm.get_data())
    return

  def seek(self, pos):
    self.fillfp()
    PSStackParser.seek(self, pos)
    return

  def fillbuf(self):
    if self.charpos < len(self.buf): return
    while 1:
      self.fillfp()
      self.bufpos = self.fp.tell()
      self.buf = self.fp.read(self.BUFSIZ)
      if self.buf: break
      self.fp = None
    self.charpos = 0
    return

  def get_inline_data(self, pos, target='EI'):
    self.seek(pos)
    i = 0
    data = ''
    while i <= len(target):
      self.fillbuf()
      if i:
        c = self.buf[self.charpos]
        data += c
        self.charpos += 1
        if len(target) <= i and c.isspace():
          i += 1
        elif i < len(target) and c == target[i]:
          i += 1
        else:
          i = 0
      else:
        try:
          j = self.buf.index(target[0], self.charpos)
          #print 'found', (0, self.buf[j:j+10])
          data += self.buf[self.charpos:j+1]
          self.charpos = j+1
          i = 1
        except ValueError:
          data += self.buf[self.charpos:]
          self.charpos = len(self.buf)
    data = data[:-(len(target)+1)] # strip the last part
    data = re.sub(r'(\x0d\x0a|[\x0d\x0a])', '', data)
    return (pos, data)

  def flush(self):
    self.add_results(*self.popall())
    return

  KEYWORD_BI = PSKeywordTable.intern('BI')
  KEYWORD_ID = PSKeywordTable.intern('ID')
  KEYWORD_EI = PSKeywordTable.intern('EI')
  def do_keyword(self, pos, token):
    if token is self.KEYWORD_BI:
      # inline image within a content stream
      self.start_type(pos, 'inline')
    elif token is self.KEYWORD_ID:
      try:
        (_, objs) = self.end_type('inline')
        if len(objs) % 2 != 0:
          raise PSTypeError('Invalid dictionary construct: %r' % objs)
        d = dict( (literal_name(k), v) for (k,v) in choplist(2, objs) )
        (pos, data) = self.get_inline_data(pos+len('ID '))
        obj = PDFStream(d, data)
        self.push((pos, obj))
        self.push((pos, self.KEYWORD_EI))
      except PSTypeError:
        if STRICT: raise
    else:
      self.push((pos, token))
    return
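The core of get_inline_data above is a small streaming scan: bytes are accumulated until the target keyword ('EI') followed by a whitespace character has been seen, and the keyword plus its terminator are then stripped from the tail. A simplified, self-contained sketch of the same idea over a plain string (no parser or buffering machinery):

    def read_until_keyword(buf, target='EI'):
        data = ''
        i = 0  # how much of target (plus trailing whitespace) matched so far
        for c in buf:
            data += c
            if i < len(target) and c == target[i]:
                i += 1                            # matching the keyword
            elif i == len(target) and c.isspace():
                return data[:-(len(target) + 1)]  # drop keyword + terminator
            else:
                i = 1 if c == target[0] else 0
        return data

    # read_until_keyword('...image bytes... EI Q') -> '...image bytes... '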
Example #49
0
class TZlibTransport(TTransportBase, CReadableTransport):
    '''
  Class that wraps a transport with zlib, compressing writes
  and decompressing reads, using the python standard
  library zlib module.
  '''

    # Read buffer size for the python fastbinary C extension,
    # the TBinaryProtocolAccelerated class.
    DEFAULT_BUFFSIZE = 4096

    def __init__(self, trans, compresslevel=9):
        '''
    Create a new TZlibTransport, wrapping C{trans}, another
    TTransport derived object.
    
    @param trans: A thrift transport object, i.e. a TSocket() object.
    @type trans: TTransport
    @param compresslevel: The zlib compression level, ranging
    from 0 (no compression) to 9 (best compression).  Default is 9.
    @type compresslevel: int
    '''
        self.__trans = trans
        self.compresslevel = compresslevel
        self.__rbuf = StringIO()
        self.__wbuf = StringIO()
        self._init_zlib()
        self._init_stats()

    def _reinit_buffers(self):
        '''
    Internal method to initialize/reset the internal StringIO objects
    for read and write buffers.
    '''
        self.__rbuf = StringIO()
        self.__wbuf = StringIO()

    def _init_stats(self):
        '''
    Internal method to reset the internal statistics counters
    for compression ratios and bandwidth savings.
    '''
        self.bytes_in = 0
        self.bytes_out = 0
        self.bytes_in_comp = 0
        self.bytes_out_comp = 0

    def _init_zlib(self):
        '''
    Internal method for setting up the zlib compression and
    decompression objects.
    '''
        self._zcomp_read = zlib.decompressobj()
        self._zcomp_write = zlib.compressobj(self.compresslevel)

    def getCompRatio(self):
        '''
    Get the current measured compression ratios (in,out) from
    this transport.
    
    Returns a tuple of: 
    (inbound_compression_ratio, outbound_compression_ratio)
    
    The compression ratios are computed as:
        compressed / uncompressed

    E.g., data that compresses by 10x will have a ratio of: 0.10
    and data that compresses to half of its original size will
    have a ratio of 0.5
    
    None is returned if no bytes have yet been processed in
    a particular direction.
    '''
        r_percent, w_percent = (None, None)
        if self.bytes_in > 0:
            # float() guards against Python 2 integer division
            r_percent = float(self.bytes_in_comp) / self.bytes_in
        if self.bytes_out > 0:
            w_percent = float(self.bytes_out_comp) / self.bytes_out
        return (r_percent, w_percent)

    def getCompSavings(self):
        '''
    Get the current count of saved bytes due to data
    compression.
    
    Returns a tuple of:
    (inbound_saved_bytes, outbound_saved_bytes)
    
    Note: if compression is actually expanding your
    data (only likely with very tiny thrift objects), then
    the values returned will be negative.
    '''
        r_saved = self.bytes_in - self.bytes_in_comp
        w_saved = self.bytes_out - self.bytes_out_comp
        return (r_saved, w_saved)

    def isOpen(self):
        '''Return the underlying transport's open status'''
        return self.__trans.isOpen()

    def open(self):
        """Open the underlying transport"""
        self._init_stats()
        return self.__trans.open()

    def listen(self):
        '''Invoke the underlying transport's listen() method'''
        self.__trans.listen()

    def accept(self):
        '''Accept connections on the underlying transport'''
        return self.__trans.accept()

    def close(self):
        '''Close the underlying transport.'''
        self._reinit_buffers()
        self._init_zlib()
        return self.__trans.close()

    def read(self, sz):
        '''
    Read up to sz bytes from the decompressed bytes buffer, and
    read from the underlying transport if the decompression
    buffer is empty.
    '''
        ret = self.__rbuf.read(sz)
        if len(ret) > 0:
            return ret
        # keep reading from transport until something comes back
        while True:
            if self.readComp(sz):
                break
        ret = self.__rbuf.read(sz)
        return ret

    def readComp(self, sz):
        '''
    Read compressed data from the underlying transport, then
    decompress it and append it to the internal StringIO read buffer
    '''
        zbuf = self.__trans.read(sz)
        zbuf = self._zcomp_read.unconsumed_tail + zbuf
        buf = self._zcomp_read.decompress(zbuf)
        self.bytes_in += len(zbuf)
        self.bytes_in_comp += len(buf)
        old = self.__rbuf.read()
        self.__rbuf = StringIO(old + buf)
        if len(old) + len(buf) == 0:
            return False
        return True

    def write(self, buf):
        '''
    Write some bytes, putting them into the internal write
    buffer for eventual compression.
    '''
        self.__wbuf.write(buf)

    def flush(self):
        '''
    Flush any queued up data in the write buffer and ensure the
    compression buffer is flushed out to the underlying transport
    '''
        wout = self.__wbuf.getvalue()
        if len(wout) > 0:
            zbuf = self._zcomp_write.compress(wout)
            self.bytes_out += len(wout)
            self.bytes_out_comp += len(zbuf)
        else:
            zbuf = ''
        ztail = self._zcomp_write.flush(zlib.Z_SYNC_FLUSH)
        self.bytes_out_comp += len(ztail)
        if (len(zbuf) + len(ztail)) > 0:
            self.__wbuf = StringIO()
            self.__trans.write(zbuf + ztail)
        self.__trans.flush()

    @property
    def cstringio_buf(self):
        '''Implement the CReadableTransport interface'''
        return self.__rbuf

    def cstringio_refill(self, partialread, reqlen):
        '''Implement the CReadableTransport interface for refill'''
        retstring = partialread
        if reqlen < self.DEFAULT_BUFFSIZE:
            retstring += self.read(self.DEFAULT_BUFFSIZE)
        while len(retstring) < reqlen:
            retstring += self.read(reqlen - len(retstring))
        self.__rbuf = StringIO(retstring)
        return self.__rbuf
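Two details of TZlibTransport are worth a quick check: Z_SYNC_FLUSH keeps the compressor's state while forcing out a byte-aligned block the peer can decompress immediately, and the compression ratio is compressed/uncompressed, so smaller is better. A standalone Python 2 sketch of that arithmetic with the zlib module (the payload is illustrative):

    from __future__ import division  # keep the ratio fractional in Python 2
    import zlib

    payload = 'hello world ' * 100
    compressed = zlib.compress(payload, 9)  # level 9 = best compression

    ratio = len(compressed) / len(payload)  # e.g. ~0.03; 0.10 means 10x smaller
    print '%d -> %d bytes, ratio %.2f' % (len(payload), len(compressed), ratio)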
Example #50
0
    def callable(self,
                 base_url,
                 dir,
                 root="",
                 handlers=None,
                 server_admin=None):
        """
        Verwandelt CherryPy in einen CGI-Server

        :param base_url: Absolute HTTP-URL of the CGI base directory.
            z.B.: "/cgi"

        :param dir: Absoluter oder relativer Pfad zum Ordner mit den CGI-Dateien.
            Wird der Pfad relativ angegeben, dann wird er an *root* angehängt.
            Gleiches Verhalten wie beim Staticdir-Tool.

        :param root: An diesen Pfad wird *dir* angehängt, falls *dir* nicht
            absolut übergeben wurde.

        :param handlers: Dictionary mit den Dateiendungen und den
            zugehörigen Interpretern. z.B.::

                {".php": "/usr/bin/php-cgi", ".py": "/usr/bin/python"}

            Es werden nur Dateien ausgeführt, denen ein Handler zugewiesen
            wurde. Wird die Dateiendung nicht im Dictionary gefunden, wird
            das Tool beendet, so dass die Abarbeitung des Requests z.B. von
            Staticdir ausgeliefert werden kann.

        :param server_admin: Enthält Namen/E-Mail-Adresse des
            Server-Administrators. Diese Information wird über die
            Umgebungsvariable SERVER_ADMIN an das CGI-Programm übergeben.
        """

        # short names for request and headers
        request = cherrypy.request
        headers = request.headers

        # Allow the use of '~' to refer to a user's home directory.
        # (copied from *cherrypy.lib.static*)
        dir = os.path.expanduser(dir)

        # If dir is relative, make absolute using "root".
        # (copied from *cherrypy.lib.static*)
        if not os.path.isabs(dir):
            if not root:
                msg = "Static dir requires an absolute dir (or root)."
                cherrypy.log(msg, 'TOOLS.CGISERVER')
                raise ValueError(msg)
            dir = os.path.join(root, dir)

        # Determine where we are in the object tree relative to 'base_url'
        # (copied from *cherrypy.lib.static*)
        if base_url == "":
            base_url = "/"
        base_url = base_url.rstrip(r"\/")
        branch = request.path_info[len(base_url) + 1:]
        branch = unquote(branch.lstrip(r"\/"))

        # Determine the script's file name (appended path segments must be
        # taken into account). This also yields the path appended to the
        # script's URL.
        branch_items = branch.split("/")
        script_filename = None
        path_info_start = None
        path_info = ""
        for i in range(len(branch_items), 0, -1):
            _file_path = os.path.join(dir, *branch_items[:i])
            if os.path.isfile(_file_path):
                script_filename = _file_path
                path_info_start = i
                break
        if path_info_start is not None and branch_items[path_info_start:]:
            path_info = "/" + "/".join(branch_items[path_info_start:])
        if not script_filename:
            return

        # Determine the script's URL
        script_name = script_filename[len(dir):]

        # There's a chance that the branch pulled from the URL might
        # have ".." or similar uplevel attacks in it. Check that the final
        # filename is a child of dir.
        # (copied from *cherrypy.lib.static*)
        if not os.path.normpath(script_filename).startswith(
                os.path.normpath(dir)):
            raise cherrypy.HTTPError(403)  # Forbidden

        # If the file extension is unknown, leave the function so that a
        # configured staticdir tool can serve the file instead
        ext = os.path.splitext(script_filename)[1]
        if ext not in handlers:
            return

        # Determine the interpreter from the file extension
        handler_executable = handlers[ext]

        #        # TEST ---------
        #        cherrypy.serving.response.body = [show_request()]
        #        cherrypy.serving.request.handler = None
        #        return
        #        # TEST ----------

        # prepare body
        if request.method in request.methods_with_bodies:
            body_file = request.rfile
            content_length = headers.get("content-length", 0)
        else:
            body_file = StringIO()
            content_length = None

        # prepare environment for the CGI callable
        # Information about the environment variables was taken from:
        # http://de.selfhtml.org/servercgi/cgi/umgebungsvariablen.htm
        # http://tools.ietf.org/html/rfc3875#page-12

        env = {
            # GATEWAY_INTERFACE
            # Contains the version of the CGI interface supported by the
            # installed server, e.g. CGI/1.1 if the currently common
            # version 1.1 of the interface definition is supported.
            "GATEWAY_INTERFACE": "CGI/1.1",

            # SERVER_SIGNATURE
            # Contains an extended self-description of the server,
            # e.g. Apache/1.3.31 Server at localhost Port 80.
            "SERVER_SIGNATURE": "CherryPy CgiServer",

            # REMOTE_ADDR
            # Contains the IP address of the machine from which the CGI
            # script was called. This is not necessarily the IP address of
            # the calling client machine - the value may, for example, also
            # come from a proxy server.
            "REMOTE_ADDR": request.remote.ip,

            # REMOTE_PORT
            # The port on the client machine from which the CGI script was
            # called. This number usually lies in the range from 1024
            # upwards and is picked at random by the calling web browser.
            "REMOTE_PORT": str(request.remote.port),

            # SERVER_ADDR
            # Contains the IP address of the server machine.
            "SERVER_ADDR": request.local.ip,

            # SERVER_PORT
            # Contains the port number the web server was set up with.
            # For web servers this is normally port 80.
            "SERVER_PORT": str(request.local.port),

            # DOCUMENT_ROOT
            # Contains the physical path of the root directory for files
            # served by the web server. A CGI script can use this, for
            # example, to compute absolute path names for opening files.
            "DOCUMENT_ROOT": dir,

            # PATH_INFO
            # If a data string is passed to a CGI script, PATH_INFO contains
            # the part of the string after the script name up to the first
            # '?'. For example, if the script's address is
            # http://meine.seite.net/cgi-bin/test.pl but it is called as
            # http://meine.seite.net/cgi-bin/test.pl/querys/musicbase.sql?cat=Mozart,
            # this environment variable contains the part
            # /querys/musicbase.sql. It is meant to allow file names with
            # path information to be passed to scripts as parameters.
            "PATH_INFO": path_info,

            # PATH_TRANSLATED
            # Like PATH_INFO, contains the part of the URI after the script
            # name up to the first '?', but instead of returning that part
            # of the URI itself, it returns the data path of that part as
            # translated by the web server. Suppose the script has the
            # address http://meine.seite.net/cgi-bin/test.pl but was called
            # as http://meine.seite.net/cgi-bin/test.pl/querys/musicbase.sql.
            # From the web server's point of view the extra address part
            # /querys/musicbase.sql might then resolve to a physical path
            # such as /usr/web/seite/querys/musicbase.sql. PATH_TRANSLATED
            # would return that path name.
            "PATH_TRANSLATED":
            dir.rstrip("/") + path_info if path_info else "",

            # SCRIPT_FILENAME
            # Contains the physical path of the script on the server
            # machine, e.g. /usr/web/data/cgi-bin/test.pl.
            "SCRIPT_FILENAME": script_filename,

            # SCRIPT_NAME
            # Contains the HTTP path of the script. Suppose the script has
            # the address http://meine.seite.net/cgi-bin/test.pl. Then
            # SCRIPT_NAME yields the value /cgi-bin/test.pl.
            "SCRIPT_NAME": script_name,
        }

        # REMOTE_HOST
        # Contains the host name of the machine from which the CGI script
        # was called. This value is only set, however, if the web server is
        # configured accordingly and is able to map the IP address to the
        # corresponding host name. It is not necessarily the calling client
        # machine - the value may, for example, also come from a proxy
        # server.
        if request.remote.name:
            env["REMOTE_HOST"] = request.remote.name

        # SERVER_NAME
        # Contains the name of the server machine the CGI script runs on.
        # Normally this is the machine's registered host name.
        if request.local.name:
            env["SERVER_NAME"] = request.local.name

        # CONTENT_LENGTH
        # Contains the number of characters passed when the CGI script was
        # called via the POST method. If the CGI script was called, for
        # example, by submitting an HTML form whose transfer method is
        # POST, this environment variable tells the script how many
        # characters it must read from standard input in order to read the
        # submitted form data completely.
        if content_length is not None:
            env["CONTENT_LENGTH"] = str(content_length)

        # CONTENT_TYPE
        # For calls via the POST method, contains the MIME type of the
        # transferred data. If the CGI script was called, for example, by
        # submitting an HTML form whose transfer method is POST, this
        # environment variable holds the MIME type typical for HTML forms,
        # application/x-www-form-urlencoded.
        if "content-type" in headers:
            env["CONTENT_TYPE"] = headers["CONTENT_TYPE"]

        # HTTP_ACCEPT
        # Contains the list of MIME types the calling web browser accepts.
        # The value */* means: the browser accepts anything.
        if "accept" in headers:
            env["HTTP_ACCEPT"] = headers["accept"]
        elif hasattr(request, "wsgi_environ"):
            if "HTTP_ACCEPT" in request.wsgi_environ:
                env["HTTP_ACCEPT"] = request.wsgi_environ["HTTP_ACCEPT"]

        # HTTP_ACCEPT_CHARSET
        # Contains the list of character encodings the calling web browser
        # accepts, for example iso-8859-1, utf-8, utf-16, *;q=0.1.
        if "accept-charset" in headers:
            env["HTTP_ACCEPT_CHARSET"] = headers["accept-charset"]

        # HTTP_ACCEPT_ENCODING
        # Contains a list of the encodings the calling browser accepts.
        # Some browsers, for example, also accept the encoding type gzip,
        # which means the browser can also receive files that are
        # compressed with the GNU zip algorithm before transfer.
        if "accept-encoding" in headers:
            env["HTTP_ACCEPT_ENCODING"] = headers["accept-encoding"]
        elif hasattr(request, "wsgi_environ"):
            if "HTTP_ACCEPT_ENCODING" in request.wsgi_environ:
                env["HTTP_ACCEPT_ENCODING"] = request.wsgi_environ[
                    "HTTP_ACCEPT_ENCODING"]

        # HTTP_ACCEPT_LANGUAGE
        # Contains the language the calling browser uses for its user
        # interface. Common values are e.g. de (for German-language
        # browsers) or en (for English-language browsers). A CGI script
        # can use this, for example, to decide whether to send a German
        # or an English response to the browser.
        if "accept-language" in headers:
            env["HTTP_ACCEPT_LANGUAGE"] = headers["accept-language"]

        # HTTP_CONNECTION
        # Contains information about the state of the HTTP connection
        # between the server and the calling browser. The value Keep-Alive
        # means the browser is waiting for a response.
        if "connection" in headers:
            env["HTTP_CONNECTION"] = headers["connection"]
        elif hasattr(request, "wsgi_environ"):
            if "HTTP_CONNECTION" in request.wsgi_environ:
                env["HTTP_CONNECTION"] = request.wsgi_environ[
                    "HTTP_CONNECTION"]

        # HTTP_COOKIE
        # Enthält Namen und Wert von Cookies, sofern solche vom aufrufenden
        # Browser gesendet werden.
        if request.cookie:
            env["HTTP_COOKIE"] = request.cookie

        # HTTP_HOST
        # Enthält den Domain-Namen oder die IP-Adresse aus der Adresszeile des
        # aufrufenden Browsers. Für ein CGI-Script kann diese Angabe wichtig sein,
        # falls es mehrere Server bedienen muss.
        if "host" in headers:
            env["HTTP_HOST"] = headers["host"]
        elif hasattr(request, "wsgi_environ"):
            if "HTTP_HOST" in request.wsgi_environ:
                env["HTTP_HOST"] = request.wsgi_environ["HTTP_HOST"]

        # HTTP_REFERER
        # Enthält den URI der Web-Seite, von der aus das CGI-Script aufgerufen
        # wurde. Der Wert wird jedoch nicht von allen Web-Browsern korrekt
        # übermittelt, ist also nicht in jedem Fall verfügbar.
        if "referer" in headers:
            env["HTTP_REFERER"] = headers["referer"]

        # HTTP_USER_AGENT
        # Contains product and version information about the calling web
        # browser. A CGI script can use it to determine which browser a
        # user is running.
        if "user-agent" in headers:
            env["HTTP_USER_AGENT"] = headers["user-agent"]

        # QUERY_STRING
        # Contains the string of data passed to the script in the URI after
        # the first ?. Suppose the script lives at
        # http://meine.seite.net/cgi-bin/test.pl but was called as
        # http://meine.seite.net/cgi-bin/test.pl?User=Stefan.
        # Then QUERY_STRING would contain the value User=Stefan. If a user
        # submitted an HTML form that invokes the CGI script via the GET
        # method, this environment variable holds the submitted form data,
        # encoded according to the rules of the MIME type
        # application/x-www-form-urlencoded.
        if hasattr(request, "query_string"):
            env["QUERY_STRING"] = request.query_string
        elif hasattr(request, "wsgi_environ"):
            if "QUERY_STRING" in request.wsgi_environ:
                env["QUERY_STRING"] = request.wsgi_environ["QUERY_STRING"]

        # REMOTE_IDENT
        # Contains protocol information when the ident protocol for
        # authenticated access is running on the server.

        # REMOTE_USER
        # Contains the user name with which the calling user logged in to
        # run the CGI script. If the script is protected via htaccess, for
        # example, the calling user has to authenticate with a user name and
        # password; the user name entered there can be read from this
        # variable.
        if hasattr(request, "login"):
            if request.login and request.login.lower() != "none":
                env["REMOTE_USER"] = request.login

        # REQUEST_METHOD
        # Contains the HTTP request method with which the CGI program was
        # called, e.g. GET or POST. A CGI script can read this variable to
        # decide how to consume form data: either from standard input (for
        # POST) or from the QUERY_STRING environment variable (for GET).
        if hasattr(request, "method"):
            env["REQUEST_METHOD"] = request.method

        # REQUEST_URI
        # Contains the HTTP path of the script including any data passed in
        # the call. Suppose the script lives at
        # http://meine.seite.net/cgi-bin/test.pl and was called as
        # http://meine.seite.net/cgi-bin/test.pl?User=Stefan.
        # Then REQUEST_URI yields the value /cgi-bin/test.pl?User=Stefan.
        # (This was a dangling elif chained onto the REQUEST_METHOD check
        # above, which never ran; it has to be an independent if.)
        if hasattr(request, "wsgi_environ"):
            if "REQUEST_URI" in request.wsgi_environ:
                env["REQUEST_URI"] = request.wsgi_environ["REQUEST_URI"]

        # SERVER_ADMIN
        # Contains the name/e-mail address of the server administrator as
        # configured in the web server.
        if server_admin:
            env["SERVER_ADMIN"] = server_admin

        # SERVER_PROTOCOL
        # Contains the version of the HTTP protocol the installed web server
        # supports, e.g. HTTP/1.1 when the currently common version 1.1 of
        # the HTTP protocol is supported.
        if hasattr(request, "server_protocol"):
            env["SERVER_PROTOCOL"] = request.server_protocol

        # SERVER_SOFTWARE
        # Contains the name and version number of the web server software
        # on the server machine.
        if hasattr(request, "wsgi_environ"):
            if "SERVER_SOFTWARE" in request.wsgi_environ:
                env["SERVER_SOFTWARE"] = request.wsgi_environ[
                    "SERVER_SOFTWARE"]
                env["REDIRECT_STATUS"] = "200"

        # call interpreter
        cmd_args = [handler_executable, script_filename]
        proc = subprocess.Popen(cmd_args,
                                executable=handler_executable,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                cwd=dir,
                                env=env)
        proc.stdin.write(body_file.read())
        proc.stdin.close()  # close stdin so the CGI process sees EOF

        # get headers
        cherrypy.serving.response.headers = wsgiserver2.read_headers(
            proc.stdout, cherrypy.serving.response.headers)

        # get body
        cherrypy.serving.response.body = proc.stdout

        # finished: no more request handler needed
        cherrypy.serving.request.handler = None
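
For context on how the environment assembled above is consumed (a minimal sketch of my own, not part of this handler): a CGI script reads these variables from os.environ and, for POST, the body from standard input. Python 2 is assumed to match the snippet, and the User parameter is purely illustrative.

import os
import sys
import urlparse  # Python 2, matching the handler above

method = os.environ.get("REQUEST_METHOD", "GET")
if method == "POST":
    # POST bodies arrive on stdin, sized by CONTENT_LENGTH
    length = int(os.environ.get("CONTENT_LENGTH", "0"))
    params = urlparse.parse_qs(sys.stdin.read(length))
else:
    # GET data arrives in QUERY_STRING
    params = urlparse.parse_qs(os.environ.get("QUERY_STRING", ""))

sys.stdout.write("Content-Type: text/plain\r\n\r\n")
sys.stdout.write("Hello %s\n" % params.get("User", ["anonymous"])[0])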
Example #51
0
#!/usr/bin/env python
# -*- coding:utf-8 -*-

# Find the best implementation available on this platform
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

# Writing to a buffer
output = StringIO()
output.write('This goes into the buffer.')
print >> output, 'And so does this.'

# Retrieve the value written
print output.getvalue()

output.close()  # discard buffer memory

# Initialize a read buffer
input = StringIO('Initial value for read buffer')
# Read from the buffer
print input.read()
Example #52
0
class SocketFile(object):
    """
    File-like wrapper for reading socket objects.
    """
    def __init__(self, socket, encoding=None):
        super(SocketFile, self).__init__()
        self._socket = socket
        self._buffer = BytesIO()
        self.encoding = encoding

    def _append_buffer(self, data):
        pos = self._buffer.tell()
        self._buffer.seek(0, 2)
        self._buffer.write(data)
        self._buffer.seek(pos)

    def bind(self, *args, **kwargs):
        return self._socket.bind(*args, **kwargs)

    def connect(self, *args, **kwargs):
        return self._socket.connect(*args, **kwargs)

    def read(self, length, blocking=True):
        data = self._buffer.read(length)
        delta = length - len(data)
        if delta > 0:
            self._socket.setblocking(blocking)
            try:
                data += self._socket.recv(delta)
            except socket.error:
                pass
        return data

    def readline(self):
        parts = []
        while True:
            # Read the waiting data from the socket.
            data = self.read(1024, blocking=False)

            # If it contains a line-feed character, we add it
            # to the result list and append the rest of the data
            # to the buffer.
            if b'\n' in data:
                left, right = data.split(b'\n', 1)
                parts.append(left + b'\n')
                self._append_buffer(right)
                break

            else:
                if data:
                    parts.append(data)

                # Read one blocking byte; we get an empty bytes
                # object if the socket is closed.
                byte = self.read(1, blocking=True)
                if not byte:
                    break

                # Add the byte to the buffer. Stop here if it is a
                # newline character.
                parts.append(byte)
                if byte == b'\n':
                    break

        return b''.join(parts)

    def write(self, data):
        if isinstance(data, str):
            if not self.encoding:
                raise ValueError('got str object and no encoding specified')
            data = data.encode(self.encoding)

        return self._socket.send(data)

    def close(self):
        return self._socket.close()
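
A quick way to exercise the SocketFile wrapper locally is a loopback socketpair; this usage sketch is my own (POSIX-only socketpair, and slightly timing-dependent because the first read in readline is non-blocking):

import socket

a, b = socket.socketpair()            # POSIX-only loopback pair
sf = SocketFile(a, encoding='utf-8')
b.sendall(b'hello\nworld\n')
print(sf.readline())                  # b'hello\n'; the rest stays buffered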
Example #53
0
    def parsetx(self, txdata, txcount):
        '''
        Parse Tx data to get version, hashes, etc.
        :param txdata:
        :return:
        '''

        txinfo = []
        utility = Util()
        scripts = script.Script()
        '''
        [---TX_PAYLOAD---]
        [ 4] VERSION                <I                                  uint32_t
        [..] TX_IN_COUNT            variable integer
        [..] TX_IN                  multiple of TX_IN_COUNT
            [32] PREV_OUT_HASH                                          char[32]
            [ 4] PREV_OUT_INDEX     <I (zero-based)                     uint32_t
            [..] SCRIPT_LENGTH      variable integer
            [..] SCRIPT             variable string
            [ 4] SEQUENCE           <I                                  uint32_t
        [..] TX_OUT_COUNT           variable integer
        [..] TX_OUT                 multiple of TX_OUT_COUNT
            [ 8] VALUE              <q                                  int64_t
            [..] SCRIPT_LENGTH      variable integer
            [..] SCRIPT             variable string
        [ 4] LOCK_TIME              <I                                  uint32_t
        '''

        decodeData = StringIO(txdata)

        # Get length of data, then reset to start
        decodeData.seek(0, os.SEEK_END)
        txdatalen = decodeData.tell()
        decodeData.seek(0)
        logger.info('Tx Data Length: {}'.format(txdatalen))

        # Loop through each transaction
        for txnum in range(1, txcount + 1):
            txdetails = {}
            logger.debug('Processing Tx # {}'.format(txnum))

            txdetails['version'] = struct.unpack('<I', decodeData.read(4))[0]
            txdetails['tx_in_count'] = utility.deserialize_int(decodeData)[0]
            txdetails['Number'] = txnum

            # Loop through each input
            logger.debug('Tx In Count: {}'.format(txdetails['tx_in_count']))
            for tx_input in range(txdetails['tx_in_count']):
                logger.debug('Tx In {}'.format(tx_input))
                logger.debug('Remaining length: {}'.format(txdatalen -
                                                           decodeData.tell()))
                txdetails['input_prev_hash_{}'.format(tx_input)] = hexlify(
                    decodeData.read(32)[::-1])
                txdetails['input_prev_index_{}'.format(
                    tx_input)] = struct.unpack('<I', decodeData.read(4))[0]
                txdetails['input_script_length_{}'.format(
                    tx_input)] = utility.deserialize_int(decodeData)[0]
                txdetails['input_script_{}'.format(tx_input)] = hexlify(
                    decodeData.read(
                        txdetails['input_script_length_{}'.format(tx_input)]))

                # Parse input script
                scripts.parsetxinputscript(txnum, tx_input, txinfo, txdetails)
                #self.parsetxinputscript(txnum, tx_input, txinfo, txdetails)

                txdetails['input_sequence_{}'.format(
                    tx_input)] = struct.unpack('<I', decodeData.read(4))[0]

            txdetails['tx_out_count'] = utility.deserialize_int(decodeData)[0]

            # Loop through each output
            logger.debug('Tx Out Count: {}'.format(txdetails['tx_out_count']))
            for tx_output in range(txdetails['tx_out_count']):
                txdetails['output_value_{}'.format(tx_output)] = struct.unpack(
                    '<q', decodeData.read(8))[0]  # In Satoshis
                txdetails['output_value_{}'.format(tx_output)] = (
                    txdetails['output_value_{}'.format(tx_output)] / 10**8
                )  #value = (value / 10**8)
                txdetails['output_script_length_{}'.format(
                    tx_output)] = utility.deserialize_int(decodeData)[0]
                txdetails['output_script_{}'.format(tx_output)] = hexlify(
                    decodeData.read(txdetails['output_script_length_{}'.format(
                        tx_output)]))

                # Parse out Opcodes, etc
                scripts.parsetxoutputscript(txnum, tx_output, txinfo,
                                            txdetails)

            txdetails['lock_time'] = struct.unpack('<I', decodeData.read(4))[0]
            txinfo.append(txdetails)

        #logger.info(pprint.pformat(txinfo))

        return txinfo
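
utility.deserialize_int is not shown in this snippet; judging by the [0] indexing, it returns a (value, size) tuple. A sketch of the standard Bitcoin variable-length integer encoding it presumably implements:

import struct

def deserialize_varint(stream):
    """Read a Bitcoin-style variable-length integer.

    Returns (value, bytes_consumed), mirroring the tuple that
    utility.deserialize_int appears to return above.
    """
    prefix = ord(stream.read(1))
    if prefix < 0xfd:
        return prefix, 1
    if prefix == 0xfd:
        return struct.unpack('<H', stream.read(2))[0], 3
    if prefix == 0xfe:
        return struct.unpack('<I', stream.read(4))[0], 5
    return struct.unpack('<Q', stream.read(8))[0], 9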
Example #54
0
def export_data():
    engine = create_engine(connection_string)
    cur = engine.connect()
    all_questions = cur.execute("select question from questions")
    all_answers = cur.execute(
        "select choice from questions_choices where is_right = true")
    quest_set = []
    ans_set = []
    for each_quest in all_questions:
        quest_set.append(each_quest[0])
    for each_ans in all_answers:
        ans_set.append(each_ans[0])
    quest_ans_set = zip(quest_set, ans_set)
    if current_user.is_admin and request.form['selected_report_type'] == 'XLS':
        file_data = StringIO()
        workbook = xlsxwriter.Workbook(file_data)
        worksheet = workbook.add_worksheet('Data')
        # Add a bold format to use to highlight cells.
        bold = workbook.add_format({'bold': True})
        worksheet.write('A1', 'Question', bold)
        # Start from the first cell. Rows and columns are zero indexed.
        row = 1
        col = 0
        for each_q_ans in quest_ans_set:
            worksheet.write(row, col, each_q_ans[0])
            row += 1
            worksheet.write(row, col, 'Ans: ' + each_q_ans[1])
            row += 1
        workbook.close()
        file_data.seek(0)
        return Response(file_data.read(),
                        mimetype="application/excel",
                        headers={
                            "Content-disposition":
                            "attachment; filename=Question Report.xls"
                        })

    # elif current_user.is_admin and request.form['selected_report_type'] == 'DOC':
    #     file_data = StringIO()
    #     document = Document()
    #     document.add_heading('Question Set', level=1)
    #     document.add_paragraph([str(ques_ans[0]) + '\n' + 'Ans: ' + str(ques_ans[1]) +
    #                             '\n' for ques_ans in quest_ans_set])
    #     document.save(file_data)
    #     file_data.seek(0)
    #     return Response(
    #         file_data.read(),
    #         mimetype="application/doc",
    #         headers={"Content-disposition": "attachment; filename=Question Report.doc"})

    elif current_user.is_admin and request.form[
            'selected_report_type'] == 'PDF':
        file_data = StringIO()
        p = canvas.Canvas(file_data, pagesize=A4)
        p.setFont('Helvetica', 14)
        p.drawString(250, 800, "Question Set")
        x = 60
        y = 760
        n = 1
        for each_quest_ans in quest_ans_set:
            p.drawString(x, y, str('Q.%s  ' % n) + str(each_quest_ans[0]))
            n += 1
            y -= 20
            p.drawString(x, y, 'Ans: ' + str(each_quest_ans[1]))
            y -= 30
        p.showPage()
        p.save()
        file_data.seek(0)
        return Response(file_data.read(),
                        mimetype="application/pdf",
                        headers={
                            "Content-disposition":
                            "attachment; filename=Question Report.pdf"
                        })

    else:
        flash(
            'Please log-in with Administrator account to view the control panel'
        )
        return redirect(url_for('index'))
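
One caveat worth adding: xlsxwriter and reportlab both emit binary data, so the StringIO buffer above only works on Python 2 (and xlsxwriter actually produces .xlsx content, despite the .xls filename in the header). On Python 3 the same idea would use io.BytesIO, roughly:

from io import BytesIO

import xlsxwriter

file_data = BytesIO()
workbook = xlsxwriter.Workbook(file_data, {'in_memory': True})
workbook.add_worksheet('Data').write('A1', 'Question')
workbook.close()
file_data.seek(0)
payload = file_data.read()  # bytes, ready to use as a response body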
Example #55
0
class ZipArchiver(zipfile.ZipFile, AbstractArchiver):
    def __init__(self):
        self.buffer = StringIO()
        self.position = 0
        zipfile.ZipFile.__init__(self, self.buffer, mode='w', allowZip64=True)

    def beginFile(self, file):
        AbstractArchiver.beginFile(self, file)

        if file.get('path') is None:
            ftime = time.localtime()
        else:
            ftime = time.localtime(os.path.getctime(file.get('path')))

        self.zipInfo = zipfile.ZipInfo(self.destinationPath(file), ftime[:6])
        self.reader.seek(0, 2)
        self.zipInfo.file_size = self.zipInfo.compress_size = self.reader.tell(
        )

        self.reader.seek(0)
        self.zipInfo.flag_bits = 0x00
        self.zipInfo.header_offset = self.position
        self.zipInfo.CRC = self.CRC32()

        self._writecheck(self.zipInfo)
        self._didModify = True

        self.fp.write(self.zipInfo.FileHeader())

        self.filelist.append(self.zipInfo)
        self.NameToInfo[self.zipInfo.filename] = self.zipInfo

        return

    def readBlock(self, block_size):
        self.fp.write(self.reader.read(block_size))
        block = self.buffer.getvalue()
        self.buffer.truncate(0)
        self.position += len(block)

        return block

    def readEnding(self):
        self.buffer.seek(self.position)
        self.close()
        newPos = self.buffer.tell()
        self.buffer.seek(self.position)
        block = self.buffer.read(newPos - self.position + 1)
        self.buffer.truncate(0)
        return block

    def EOF(self):
        if self.reader.tell() >= self.zipInfo.file_size:
            return True
        return False

    def CRC32(self):
        block = 1
        CRC = 0
        self.reader.seek(0)
        while block:
            block = self.reader.read(65536)
            CRC = zlib.crc32(block, CRC) & 0xffffffff
        self.reader.seek(0)

        return CRC

    def getContentType(self):
        return 'application/zip'

    def getFileExtension(self):
        return '.zip'
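
The CRC32 method above checksums the reader in 64 KiB blocks, so arbitrarily large files never have to fit in memory at once. The same technique as a standalone helper:

import zlib

def crc32_of(path, chunk_size=65536):
    """Incremental CRC32 of a file of any size."""
    crc = 0
    with open(path, 'rb') as fh:
        while True:
            block = fh.read(chunk_size)
            if not block:
                break
            crc = zlib.crc32(block, crc) & 0xffffffff
    return crc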
Example #56
0
def pxsuptime(supfile, base):
    """extract subtitles from ProjectX-generated .sup file. Picky!"""
    # Here thanks go to the ProjectX source, http://www.via.ecp.fr/~sam/doc/dvd/
    # and gtkspu program in gopchop distribution

    f = open(supfile, 'rb')

    if f.read(2) != b'SP':
        raise Exception("Syncword missing. XXX bailing out.")

    image = 1  # 1-based for feh(1) file number counting compatibility
    while True:
        # X.java reads 5 bytes of pts, SubPicture.java writes 4. With
        # 4 bytes 47721 seconds (13.25 hours) can be handled.
        pts, = struct.unpack('<Lxxxx', f.read(8))
        size, pack = struct.unpack('>HH', f.read(4))

        #print pts / 90, 'ms.', size, pack

        data = f.read(pack - 4)
        ctrl = StringIO(f.read(size - pack))

        # parsing control info

        prev = 0
        while True:
            date, = struct.unpack('>H', ctrl.read(2))
            next, = struct.unpack('>H', ctrl.read(2))  # XXX

            while True:
                cmd = ord(ctrl.read(1))
                if cmd == 0x00:  # force display:
                    continue
                if cmd == 0x01:  # start date (read above)
                    start = date  # XXX + previous
                    continue
                if cmd == 0x02:  # stop date (read above)
                    end = date  # XXX + previous
                    continue
                if cmd == 0x03:  # palette
                    xpalette = ctrl.read(2)
                    continue
                if cmd == 0x04:  # alpha channel
                    alpha = ctrl.read(2)
                    continue
                if cmd == 0x05:  # coordinates
                    coords = ctrl.read(6)
                    x1 = (xord(coords[0]) << 4) + (xord(coords[1]) >> 4)
                    x2 = ((xord(coords[1]) & 0xf) << 8) + xord(coords[2])
                    y1 = (xord(coords[3]) << 4) + (xord(coords[4]) >> 4)
                    y2 = ((xord(coords[4]) & 0xf) << 8) + xord(coords[5])
                    continue
                if cmd == 0x06:  # rle offsets
                    top_field, bottom_field = struct.unpack(
                        '>HH', ctrl.read(4))
                    continue
                if cmd == 0xff:  # end command
                    break
                else:
                    raise Exception("%d: Unknown control sequence" % cmd)
            if prev == next:
                break
            prev = next

        startpts = pts
        endpts = pts + end * 900  # (matches .son) other values seen: 1000, 1024

        sptstr = pts2ts(pts)

        width = x2 - x1 + 1
        if width % 2 != 0:
            print("Image %d width (%d) is not divisible by 2." %
                  (image, width),
                  file=sys.stderr,
                  end=' ')
            print("Check %s-%05d.bmp" % (base, image), file=sys.stderr)
        print("image='%d' start='%s' end='%s' x='%d' y='%d' w='%d' h='%d'" %
              (image, sptstr, pts2ts(endpts), x1, y1, width, y2 - y1 + 1))
        image = image + 1

        if f.read(2) != b'SP':
            if len(f.read(1)) == 0: return  # EOF
            raise Exception("Syncword missing. XXX bailing out.")
Example #57
0
class TestFileTransferPerformance(unittest.TestCase):
    def setUp(self):
        self.test_file_contents = open(get_book_path('pg1661.epub'), "rb").read()
        self.file_stream = StringIO(self.test_file_contents)

        self.received_file_contents = ""

        # noinspection PyUnusedLocal
        def file_received(file_hash, extension, content, more_parts_follow):
            self.received_file_contents += content

        self.client_control = mock.MagicMock()
        self.client_control.command_deliver_file_received.side_effect = file_received

        self.server_control = mock.MagicMock()

    def _setup_loopback_transport(self, client_secure_channel, server_secure_channel):
        self.client_transport = pydb.com.transport.localloopbacktransportprotocol.LocalLoopBackTransportProtocol(
            client_secure_channel)
        self.server_transport = pydb.com.transport.localloopbacktransportprotocol.LocalLoopBackTransportProtocol(
            server_secure_channel)
        client_secure_channel.set_lower_layer(self.client_transport)
        server_secure_channel.set_lower_layer(self.server_transport)
        self.client_transport.set_partner(self.server_transport)
        self.server_transport.set_partner(self.client_transport)

    def _run_benchmark(self, secure_channel_name, client_secure_channel, server_session):
        client_secure_channel.initiate_secure_channel()
        start_wire_byte_count = self.server_transport.bytes_sent()
        start_time = time.clock()
        for i in xrange(FILE_DELIVERY_REPETITIONS):
            self.received_file_contents = ""
            server_session.deliver_file('a_hash', 'epub', self.file_stream.read(), more_parts_follow=False)
            self.file_stream.seek(0)
        stop_time = time.clock()
        self.assertEqual(self.received_file_contents, self.test_file_contents)

        duration = stop_time - start_time
        if duration == 0:
            duration = 0.001
        payload_bandwidth_kbps = len(self.test_file_contents) * FILE_DELIVERY_REPETITIONS / duration / 1024
        stop_wire_byte_count = self.server_transport.bytes_sent()

        bytes_sent_by_server = (stop_wire_byte_count-start_wire_byte_count) / FILE_DELIVERY_REPETITIONS
        overhead_bytes = bytes_sent_by_server-len(self.test_file_contents)

        print("Loopback results {:>60}: {:8} kbps, {:7.1f} ms, encoding overhead: {:7} bytes"
              .format(secure_channel_name, int(payload_bandwidth_kbps), round(duration * 1000, 1),  overhead_bytes))

    def test_01_json_session_with_aeshmac_secure_channel(self):
        client_session = pydb.com.jsonsession.JsonSession(self.client_control)
        server_session = pydb.com.jsonsession.JsonSession(self.server_control)

        client_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            client_session, pre_shared_secret_passphrase="hello")
        server_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            server_session,
            friends=[
                {'name': 'test friend', 'id': 1, 'comm_data': {'secret': 'hello'}}
            ])

        client_session.set_lower_layer(client_secure_channel)
        server_session.set_lower_layer(server_secure_channel)

        self._setup_loopback_transport(client_secure_channel, server_secure_channel)

        self._run_benchmark('JsonSession with AesHmacSecureChannel', client_secure_channel, server_session)

    def test_02_json_session_with_insecure_channel(self):
        client_session = pydb.com.jsonsession.JsonSession(self.client_control)
        server_session = pydb.com.jsonsession.JsonSession(self.server_control)

        client_secure_channel = pydb.com.securechannel.insecurechannel.InsecureChannel(
            client_session)
        server_secure_channel = pydb.com.securechannel.insecurechannel.InsecureChannel(
            server_session)

        client_session.set_lower_layer(client_secure_channel)
        server_session.set_lower_layer(server_secure_channel)

        self._setup_loopback_transport(client_secure_channel, server_secure_channel)

        self._run_benchmark('JsonSession with InsecureChannel', client_secure_channel, server_session)

    def test_03_json_and_binary_session_with_aeshmac_secure_channel(self):
        client_session = pydb.com.json_and_binary_session.JsonAndBinarySession(self.client_control)
        server_session = pydb.com.json_and_binary_session.JsonAndBinarySession(self.server_control)

        client_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            client_session, pre_shared_secret_passphrase="hello")
        server_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            server_session,
            friends=[
                {'name': 'test friend', 'id': 1, 'comm_data': {'secret': 'hello'}}
            ])

        client_session.set_lower_layer(client_secure_channel)
        server_session.set_lower_layer(server_secure_channel)

        self._setup_loopback_transport(client_secure_channel, server_secure_channel)

        self._run_benchmark('JsonAndBinarySession with AesHmacSecureChannel', client_secure_channel, server_session)

    def test_04_json_and_binary_session_with_insecure_channel(self):
        client_session = pydb.com.json_and_binary_session.JsonAndBinarySession(self.client_control)
        server_session = pydb.com.json_and_binary_session.JsonAndBinarySession(self.server_control)

        client_secure_channel = pydb.com.securechannel.insecurechannel.InsecureChannel(
            client_session)
        server_secure_channel = pydb.com.securechannel.insecurechannel.InsecureChannel(
            server_session)

        client_session.set_lower_layer(client_secure_channel)
        server_session.set_lower_layer(server_secure_channel)

        self._setup_loopback_transport(client_secure_channel, server_secure_channel)

        self._run_benchmark('JsonAndBinarySession with InsecureChannel', client_secure_channel, server_session)

    def test_05_json_session_with_aeshmac_secure_channel_compression_level_0(self):
        client_session = pydb.com.jsonsession.JsonSession(self.client_control)
        server_session = pydb.com.jsonsession.JsonSession(self.server_control)

        client_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            client_session, pre_shared_secret_passphrase="hello")
        client_secure_channel.set_compression_level(0)
        server_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            server_session,
            friends=[
                {'name': 'test friend', 'id': 1, 'comm_data': {'secret': 'hello'}}
            ])
        server_secure_channel.set_compression_level(0)

        client_session.set_lower_layer(client_secure_channel)
        server_session.set_lower_layer(server_secure_channel)

        self._setup_loopback_transport(client_secure_channel, server_secure_channel)

        self._run_benchmark('JsonSession with AesHmacSecureChannel compress 0', client_secure_channel, server_session)

    def test_06_json_and_binary_session_with_aeshmac_secure_channel_compression_level_0(self):
        client_session = pydb.com.json_and_binary_session.JsonAndBinarySession(self.client_control)
        server_session = pydb.com.json_and_binary_session.JsonAndBinarySession(self.server_control)

        client_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            client_session, pre_shared_secret_passphrase="hello")
        client_secure_channel.set_compression_level(0)

        server_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            server_session,
            friends=[
                {'name': 'test friend', 'id': 1, 'comm_data': {'secret': 'hello'}}
            ])
        server_secure_channel.set_compression_level(0)

        client_session.set_lower_layer(client_secure_channel)
        server_session.set_lower_layer(server_secure_channel)

        self._setup_loopback_transport(client_secure_channel, server_secure_channel)

        self._run_benchmark('JsonAndBinarySession with AesHmacSecureChannel compress 0', client_secure_channel,
                            server_session)

    def test_07_json_session_with_aeshmac_secure_channel_without_compression(self):
        client_session = pydb.com.jsonsession.JsonSession(self.client_control)
        server_session = pydb.com.jsonsession.JsonSession(self.server_control)

        client_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            client_session, pre_shared_secret_passphrase="hello")
        client_secure_channel.set_compression_level(None)
        server_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            server_session,
            friends=[
                {'name': 'test friend', 'id': 1, 'comm_data': {'secret': 'hello'}}
            ])
        server_secure_channel.set_compression_level(None)

        client_session.set_lower_layer(client_secure_channel)
        server_session.set_lower_layer(server_secure_channel)

        self._setup_loopback_transport(client_secure_channel, server_secure_channel)

        self._run_benchmark('JsonSession with AesHmacSecureChannel compress off', client_secure_channel, server_session)

    def test_08_json_and_binary_session_with_aeshmac_secure_channel_without_compression(self):
        client_session = pydb.com.json_and_binary_session.JsonAndBinarySession(self.client_control)
        server_session = pydb.com.json_and_binary_session.JsonAndBinarySession(self.server_control)

        client_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            client_session, pre_shared_secret_passphrase="hello")
        client_secure_channel.set_compression_level(None)

        server_secure_channel = pydb.com.securechannel.aeshmacsecurechannel.AesHmacSecureChannel(
            server_session,
            friends=[
                {'name': 'test friend', 'id': 1, 'comm_data': {'secret': 'hello'}}
            ])
        server_secure_channel.set_compression_level(None)

        client_session.set_lower_layer(client_secure_channel)
        server_session.set_lower_layer(server_secure_channel)

        self._setup_loopback_transport(client_secure_channel, server_secure_channel)

        self._run_benchmark('JsonAndBinarySession with AesHmacSecureChannel compress off', client_secure_channel,
                            server_session)
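
A portability note on the benchmark: time.clock measures CPU time on Unix but wall time on Windows, and it was removed in Python 3.8. A guarded monotonic timer is safer where available, e.g.:

import time

try:
    timer = time.perf_counter   # Python 3.3+: monotonic wall clock
except AttributeError:
    timer = time.clock          # Python 2 fallback, as used above

start = timer()
# ... benchmark body ...
duration = timer() - start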
Example #58
0
    def _proxy(self, url, params=None, method=None, body=None, headers=None):
        # get query string

        params = dict(self.request.params) if params is None else params
        parsed_url = urlparse(url)
        all_params = parse_qs(parsed_url.query)

        for p in all_params:
            all_params[p] = ",".join(all_params[p])
        all_params.update(params)
        params_encoded = {}
        for k, v in all_params.iteritems():
            params_encoded[k] = unicode(v).encode("utf-8")
        query_string = urllib.urlencode(params_encoded)

        if parsed_url.port is None:
            url = "%s://%s%s?%s" % (parsed_url.scheme, parsed_url.hostname,
                                    parsed_url.path, query_string)
        else:  # pragma: nocover
            url = "%s://%s:%i%s?%s" % (parsed_url.scheme, parsed_url.hostname,
                                       parsed_url.port, parsed_url.path,
                                       query_string)

        log.info("Send query to URL:\n%s." % url)

        if method is None:
            method = self.request.method

        # forward request to target (without Host Header)
        http = httplib2.Http()
        if headers is None:  # pragma: nocover
            headers = dict(self.request.headers)
            if parsed_url.hostname != "localhost":
                headers.pop("Host")

        headers["Cache-Control"] = "no-cache"

        if method in ["POST", "PUT"] and body is None:  # pragma: nocover
            body = StringIO(self.request.body)

        try:
            if method in ["POST", "PUT"]:
                resp, content = http.request(url,
                                             method=method,
                                             body=body,
                                             headers=headers)
            else:
                resp, content = http.request(url,
                                             method=method,
                                             headers=headers)
        except Exception as e:  # pragma: nocover
            log.error("Error '%s' while getting the URL:\n%s\nMethode: %s." %
                      (sys.exc_info()[0], url, method))

            log.error("--- With headers ---\n%s" %
                      "\n".join(["%s: %s" % h for h in headers.items()]))

            log.error("--- Exception ---")
            log.exception(e)

            if method in ["POST", "PUT"]:
                log.error("--- With body ---")
                if hasattr(body, "read"):
                    body.reset()
                    log.error(body.read())
                else:
                    log.error(body)

            raise HTTPBadGateway("See logs for details")

        if resp.status < 200 or resp.status >= 300:  # pragma: no cover
            log.error("Error '%s' in response of URL:\n%s." %
                      (resp.reason, url))

            log.error("Status: %i" % resp.status)
            log.error("Method: %s" % method)

            log.error("--- With headers ---\n%s" %
                      "\n".join(["%s: %s" % h for h in headers.items()]))

            if method == "POST":
                log.error("--- Query with body ---")
                if hasattr(body, "read"):
                    body.reset()
                    log.error(body.read())
                else:
                    log.error(body)

            log.error("--- Return content ---")
            log.error(content)

            raise HTTPInternalServerError("See logs for details")

        return resp, content
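
The loop at the top of _proxy flattens the lists that parse_qs produces for repeated query parameters into comma-separated strings. In isolation:

from urlparse import parse_qs  # Python 2, as in the snippet above

all_params = parse_qs("a=1&a=2&b=3")   # {'a': ['1', '2'], 'b': ['3']}
for p in all_params:
    all_params[p] = ",".join(all_params[p])
# all_params is now {'a': '1,2', 'b': '3'}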
Example #59
0
def captcha_image(request, key):
    store = get_object_or_404(CaptchaStore, hashkey=key)
    text = store.challenge

    if settings.CAPTCHA_FONT_PATH.lower().strip().endswith('ttf'):
        font = ImageFont.truetype(settings.CAPTCHA_FONT_PATH,
                                  settings.CAPTCHA_FONT_SIZE)
    else:
        font = ImageFont.load(settings.CAPTCHA_FONT_PATH)

    size = font.getsize(text)
    size = (size[0] * 2, size[1])
    image = Image.new('RGB', size, settings.CAPTCHA_BACKGROUND_COLOR)

    try:
        PIL_VERSION = int(NON_DIGITS_RX.sub('', Image.VERSION))
    except Exception:  # Image.VERSION missing or unparseable in newer Pillow
        PIL_VERSION = 116

    xpos = 2
    for char in text:
        fgimage = Image.new('RGB', size, settings.CAPTCHA_FOREGROUND_COLOR)
        charimage = Image.new('L', font.getsize(' %s ' % char), '#000000')
        chardraw = ImageDraw.Draw(charimage)
        chardraw.text((0, 0), ' %s ' % char, font=font, fill='#ffffff')
        if settings.CAPTCHA_LETTER_ROTATION:
            if PIL_VERSION >= 116:
                charimage = charimage.rotate(
                    random.randrange(*settings.CAPTCHA_LETTER_ROTATION),
                    expand=0,
                    resample=Image.BICUBIC)
            else:
                charimage = charimage.rotate(
                    random.randrange(*settings.CAPTCHA_LETTER_ROTATION),
                    resample=Image.BICUBIC)
        charimage = charimage.crop(charimage.getbbox())
        maskimage = Image.new('L', size)

        maskimage.paste(
            charimage,
            (xpos, 4, xpos + charimage.size[0], 4 + charimage.size[1]))
        size = maskimage.size
        image = Image.composite(fgimage, image, maskimage)
        xpos = xpos + 2 + charimage.size[0]

    image = image.crop((0, 0, xpos + 1, size[1]))
    draw = ImageDraw.Draw(image)

    for f in settings.noise_functions():
        draw = f(draw, image)
    for f in settings.filter_functions():
        image = f(image)

    out = StringIO()
    image.save(out, "PNG")
    out.seek(0)

    response = HttpResponse()
    response['Content-Type'] = 'image/png'
    response.write(out.read())

    return response
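
The Image.VERSION probe only exists in classic PIL and early Pillow; newer Pillow drops it in favour of PIL.__version__ (hence the bare fallback to 116 above). A slightly more defensive probe might look like this sketch:

import re

import PIL
from PIL import Image

# Pillow >= 5.2 exposes PIL.__version__; classic PIL and early Pillow
# exposed Image.VERSION instead.
version_string = getattr(PIL, '__version__', None) or getattr(Image, 'VERSION', '')
PIL_VERSION = int(re.sub(r'\D', '', version_string) or '116')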
Example #60
0
def export_all_responses():
    s3mgr.load("survey_series")
    s3mgr.load("survey_section")
    s3mgr.load("survey_complete")
    s3 = response.s3
    try:
        import xlwt
    except ImportError:
        output = s3_rest_controller("survey",
                                    "series",
                                    rheader=s3.survey_series_rheader)
        return output
    series_id = request.args[0]
    seriesName = response.s3.survey_getSeriesName(series_id)
    sectionBreak = False

    filename = "%s_All_responses.xls" % seriesName
    contentType = ".xls"
    output = StringIO()
    book = xlwt.Workbook(encoding="utf-8")
    # get all questions and write out as a heading
    col = 0
    completeRow = {}
    nextRow = 2
    qstnList = response.s3.survey_getAllQuestionsForSeries(series_id)
    if len(qstnList) > 256:
        sectionList = s3.survey_getAllSectionsForSeries(series_id)
        sectionBreak = True
    if sectionBreak:
        sheets = {}
        cols = {}
        for section in sectionList:
            sheetName = section["name"].split(" ")[0]
            if sheetName not in sheets:
                sheets[sheetName] = book.add_sheet(sheetName)
                cols[sheetName] = 0
    else:
        sheet = book.add_sheet(T("Responses"))
    for qstn in qstnList:
        if sectionBreak:
            sheetName = qstn["section"].split(" ")[0]
            sheet = sheets[sheetName]
            col = cols[sheetName]
        row = 0
        sheet.write(row, col, qstn["code"])
        row += 1
        widgetObj = s3.survey_getWidgetFromQuestion(qstn["qstn_id"])
        sheet.write(row, col, widgetObj.fullName())
        # for each question get the response
        allResponses = s3.survey_getAllAnswersForQuestionInSeries(qstn["qstn_id"],
                                                                  series_id)
        for answer in allResponses:
            value = answer["value"]
            complete_id = answer["complete_id"]
            if complete_id in completeRow:
                row = completeRow[complete_id]
            else:
                completeRow[complete_id] = nextRow
                row = nextRow
                nextRow += 1
            sheet.write(row, col, value)
        col += 1
        if sectionBreak:
            cols[sheetName] += 1
    sheet.panes_frozen = True
    sheet.horz_split_pos = 2
    book.save(output)


    output.seek(0)
    response.headers["Content-Type"] = contenttype(contentType)
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
    return output.read()
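
The len(qstnList) > 256 guard exists because the legacy .xls format written by xlwt caps a worksheet at 256 columns (0..255); splitting into one sheet per section keeps each sheet below that limit. The limit is easy to demonstrate:

import xlwt

book = xlwt.Workbook()
sheet = book.add_sheet('Data')
sheet.write(0, 255, 'last legal column')   # .xls columns run 0..255
try:
    sheet.write(0, 256, 'one too many')
except Exception as exc:                   # xlwt rejects the column index
    print(exc)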