Example #1
    def install(self, plugin):
        directories = conf.supybot.directories.plugins()
        directory = self._getWritableDirectoryFromList(directories)
        assert directory is not None
        dirname = ''.join((self._path, plugin))

        fileObject = urllib2.urlopen(self._downloadUrl)
        fileObject2 = StringIO()
        fileObject2.write(fileObject.read())
        fileObject.close()
        fileObject2.seek(0)
        archive = tarfile.open(fileobj=fileObject2, mode='r:gz')
        prefix = archive.getnames()[0]
        try:
            assert archive.getmember(prefix + dirname).isdir()

            for file in archive.getmembers():
                if file.name.startswith(prefix + dirname):
                    extractedFile = archive.extractfile(file)
                    newFileName = os.path.join(*file.name.split('/')[1:])
                    newFileName = newFileName[len(self._path)-1:]
                    newFileName = os.path.join(directory, newFileName)
                    if os.path.exists(newFileName):
                        assert os.path.isdir(newFileName)
                        shutil.rmtree(newFileName)
                    if extractedFile is None:
                        os.mkdir(newFileName)
                    else:
                        newFile = open(newFileName, 'a')
                        newFile.write(extractedFile.read())
                        newFile.close()
        finally:
            archive.close()
            fileObject2.close()
            del archive, fileObject, fileObject2
Example #2
 def download_tweets_csv(self, request, queryset):
     f = StringIO()
     w = unicodecsv.writer(f, encoding='utf-8')
     for tweet in queryset:
         w.writerow((
             tweet['data']['id'],
             tweet['data']['text'],
             tweet['data']['timestamp'],
             tweet['data']['retweet_count'],
             tweet['data']['favorite_count'],
             tweet['data']['in_reply_to_status_id'],
             tweet['data']['in_reply_to_user_id'],
             tweet['data']['retweeted_status_id'],
             tweet['data']['coords'],
             tweet['data']['user']['screen_name'],
             tweet['data']['user']['id'],
             tweet['data']['user']['name'],
         ))
     f.seek(0)
     response = HttpResponse(
         f.read(),
         content_type='text/csv'
     )
     response['Content-Disposition'] = 'attachment;filename=export.csv'
     return response
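This reads like a Django ModelAdmin action; a hedged sketch of how it might be wired up (the admin class and model names are assumptions, not from the source):

from django.contrib import admin

class TweetAdmin(admin.ModelAdmin):
    # download_tweets_csv above would be defined on this class;
    # Django resolves action names against the ModelAdmin
    actions = ['download_tweets_csv']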
Example #3
File: srj.py Project: ox-it/humfrey
    def _iter(self, sparql_results_type, fields, bindings, boolean, triples):
        if sparql_results_type not in ('resultset', 'boolean'):
            raise TypeError("Unexpected results type: {0}".format(sparql_results_type))

        # We'll spool to a buffer, and only yield when it gets a bit big.
        buffer = StringIO()

        # Do these attribute lookups only once.
        json_dumps, json_dump, buffer_write = json.dumps, json.dump, buffer.write

        buffer_write('{\n')
        if sparql_results_type == 'boolean':
            buffer_write('  "head": {},\n')
            buffer_write('  "boolean": %s' % ('true' if boolean else 'false'))
        elif sparql_results_type == 'resultset':
            buffer_write('  "head": {\n')
            buffer_write('    "vars": [ %s ]\n' % ', '.join(json_dumps(field) for field in fields))
            buffer_write('  },\n')
            buffer_write('  "results": {\n')
            buffer_write('    "bindings": [\n')
            for i, binding in enumerate(bindings):
                buffer_write('      {' if i == 0 else ',\n      {')
                j = 0
                for field in fields:
                    value = binding.get(field)
                    if value is None:
                        continue
                    buffer_write(',\n        ' if j > 0 else '\n        ')
                    json_dump(field, buffer)
                    if isinstance(value, rdflib.URIRef):
                        buffer_write(': { "type": "uri"')
                    elif isinstance(value, rdflib.BNode):
                        buffer_write(': { "type": "bnode"')
                    elif value.datatype is not None:
                        buffer_write(': { "type": "typed-literal", "datatype": ')
                        json_dump(value.datatype, buffer)
                    elif value.language is not None:
                        buffer_write(': { "type": "literal", "xml:lang": ')
                        json_dump(value.language, buffer)
                    else:
                        buffer_write(': { "type": "literal"')
                    buffer_write(', "value": ')
                    json_dump(value, buffer)
                    buffer_write(' }')

                    j += 1

                buffer_write('\n      }')

                # spool: yield what we have and reset once the buffer gets big
                if buffer.tell() > 65000: # Almost 64k
                    yield buffer.getvalue()
                    buffer.seek(0)
                    buffer.truncate()
            buffer_write('\n    ]')
            buffer_write('\n  }')

        buffer_write('\n}')
        yield buffer.getvalue()
        buffer.close()
Example #4
 def dumps(self, arg, proto=0):
     f = StringIO()
     p = cPickle.Pickler(f, proto)
     p.fast = 1
     p.dump(arg)
     f.seek(0)
     return f.read()
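Since dumps is a method, a minimal round-trip sketch assumes an instance t of its class; cPickle.loads is the standard inverse:

import cPickle

obj = {'a': [1, 2, 3]}
blob = t.dumps(obj, proto=2)          # t: assumed instance of the class above
assert cPickle.loads(blob) == obj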
Example #5
def createZip(path):

    def walktree(top=".", depthfirst=True):
        names = os.listdir(top)
        if not depthfirst:
            yield top, names
        for name in names:
            try:
                st = os.lstat(os.path.join(top, name))
            except os.error:
                continue
            if stat.S_ISDIR(st.st_mode):
                for (newtop, children) in walktree(os.path.join(top, name),
                                                   depthfirst):
                    yield newtop, children
        if depthfirst:
            yield top, names

    files = []
    for (basepath, children) in walktree(path, False):
        for child in children:
            f = os.path.join(basepath, child)
            if os.path.isfile(f):
                f = f.encode(sys.getfilesystemencoding())
                files.append(f)

    f = StringIO()
    zf = zipfile.ZipFile(f, "w")
    for fname in files:
        nfname = os.path.join(os.path.basename(path), fname[len(path)+1:])
        zf.write(fname, nfname, zipfile.ZIP_DEFLATED)
    zf.close()

    f.seek(0)
    return f
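createZip returns the archive as an in-memory file object, so the caller chooses where the bytes end up; a minimal usage sketch (the directory and output path are assumptions):

buf = createZip('/tmp/reports')                 # assumed input directory
with open('/tmp/reports.zip', 'wb') as out:
    out.write(buf.getvalue())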
Example #6
 def _apply_watermark(self, datafile):
     text = self.aq_parent.watermark_text
     FONT = os.path.join(os.path.dirname(__file__), 'fonts', 'VeraSeBd.ttf')
     img = Image.open(datafile)
     newimg = StringIO()
     fmt = img.format
     watermark = Image.new("RGBA", (img.size[0], img.size[1]))
     draw = ImageDraw.ImageDraw(watermark, "RGBA")
     size = 0
     # grow the font point by point; keep the last size whose rendered
     # text (plus a third of its height as margin) still fits the width
     while True:
         size += 1
         nextfont = ImageFont.truetype(FONT, size)
         nexttextwidth, nexttextheight = nextfont.getsize(text)
         if nexttextwidth + nexttextheight / 3 > watermark.size[0]:
             break
         font = nextfont
         textwidth, textheight = nexttextwidth, nexttextheight
     draw.setfont(font)
     draw.text(((watermark.size[0]-textwidth)/2,
                (watermark.size[1]-textheight)/2), text)
     watermark = watermark.rotate(degrees(atan(float(img.size[1])/img.size[0])),
                              Image.BICUBIC)
     mask = watermark.convert("L").point(lambda x: min(x, 88))
     watermark.putalpha(mask)
     img.paste(watermark, None, watermark)
     quality = self._photo_quality(datafile)
     img.save(newimg, fmt, quality=quality)
     newimg.seek(0)
     return newimg
Example #7
def magic_insert(curs, tablename, data, fields = None, use_insert = 0, quoted_table = False):
    r"""Copy/insert a list of dict/list data to database.

    If curs == None, then the copy or insert statements are returned
    as string.  For list of dict the field list is optional, as its
    possible to guess them from dict keys.

    Example:
    >>> magic_insert(None, 'tbl', [[1, '1'], [2, '2']], ['col1', 'col2'])
    'COPY public.tbl (col1,col2) FROM STDIN;\n1\t1\n2\t2\n\\.\n'
    """
    if len(data) == 0:
        return

    # decide how to process
    if hasattr(data[0], 'keys'):
        if fields is None:
            fields = data[0].keys()
        if use_insert:
            row_func = _gen_dict_insert
        else:
            row_func = _gen_dict_copy
    else:
        if fields is None:
            raise Exception("Non-dict data needs field list")
        if use_insert:
            row_func = _gen_list_insert
        else:
            row_func = _gen_list_copy

    qfields = [skytools.quote_ident(f) for f in fields]
    if quoted_table:
        qtablename = tablename
    else:
        qtablename = skytools.quote_fqident(tablename)

    # init processing
    buf = StringIO()
    if curs is None and use_insert == 0:
        fmt = "COPY %s (%s) FROM STDIN;\n"
        buf.write(fmt % (qtablename, ",".join(qfields)))

    # process data
    for row in data:
        buf.write(row_func(qtablename, row, fields, qfields))
        buf.write("\n")

    # if user needs only string, return it
    if curs is None:
        if use_insert == 0:
            buf.write("\\.\n")
        return buf.getvalue()

    # do the actual copy/inserts
    if use_insert:
        curs.execute(buf.getvalue())
    else:
        buf.seek(0)
        hdr = "%s (%s)" % (qtablename, ",".join(qfields))
        curs.copy_from(buf, hdr)
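For dict rows the field list is optional, as the docstring notes; a hedged sketch of the insert mode (the exact SQL text depends on the _gen_* helpers, which are not shown here):

rows = [{'col1': 1, 'col2': 'a'}, {'col1': 2, 'col2': 'b'}]
sql = magic_insert(None, 'tbl', rows, use_insert=1)  # curs=None: returns the statements as a string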
Example #8
def fetch_image_from_url(file_url):
    """Returns an UploadedFile object after retrieving the file at the given URL."""
    inStream = urllib2.urlopen(file_url)

    parser = ImageFile.Parser()
    file_size = 0
    max_file_size = 20 * 1024 * 1024 # 20 megabytes
    read_size = 1024
    while True:
        s = inStream.read(read_size)
        file_size += len(s)
        if not s:
            break
        if file_size > max_file_size:
            raise Exception("file size exceeded max size: %s bytes" % max_file_size)
        parser.feed(s)

    inImage = parser.close()
    # convert to RGB to avoid error with png and tiffs
    #if inImage.mode != "RGB":
    #    inImage = inImage.convert("RGB")

    img_temp = StringIO()
    inImage.save(img_temp, 'PNG')
    img_temp.seek(0)

    file_object = File(img_temp, 'img_temp.png')
    uploaded_file = UploadedFile(file=file_object, name=file_object.name, content_type='image/png', size=file_size, charset=None)

    return uploaded_file
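A one-line usage sketch (the URL is an assumption and network access is required):

uploaded = fetch_image_from_url('http://example.com/logo.png')  # assumed URL
print uploaded.name, uploaded.size   # 'img_temp.png' and the byte count of the downloaded source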
Example #9
    def __resize(self, display):
        #resize and resample photo
        original_id = self._getDisplayId()
        string_image = StringIO(str(self.get_data(original_id)))
        if display == 'Original':
            return string_image

        crop = False
        width, height = self.displays.get(display, (0, 0))
        # Calculate image width, size
        if not (width and height):
            size = LISTING_DISPLAYS.get(display, self.width())
            width, height = self.__get_crop_aspect_ratio_size(size)
            crop = True
        else:
            width, height = self.__get_aspect_ratio_size(width, height)
        
        # Resize image
        newimg = StringIO()
        img = Image.open(string_image)
        fmt = img.format
        try:
            # very old PIL builds may lack Image.ANTIALIAS
            img = img.resize((width, height), Image.ANTIALIAS)
        except AttributeError:
            img = img.resize((width, height))
        
        # Crop if needed
        if crop:
            box = self.__get_crop_box(width, height)
            img = img.crop(box)
            #img.load()
        quality = self._photo_quality(string_image)
        img.save(newimg, fmt, quality=quality)
        newimg.seek(0)
        return newimg
Example #10
    def _download_manifest(self):
        """
        Download the manifest file, and process it to return an ISOManifest.

        :return: manifest of available ISOs
        :rtype:  pulp_rpm.plugins.db.models.ISOManifest
        """
        manifest_url = urljoin(self._repo_url, models.ISOManifest.FILENAME)
        # I probably should have called this manifest destination, but I couldn't help myself
        manifest_destiny = StringIO()
        manifest_request = request.DownloadRequest(manifest_url, manifest_destiny)
        self.downloader.download([manifest_request])
        # We can inspect the report status to see if we had an error when retrieving the manifest.
        if self.progress_report.state == self.progress_report.STATE_MANIFEST_FAILED:
            raise IOError(_("Could not retrieve %(url)s") % {'url': manifest_url})

        manifest_destiny.seek(0)
        try:
            manifest = models.ISOManifest(manifest_destiny, self._repo_url)
        except ValueError:
            self.progress_report.error_message = _('The PULP_MANIFEST file was not in the ' +
                                                   'expected format.')
            self.progress_report.state = self.progress_report.STATE_MANIFEST_FAILED
            raise ValueError(self.progress_report.error_message)

        return manifest
Example #11
def scale(data, width, height, overlay=None):
	"""Rescale the given image, optionally cropping it to make sure the result image has the specified width and height."""
	import Image as pil
	from cStringIO import StringIO
	
	max_width = width
	max_height = height

	input_file = StringIO(data)
	img = pil.open(input_file)
	
	if img.mode != "RGBA":
		img = img.convert("RGBA")
	
	src_width, src_height = img.size
	src_ratio = float(src_width) / float(src_height)
	dst_width = max_width
	dst_height = dst_width / src_ratio
	
	if dst_height > max_height:
		dst_height = max_height
		dst_width = dst_height * src_ratio
	
	img = img.resize((int(dst_width), int(dst_height)), pil.ANTIALIAS)
		
	tmp = StringIO()
	do_overlay(img, overlay)
	
	img.save(tmp, 'PNG')
	tmp.seek(0)
	output_data = tmp.getvalue()
	input_file.close()
	tmp.close()
	
	return output_data
Example #12
 def mktree(self, odb, entries):
     """create a tree from the given tree entries and safe it to the database"""
     sio = StringIO()
     tree_to_stream(entries, sio.write)
     sio.seek(0)
     istream = odb.store(IStream(str_tree_type, len(sio.getvalue()), sio))
     return istream.binsha
Example #13
def rrset_to_text(m):
    s = StringIO()

    try:
        if 'bailiwick' in m:
            s.write(';;  bailiwick: %s\n' % m['bailiwick'])

        if 'count' in m:
            s.write(';;      count: %s\n' % locale.format('%d', m['count'], True))

        if 'time_first' in m:
            s.write(';; first seen: %s\n' % sec_to_text(m['time_first']))
        if 'time_last' in m:
            s.write(';;  last seen: %s\n' % sec_to_text(m['time_last']))

        if 'zone_time_first' in m:
            s.write(';; first seen in zone file: %s\n' % sec_to_text(m['zone_time_first']))
        if 'zone_time_last' in m:
            s.write(';;  last seen in zone file: %s\n' % sec_to_text(m['zone_time_last']))

        if 'rdata' in m:
            for rdata in m['rdata']:
                s.write('%s IN %s %s\n' % (m['rrname'], m['rrtype'], rdata))

        s.seek(0)
        return s.read()
    finally:
        s.close()
Example #14
File: dill.py Project: brstrat/dill
def _create_stringo(value, position, closed):
    f = StringIO()
    if closed: f.close()
    else:
       f.write(value)
       f.seek(position)
    return f
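dill pairs a constructor like this with a reducer that captures the state to restore; a hedged sketch of that inverse (illustrative only, not dill's actual API):

def _stringo_state(f):
    # capture (value, position, closed) so _create_stringo can rebuild f
    try:
        return f.getvalue(), f.tell(), False
    except ValueError:   # a closed StringIO raises on getvalue()/tell()
        return '', 0, True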
Example #15
    def test_gzip_batch(self):
        request = testing.DummyRequest()
        request.headers["User-Agent"] = "TestApp/1.0"
        request.headers["X-Signature"] = "key=TestKey1, mac=d7aab40b9db8ae0e0b40d98e9c50b2cfc80ca06127b42fbbbdf146752b47a5ed"
        request.headers["Date"] = "Wed, 25 Nov 2015 06:25:24 GMT"
        request.environ["REMOTE_ADDR"] = "1.2.3.4"
        request.client_addr = "2.3.4.5"

        # Gzip
        request.headers["Content-Encoding"] = "gzip"
        f = StringIO()
        gz = gzip.GzipFile(fileobj=f, mode='wb')
        gz.write('[{"event1": "value"}, {"event2": "value"}]')
        gz.close()  # closing flushes the gzip trailer; the data is incomplete without it
        f.seek(0)
        gzipped_body = f.read()

        request.body = gzipped_body
        request.content_length = len(request.body)
        response = self.collector.process_request(request)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            [
                '{"ip": "2.3.4.5", "event": {"event1": "value"}, "time": "2015-11-17T12:34:56"}',
                '{"ip": "2.3.4.5", "event": {"event2": "value"}, "time": "2015-11-17T12:34:56"}',
            ],
            self.event_sink.events)
        self.assertEqual(self.error_sink.events, [])
        self.assertEqual(response.headers.get("Access-Control-Allow-Origin"), None)
Example #16
def export_query():
	"""export from report builder"""
	form_params = get_form_params()
	form_params["limit_page_length"] = None
	form_params["as_list"] = True
	doctype = form_params.doctype
	del form_params["doctype"]

	frappe.permissions.can_export(doctype, raise_exception=True)

	db_query = DatabaseQuery(doctype)
	ret = db_query.execute(**form_params)

	data = [['Sr'] + get_labels(db_query.fields, doctype)]
	for i, row in enumerate(ret):
		data.append([i+1] + list(row))

	# convert to csv
	from cStringIO import StringIO
	import csv

	f = StringIO()
	writer = csv.writer(f)
	for r in data:
		# encode only unicode type strings and not int, floats etc.
		writer.writerow(map(lambda v: isinstance(v, unicode) and v.encode('utf-8') or v, r))

	f.seek(0)
	frappe.response['result'] = unicode(f.read(), 'utf-8')
	frappe.response['type'] = 'csv'
	frappe.response['doctype'] = doctype
Example #17
def get_data(im):
    s = StringIO()
    if im.format != 'DIB':
        im.save(s, im.format)
    else:
        s.write(im.buf)
    s.seek(0)

    if im.format == 'BMP':
        bmp_f = s
        bmp_f.seek(10)                      # BITMAPFILEHEADER: pixel-data offset lives at byte 10
        offset = i32(bmp_f.read(4))
        dib_size = i32(bmp_f.read(4))       # biSize of the DIB header that follows
        dib = o32(dib_size) + bytearray(bmp_f.read(36))
        dib[:4] = o32(40)                   # force a 40-byte BITMAPINFOHEADER
        dib[8:12] = o32(i32(str(dib[8:12])) * 2)  # double the biHeight field
        dib[16:20] = o32(0)                 # biCompression = BI_RGB
        dib = dib[:40]
        bmp_f.seek(offset)
        data = bytearray(bmp_f.read())      # raw pixel data, prefixed with the rebuilt header
        data = dib + data
    else:
        data = bytearray(s.read())

    return data
Example #18
	def render_meta_tile (self, metatile, tile):
		data = self.render_tile(metatile)
		image = Image.open( StringIO(data) )

		meta_cols, meta_rows = self.get_meta_size(metatile.z)
		meta_height = meta_rows * self.tile_size + 2 * self.metabuffer[1]
		for i in range(meta_cols):
			for j in range(meta_rows):
				minx = i * self.tile_size + self.metabuffer[0]
				maxx = minx + self.tile_size
				### this next calculation is because image origin is (top,left)
				maxy = meta_height - (j * self.tile_size + self.metabuffer[1])
				miny = maxy - self.tile_size
				subimage = image.crop((minx, miny, maxx, maxy))
				subimage.info = image.info
				buffer = StringIO()
				subimage.save(buffer, self.image_format, quality=85)
				buffer.seek(0)
				subdata = buffer.read()
				x = metatile.x * self.metasize[0] + i
				y = metatile.y * self.metasize[1] + j
				subtile = Tile( self, x, y, metatile.z )
				self.cache.set( subtile, subdata )
				if x == tile.x and y == tile.y:
					tile.data = subdata

		return tile.data
Example #19
def tokenize_python_to_unmatched_close_curly(source_text, start, line_starts):
    """Apply Python's tokenize to source_text starting at index start
    while matching open and close curly braces.  When an unmatched
    close curly brace is found, return its index.  If not found,
    return len(source_text).  If there's a tokenization error, return
    the position of the error.
    """
    stream = StringIO(source_text)
    stream.seek(start)
    nesting = 0

    try:
        for kind, text, token_start, token_end, line_text \
                in tokenize.generate_tokens(stream.readline):

            if text == '{':
                nesting += 1
            elif text == '}':
                nesting -= 1
                if nesting < 0:
                    return token_pos_to_index(token_start, start, line_starts)

    except tokenize.TokenError as error:
        (message, error_pos) = error.args
        return token_pos_to_index(error_pos, start, line_starts)

    return len(source_text)
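A usage sketch; token_pos_to_index is a project helper not shown above, so a hypothetical stand-in is included (its real signature is an assumption):

def token_pos_to_index(token_pos, start, line_starts):
    # hypothetical stand-in: map tokenize's (1-based row, 0-based col)
    # pair to an absolute index into the source text
    row, col = token_pos
    return line_starts[row - 1] + col

source = "d = {'a': 1}\n}\n"
line_starts = [0, 13]    # offsets at which each line of source begins
print tokenize_python_to_unmatched_close_curly(source, 0, line_starts)  # 13, the stray '}'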
Example #20
    def show_image(self, colorbar=False):
        # start xpans if needed
        ds9.ds9_xpans()
        # start ds9 if need, or connect to existing
        display = ds9.ds9(target='validate')
        if self.frame_number is None:
            # display.set('frame delete all')
            display.set('frame new')
            display.set('scale zscale')
            display.set('cmap invert yes')
            f = StringIO()
            self.hdulist.writeto(f)
            f.flush()
            f.seek(0)
            hdulist = fits.open(f)
            for hdu in hdulist:
                del hdu.header['PV*']
            display.set_pyfits(hdulist)
            self.frame_number = display.get('frame frameno')
            display.set('frame center {}'.format(self.frame_number))
            display.set('zoom to fit')
            display.set('wcs align yes')
        display.set('frame frameno {}'.format(self.frame_number))

        self._interaction_context = InteractionContext(self)

        self.number_of_images_displayed += 1
Example #21
    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):

        try:
            if self.image.file is not None:
                img = Image.open(self.image.file)

                thumbnail = img.resize((settings.GUDFUD_USER_THUMBNAIL_WIDTH, settings.GUDFUD_USER_THUMBNAIL_HEIGHT),
                                       Image.ANTIALIAS)

                temp_handle_img = StringIO()
                img.save(temp_handle_img, 'jpeg')
                temp_handle_img.seek(0)

                temp_handle_thumbnail = StringIO()
                thumbnail.save(temp_handle_thumbnail, 'jpeg')
                temp_handle_thumbnail.seek(0)

                fname_thumbnail = str(self.id) + ".jpeg"
                suf_thumbnail = SimpleUploadedFile(fname_thumbnail, temp_handle_thumbnail.read(),
                                                   content_type='image/jpeg')
                fname_img = str(self.id) + ".jpeg"
                suf_img = SimpleUploadedFile(fname_img, temp_handle_img.read(), content_type='image/jpeg')

                self.thumbnail_image.save(fname_thumbnail, suf_thumbnail, save=False)
                self.image.save(fname_img, suf_img, save=False)
        except ValueError:
            pass

        super(BaseUser, self).save(force_insert, force_update, using, update_fields)
Example #22
    def streamFile(self, site, inner_path):
        location = 0
        if config.use_tempfiles:
            buff = tempfile.SpooledTemporaryFile(max_size=16 * 1024, mode='w+b')
        else:
            buff = StringIO()

        s = time.time()
        while True:  # Read in 512k parts
            res = self.request("streamFile", {"site": site, "inner_path": inner_path, "location": location}, stream_to=buff)

            if not res:  # Error
                self.log("Invalid response: %s" % res)
                return False

            if res["location"] == res["size"]:  # End of file
                break
            else:
                location = res["location"]

        self.download_bytes += res["location"]
        self.download_time += (time.time() - s)
        self.site.settings["bytes_recv"] = self.site.settings.get("bytes_recv", 0) + res["location"]
        buff.seek(0)
        return buff
Example #23
def convert(fname, writer):
    io = StringIO()
    csvwriter = csv.writer(io)
    match = None
    for line in open(fname, 'rb'):
        row = [r.replace(' ', '').strip() for r in line.strip().split()]
        if len(row) == 2:
            match.set_date(row[0])
        elif len(row) == 3:
            m = re.search(START, row[0])
            match = Match(m.group('fixture'))
            match.set_date(row[1])
        elif len(row) > 3:
            if row[0].find(':') > 0:
                match.set_time(row[0])
                match.home = row[1]
                match.away = row[3]
                match.stadium = row[4]
                if len(row) > 5:
                    match.tv = row[5]
            else:
                match.home = row[0]
                match.away = row[2]
                match.stadium = row[3]
        else:
            continue
        if match and match.home and match.away:
            csvwriter.writerow(match.to_row())
            match = Match(match.fixture, match.kickoff)

    io.seek(0)
    writer.write(io.read())
Example #24
def generate_docx(request):
    # selected_checkboxes = request.POST.getlist('ch_country')
    country = request.GET['name']
    slug = request.GET['slug']
    q, UC_obj = get_news_set(request, slug)
    if q.count() == 0:
        response = HttpResponseNotModified()
        response['Count'] = q.count()
        response['Content-Disposition'] = 'attachment; filename=empty'
        return response
    try:
        UC_obj.last_time = q.order_by('-download_time')[0].download_time
        UC_obj.save()
    except IndexError:
        pass

    document = create_docx(q)

    f = StringIO()
    document.save(f)
    length = f.tell()
    f.seek(0)
    response = HttpResponse(
        f.getvalue(),
        content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document'
    )
    response['Content-Disposition'] = 'attachment; filename=pauk_{:_<10}_{}.docx'.format(slug, datetime.now().strftime(
        '%H.%M_%d.%m.%Y'))
    response['Content-Length'] = length
    response['Count'] = q.count()
    return response
Example #25
    def open(self, enforce_checksum=False):
        """Opens the compressed file and returns a file-like object that
        can be used to access its uncompressed content.

        :param enforce_checksum: If the checksum validation should be enforced
        :type enforce_checksum: bool

        :return: file-like object with the uncompressed file content
        :rtype: file

        :raise Exception: If the file to open is a directory
        :raise DecompressError: If there's a decompression error
        :raise SAPCARInvalidFileException: If the file is invalid
        :raise SAPCARInvalidChecksumException: If the checksum is invalid
        """
        # Check that the type is file, so we don't try to extract from a directory
        if self.is_directory():
            raise Exception("Invalid file type")

        # Extract the file to a file-like object
        out_file = StringIO()
        checksum = self._file_format.extract(out_file)
        out_file.seek(0)

        # Validate the checksum if required
        if enforce_checksum:
            if checksum != self.calculate_checksum(out_file.getvalue()):
                raise SAPCARInvalidChecksumException("Invalid checksum found")
            out_file.seek(0)

        # Return the extracted file
        return out_file
Example #26
    def create_avatar(self):

        if not self.foto_capa:
            return

        THUMBNAIL_SIZE = (215, 215)
        DJANGO_TYPE = self.foto_capa.file.content_type

        if DJANGO_TYPE == 'image/jpeg':
            PIL_TYPE = 'jpeg'
            FILE_EXTENSION = 'jpg'
        elif DJANGO_TYPE == 'image/png':
            PIL_TYPE = 'png'
            FILE_EXTENSION = 'png'

        foto_capa = Image.open(StringIO(self.foto_capa.read()))
        foto_capa.thumbnail(THUMBNAIL_SIZE, Image.ANTIALIAS)
        temp_handle = StringIO()
        foto_capa.save(temp_handle, PIL_TYPE)
        temp_handle.seek(0)

        suf = SimpleUploadedFile(os.path.split(self.foto_capa.name)[-1],
                                 temp_handle.read(),
                                 content_type=DJANGO_TYPE)
        self.foto_avatar.save('%s_thumbnail.%s' % (os.path.splitext(suf.name)[0],
                                                   FILE_EXTENSION),
                              suf,
                              save=False)
Example #27
 def send_tryton_url(self, path):
     self.send_response(300)
     hostname = CONFIG['hostname'] or unicode(socket.getfqdn(), 'utf8')
     hostname = '.'.join(encodings.idna.ToASCII(part) for part in
         hostname.split('.'))
     values = {
         'hostname': hostname,
         'path': path,
         }
     content = StringIO()
     content.write('<html>')
     content.write('<head>')
     content.write('<meta http-equiv="Refresh" '
         'content="0;url=tryton://%(hostname)s%(path)s"/>' % values)
     content.write('<title>Moved</title>')
     content.write('</head>')
     content.write('<body>')
     content.write('<h1>Moved</h1>')
     content.write('<p>This page has moved to '
         '<a href="tryton://%(hostname)s%(path)s">'
         'tryton://%(hostname)s%(path)s</a>.</p>' % values)
     content.write('</body>')
     content.write('</html>')
     length = content.tell()
     content.seek(0)
     self.send_header('Location', 'tryton://%(hostname)s%(path)s' % values)
     self.send_header('Content-type', 'text/html')
     self.send_header('Content-Length', str(length))
     self.end_headers()
     self.copyfile(content, self.wfile)
     content.close()
Example #28
    def getFile(self, site, inner_path):
        # Use streamFile if client supports it
        if config.stream_downloads and self.connection and self.connection.handshake and self.connection.handshake["rev"] > 310:
            return self.streamFile(site, inner_path)

        location = 0
        if config.use_tempfiles:
            buff = tempfile.SpooledTemporaryFile(max_size=16 * 1024, mode='w+b')
        else:
            buff = StringIO()

        s = time.time()
        while True:  # Read in 512k parts
            res = self.request("getFile", {"site": site, "inner_path": inner_path, "location": location})

            if not res or "body" not in res:  # Error
                return False

            buff.write(res["body"])
            res["body"] = None  # Save memory
            if res["location"] == res["size"]:  # End of file
                break
            else:
                location = res["location"]

        self.download_bytes += res["location"]
        self.download_time += (time.time() - s)
        if self.site:
            self.site.settings["bytes_recv"] = self.site.settings.get("bytes_recv", 0) + res["location"]
        buff.seek(0)
        return buff
Example #29
	def test_commit_serialization(self):
		assert_commit_serialization(self.gitrwrepo, self.head_sha_2k, True)
		
		rwrepo = self.gitrwrepo
		make_object = rwrepo.odb.store
		# direct serialization - deserialization can be tested afterwards
		# serialization is probably limited on IO
		hc = rwrepo.commit(self.head_sha_2k)
		
		commits = list()
		nc = 5000
		st = time()
		for i in xrange(nc):
			cm = Commit(	rwrepo, Commit.NULL_BIN_SHA, hc.tree, 
							hc.author, hc.authored_date, hc.author_tz_offset, 
							hc.committer, hc.committed_date, hc.committer_tz_offset, 
							str(i), parents=hc.parents, encoding=hc.encoding)
			
			stream = StringIO()
			cm._serialize(stream)
			slen = stream.tell()
			stream.seek(0)
			
			cm.binsha = make_object(IStream(Commit.type, slen, stream)).binsha
		# END commit creation
		elapsed = time() - st
		
		print >> sys.stderr, "Serialized %i commits to loose objects in %f s ( %f commits / s )" % (nc, elapsed, nc / elapsed)
Example #30
    def do_POST(self):
        """Serve a POST request."""
        if not self.authenticate():
            return
        r, info, meta = self.deal_post_data()
        res = 'Success' if r else 'Failure'
        log.info("Upload {} {} by: {}".format(res, info, self.client_address))
        f = StringIO()
        ref = self.headers.get('referer', 'None')

        response = {'result': res,
                    'referer': ref,
                    'info': info}
        result = """
        <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
        <html><title>Upload Result Page</title>
        <body><h2>Upload Result Page</h2>
        <hr>
        <strong>{result}:</strong>
        {info}
        <br><a href="{referer}">back</a>"
        </body></html>
        """
        f.write(result.format(**response))
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        if f:
            self.copyfile(f, self.wfile)
            f.close()
Example #31
def explore():
    """
    Returns a gallery consisting of the images of one of the dbs
    """
    job = job_from_request()
    # Get LMDB
    db = flask.request.args.get('db', 'train')
    if 'train' in db.lower():
        task = job.train_db_task()
    elif 'val' in db.lower():
        task = job.val_db_task()
    elif 'test' in db.lower():
        task = job.test_db_task()
    if task is None:
        raise ValueError('No create_db task for {0}'.format(db))
    if task.status != 'D':
        raise ValueError(
            "This create_db task's status should be 'D' but is '{0}'".format(
                task.status))
    if task.backend != 'lmdb':
        raise ValueError(
            "Backend is {0} while expected backend is lmdb".format(
                task.backend))
    db_path = job.path(task.db_name)
    labels = task.get_labels()

    page = int(flask.request.args.get('page', 0))
    size = int(flask.request.args.get('size', 25))
    label = flask.request.args.get('label', None)

    if label is not None:
        try:
            label = int(label)
        except ValueError:
            label = None

    reader = DbReader(db_path)
    count = 0
    imgs = []

    min_page = max(0, page - 5)
    if label is None:
        total_entries = reader.total_entries
    else:
        total_entries = task.distribution[str(label)]

    max_page = min((total_entries - 1) / size, page + 5)
    pages = range(min_page, max_page + 1)
    for key, value in reader.entries():
        if count >= page * size:
            datum = caffe_pb2.Datum()
            datum.ParseFromString(value)
            if label is None or datum.label == label:
                if datum.encoded:
                    s = StringIO()
                    s.write(datum.data)
                    s.seek(0)
                    img = PIL.Image.open(s)
                else:
                    import caffe.io
                    arr = caffe.io.datum_to_array(datum)
                    # CHW -> HWC
                    arr = arr.transpose((1, 2, 0))
                    if arr.shape[2] == 1:
                        # HWC -> HW
                        arr = arr[:, :, 0]
                    elif arr.shape[2] == 3:
                        # BGR -> RGB
                        # XXX see issue #59
                        arr = arr[:, :, [2, 1, 0]]
                    img = PIL.Image.fromarray(arr)
                imgs.append({
                    "label": labels[datum.label],
                    "b64": utils.image.embed_image_html(img)
                })
        if label is None:
            count += 1
        else:
            datum = caffe_pb2.Datum()
            datum.ParseFromString(value)
            if datum.label == int(label):
                count += 1
        if len(imgs) >= size:
            break

    return flask.render_template('datasets/images/explore.html',
                                 page=page,
                                 size=size,
                                 job=job,
                                 imgs=imgs,
                                 labels=labels,
                                 pages=pages,
                                 label=label,
                                 total_entries=total_entries,
                                 db=db)
Example #32
class Curl:
    """
    Class to control curl on behalf of the application.
    """
    agent = 'Googlebot/2.1 (+http://www.google.com/bot.html)'
    cookie = None
    dropcookie = None
    referer = None
    headers = None
    proxy = None
    ignoreproxy = None
    tcp_nodelay = None
    xforw = None
    xclient = None
    atype = None
    acred = None
    #acert = None
    retries = 1
    delay = 0
    timeout = 30  # class default; set_timeout() overrides per instance
    followred = 0
    fli = None

    def __init__(
        self,
        base_url="",
        fakeheaders=[
            'Accept: image/gif, image/x-bitmap, image/jpeg, image/pjpeg',
            'Connection: Keep-Alive',
            'Content-type: application/x-www-form-urlencoded; charset=UTF-8'
        ]):
        self.handle = pycurl.Curl()
        self._closed = False
        self.set_url(base_url)
        self.verbosity = 0
        self.signals = 1
        self.payload = ""
        self.header = StringIO()
        self.fakeheaders = fakeheaders
        self.headers = None
        self.set_option(pycurl.SSL_VERIFYHOST, 0)
        self.set_option(pycurl.SSL_VERIFYPEER, 0)
        self.set_option(pycurl.SSLVERSION, pycurl.SSLVERSION_SSLv3)
        self.set_option(pycurl.FOLLOWLOCATION, 0)
        self.set_option(pycurl.MAXREDIRS, 50)
        # this is 'black magic'
        self.set_option(pycurl.COOKIEFILE, '/dev/null')
        self.set_option(pycurl.COOKIEJAR, '/dev/null')
        self.set_timeout(30)
        self.set_option(pycurl.NETRC, 1)
        self.set_nosignals(1)

        def payload_callback(x):
            self.payload += x

        self.set_option(pycurl.WRITEFUNCTION, payload_callback)

        def header_callback(x):
            self.header.write(x)

        self.set_option(pycurl.HEADERFUNCTION, header_callback)

    def set_url(self, url):
        """
        Set the base url.
        """
        self.base_url = url
        self.set_option(pycurl.URL, self.base_url)
        return url

    def set_cookie(self, cookie):
        """
        Set the app cookie.
        """
        self.cookie = cookie
        if self.dropcookie:
            self.set_option(pycurl.COOKIELIST, 'ALL')
            self.set_option(pycurl.COOKIE, None)
        else:
            self.set_option(pycurl.COOKIELIST, '')
            self.set_option(pycurl.COOKIE, self.cookie)
        return cookie

    def set_agent(self, agent):
        """
        Set the user agent.
        """
        self.agent = agent
        self.set_option(pycurl.USERAGENT, self.agent)
        return agent

    def set_referer(self, referer):
        """
        Set the referer.
        """
        self.referer = referer
        self.set_option(pycurl.REFERER, self.referer)
        return referer

    def set_headers(self, headers):
        """
        Set extra headers.
        """
        self.headers = headers
        self.headers = self.headers.split("\n")
        for headerValue in self.headers:
            header, value = headerValue.split(": ")

            if header and value:
                self.set_option(pycurl.HTTPHEADER, (header, value))
        return headers

    def set_proxy(self, ignoreproxy, proxy):
        """
        Set the proxy to use.
        """
        self.proxy = proxy
        self.ignoreproxy = ignoreproxy
        if ignoreproxy:
            self.set_option(pycurl.PROXY, "")
        else:
            self.set_option(pycurl.PROXY, self.proxy)
        return proxy

    def set_option(self, *args):
        """
        Set the given option.
        """
        self.handle.setopt(*args)

    def set_verbosity(self, level):
        """
        Set the verbosity level.
        """
        self.set_option(pycurl.VERBOSE, level)

    def set_nosignals(self, signals="1"):
        """
        Disable signals.

        curl will be using other means besides signals to timeout
        """
        self.signals = signals
        self.set_option(pycurl.NOSIGNAL, self.signals)
        return signals

    def set_tcp_nodelay(self, tcp_nodelay):
        """
        Set the TCP_NODELAY option.
        """
        self.tcp_nodelay = tcp_nodelay
        self.set_option(pycurl.TCP_NODELAY, tcp_nodelay)
        return tcp_nodelay

    def set_timeout(self, timeout):
        """
        Set timeout for requests.
        """
        self.timeout = timeout  # remembered; __request() and do_head_check() read it back
        self.set_option(pycurl.CONNECTTIMEOUT, timeout)
        self.set_option(pycurl.TIMEOUT, timeout)
        return timeout

    def set_follow_redirections(self, followred, fli):
        """
        Set follow locations parameters to follow redirection pages (302)
        """
        self.followred = followred
        self.fli = fli
        if followred:
            self.set_option(pycurl.FOLLOWLOCATION, 1)
            self.set_option(pycurl.MAXREDIRS, 50)
            if fli:
                self.set_option(pycurl.MAXREDIRS, fli)
        else:
            self.set_option(pycurl.FOLLOWLOCATION, 0)
        return followred

    def do_head_check(self, urls):
        """
        Send a HEAD request before starting to inject, to verify the stability of the target
        """
        for u in urls:
            self.set_option(pycurl.URL, u)
            self.set_option(pycurl.NOBODY, 1)
            self.set_option(pycurl.FOLLOWLOCATION, 0)
            self.set_option(pycurl.MAXREDIRS, 50)
            self.set_option(pycurl.SSL_VERIFYHOST, 0)
            self.set_option(pycurl.SSL_VERIFYPEER, 0)
            if self.fakeheaders:
                from XSSer.randomip import RandomIP
                if self.xforw:
                    generate_random_xforw = RandomIP()
                    xforwip = generate_random_xforw._generateip('')
                    xforwfakevalue = ['X-Forwarded-For: ' + str(xforwip)]
                if self.xclient:
                    generate_random_xclient = RandomIP()
                    xclientip = generate_random_xclient._generateip('')
                    xclientfakevalue = ['X-Client-IP: ' + str(xclientip)]
                if self.xforw:
                    self.set_option(pycurl.HTTPHEADER,
                                    self.fakeheaders + xforwfakevalue)
                    if self.xclient:
                        self.set_option(
                            pycurl.HTTPHEADER, self.fakeheaders +
                            xforwfakevalue + xclientfakevalue)
                elif self.xclient:
                    self.set_option(pycurl.HTTPHEADER,
                                    self.fakeheaders + xclientfakevalue)
            if self.headers:
                self.fakeheaders = self.fakeheaders + self.headers
            self.set_option(pycurl.HTTPHEADER, self.fakeheaders)
            if self.agent:
                self.set_option(pycurl.USERAGENT, self.agent)
            if self.referer:
                self.set_option(pycurl.REFERER, self.referer)
            if self.proxy:
                self.set_option(pycurl.PROXY, self.proxy)
            if self.ignoreproxy:
                self.set_option(pycurl.PROXY, "")
            if self.timeout:
                self.set_option(pycurl.CONNECTTIMEOUT, self.timeout)
                self.set_option(pycurl.TIMEOUT, self.timeout)
            if self.signals:
                self.set_option(pycurl.NOSIGNAL, self.signals)
            if self.tcp_nodelay:
                self.set_option(pycurl.TCP_NODELAY, self.tcp_nodelay)
            if self.cookie:
                self.set_option(pycurl.COOKIE, self.cookie)
            try:
                self.handle.perform()
            except:
                return
            if str(self.handle.getinfo(pycurl.HTTP_CODE)) in ["302", "301"]:
                self.set_option(pycurl.FOLLOWLOCATION, 1)

    def __request(self, relative_url=None):
        """
        Perform a request and returns the payload.
        """
        if self.fakeheaders:
            from XSSer.randomip import RandomIP
            if self.xforw:
                """
                Set the X-Forwarded-For to use.
                """
                generate_random_xforw = RandomIP()
                xforwip = generate_random_xforw._generateip('')
                #xforwip = '127.0.0.1'
                xforwfakevalue = ['X-Forwarded-For: ' + str(xforwip)]
            if self.xclient:
                """ 
                Set the X-Client-IP to use.
                """
                generate_random_xclient = RandomIP()
                xclientip = generate_random_xclient._generateip('')
                #xclientip = '127.0.0.1'
                xclientfakevalue = ['X-Client-IP: ' + str(xclientip)]
            if self.xforw:
                self.set_option(pycurl.HTTPHEADER,
                                self.fakeheaders + xforwfakevalue)
                if self.xclient:
                    self.set_option(
                        pycurl.HTTPHEADER,
                        self.fakeheaders + xforwfakevalue + xclientfakevalue)
            elif self.xclient:
                self.set_option(pycurl.HTTPHEADER,
                                self.fakeheaders + xclientfakevalue)
        if self.headers:
            # XXX sanitize user input
            self.fakeheaders = self.fakeheaders + self.headers
        self.set_option(pycurl.HTTPHEADER, self.fakeheaders)

        if self.agent:
            self.set_option(pycurl.USERAGENT, self.agent)
        if self.referer:
            self.set_option(pycurl.REFERER, self.referer)
        if self.proxy:
            self.set_option(pycurl.PROXY, self.proxy)
        if self.ignoreproxy:
            self.set_option(pycurl.PROXY, "")
        if relative_url:
            self.set_option(pycurl.URL,
                            os.path.join(self.base_url, relative_url))
        if self.timeout:
            self.set_option(pycurl.CONNECTTIMEOUT, self.timeout)
            self.set_option(pycurl.TIMEOUT, self.timeout)
        if self.signals:
            self.set_option(pycurl.NOSIGNAL, self.signals)
        if self.tcp_nodelay:
            self.set_option(pycurl.TCP_NODELAY, self.tcp_nodelay)
        if self.cookie:
            self.set_option(pycurl.COOKIE, self.cookie)
        if self.followred:
            self.set_option(pycurl.FOLLOWLOCATION, 1)
            self.set_option(pycurl.MAXREDIRS, 50)
            if self.fli:
                self.set_option(pycurl.MAXREDIRS, int(self.fli))
        else:
            self.set_option(pycurl.FOLLOWLOCATION, 0)
            if self.fli:
                print "\n[E] You must launch --follow-redirects command to set correctly this redirections limit\n"
                return
        """ 
        Set the HTTP authentication method: Basic, Digest, GSS, NTLM or Certificate
        """
        if self.atype and self.acred:
            atypelower = self.atype.lower()
            if atypelower not in ("basic", "digest", "ntlm", "gss"):
                print "\n[E] HTTP authentication type value must be: Basic, Digest, GSS or NTLM\n"
                return
            acredregexp = re.search("^(.*?)\:(.*?)$", self.acred)
            if not acredregexp:
                print "\n[E] HTTP authentication credentials value must be in format username:password\n"
                return
            user = acredregexp.group(1)
            password = acredregexp.group(2)
            self.set_option(pycurl.USERPWD, "%s:%s" % (user, password))

            if atypelower == "basic":
                self.set_option(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
            elif atypelower == "digest":
                self.set_option(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
            elif atypelower == "ntlm":
                self.set_option(pycurl.HTTPAUTH, pycurl.HTTPAUTH_NTLM)
            elif atypelower == "gss":
                self.set_option(pycurl.HTTPAUTH, pycurl.HTTPAUTH_GSSNEGOTIATE)
            else:
                self.set_option(pycurl.HTTPAUTH, None)

            self.set_option(pycurl.HTTPHEADER, ["Accept:"])

        elif self.atype and not self.acred:
            print "\n[E] You specified the HTTP authentication type, but did not provide the credentials\n"
            return
        elif not self.atype and self.acred:
            print "\n[E] You specified the HTTP authentication credentials, but did not provide the type\n"
            return
        #if self.acert:
        #    acertregexp = re.search("^(.+?),\s*(.+?)$", self.acert)
        #    if not acertregexp:
        #        print "\n[E] HTTP authentication certificate option must be 'key_file,cert_file'\n"
        #        return
        #    # os.path.expanduser for support of paths with ~
        #    key_file = os.path.expanduser(acertregexp.group(1))
        #    cert_file = os.path.expanduser(acertregexp.group(2))
        #    self.set_option(pycurl.SSL_VERIFYHOST, 0)
        #    self.set_option(pycurl.SSL_VERIFYPEER, 1)
        #    self.set_option(pycurl.SSH_PUBLIC_KEYFILE, key_file)
        #    self.set_option(pycurl.CAINFO, cert_file)
        #    self.set_option(pycurl.SSLCERT, cert_file)
        #    self.set_option(pycurl.SSLCERTTYPE, 'p12')
        #    self.set_option(pycurl.SSLCERTPASSWD, '1234')
        #    self.set_option(pycurl.SSLKEY, key_file)
        #    self.set_option(pycurl.SSLKEYPASSWD, '1234')
        #    for file in (key_file, cert_file):
        #        if not os.path.exists(file):
        #            print "\n[E] File '%s' doesn't exist\n" % file
        #            return

        self.set_option(pycurl.SSL_VERIFYHOST, 0)
        self.set_option(pycurl.SSL_VERIFYPEER, 0)

        self.header.seek(0, 0)
        self.payload = ""

        for count in range(0, self.retries):
            time.sleep(self.delay)
            if self.dropcookie:
                self.set_option(pycurl.COOKIELIST, 'ALL')
                nocookie = ['Set-Cookie: ', '']
                self.set_option(pycurl.HTTPHEADER, self.fakeheaders + nocookie)
            try:
                self.handle.perform()
            except:
                return
        return self.payload

    def get(self, url="", params=None):
        """
        Get a url.
        """
        if params:
            url += "?" + urllib.urlencode(params)
        self.set_option(pycurl.HTTPGET, 1)
        return self.__request(url)

    def post(self, cgi, params):
        """
        Post a url.
        """
        self.set_option(pycurl.POST, 1)
        self.set_option(pycurl.POSTFIELDS, params)
        return self.__request(cgi)

    def body(self):
        """
        Get the payload from the latest operation.
        """
        return self.payload

    def info(self):
        """
        Get an info dictionary from the selected url.
        """
        self.header.seek(0, 0)
        url = self.handle.getinfo(pycurl.EFFECTIVE_URL)
        if url[:5] == 'http:':
            self.header.readline()
            m = mimetools.Message(self.header)
        else:
            m = mimetools.Message(StringIO())
        #m['effective-url'] = url
        m['http-code'] = str(self.handle.getinfo(pycurl.HTTP_CODE))
        m['total-time'] = str(self.handle.getinfo(pycurl.TOTAL_TIME))
        m['namelookup-time'] = str(self.handle.getinfo(pycurl.NAMELOOKUP_TIME))
        m['connect-time'] = str(self.handle.getinfo(pycurl.CONNECT_TIME))
        #m['pretransfer-time'] = str(self.handle.getinfo(pycurl.PRETRANSFER_TIME))
        #m['redirect-time'] = str(self.handle.getinfo(pycurl.REDIRECT_TIME))
        #m['redirect-count'] = str(self.handle.getinfo(pycurl.REDIRECT_COUNT))
        #m['size-upload'] = str(self.handle.getinfo(pycurl.SIZE_UPLOAD))
        #m['size-download'] = str(self.handle.getinfo(pycurl.SIZE_DOWNLOAD))
        #m['speed-upload'] = str(self.handle.getinfo(pycurl.SPEED_UPLOAD))
        m['header-size'] = str(self.handle.getinfo(pycurl.HEADER_SIZE))
        m['request-size'] = str(self.handle.getinfo(pycurl.REQUEST_SIZE))
        m['response-code'] = str(self.handle.getinfo(pycurl.RESPONSE_CODE))
        m['ssl-verifyresult'] = str(
            self.handle.getinfo(pycurl.SSL_VERIFYRESULT))
        m['content-type'] = (self.handle.getinfo(pycurl.CONTENT_TYPE)
                             or '').strip(';')
        m['cookielist'] = str(self.handle.getinfo(pycurl.INFO_COOKIELIST))
        #m['content-length-download'] = str(self.handle.getinfo(pycurl.CONTENT_LENGTH_DOWNLOAD))
        #m['content-length-upload'] = str(self.handle.getinfo(pycurl.CONTENT_LENGTH_UPLOAD))
        #m['encoding'] = str(self.handle.getinfo(pycurl.ENCODING))
        return m

    @classmethod
    def print_options(cls):
        """
        Print selected options.
        """
        print "\n[-]Verbose: active"
        print "[-]Cookie:", cls.cookie
        print "[-]HTTP User Agent:", cls.agent
        print "[-]HTTP Referer:", cls.referer
        print "[-]Extra HTTP Headers:", cls.headers
        if cls.xforw == True:
            print "[-]X-Forwarded-For:", "Random IP"
        else:
            print "[-]X-Forwarded-For:", cls.xforw
        if cls.xclient == True:
            print "[-]X-Client-IP:", "Random IP"
        else:
            print "[-]X-Client-IP:", cls.xclient
        print "[-]Authentication Type:", cls.atype
        print "[-]Authentication Credentials:", cls.acred
        if cls.ignoreproxy == True:
            print "[-]Proxy:", "Ignoring system default HTTP proxy"
        else:
            print "[-]Proxy:", cls.proxy
        print "[-]Timeout:", cls.timeout
        if cls.tcp_nodelay == True:
            print "[-]Delaying:", "TCP_NODELAY activate"
        else:
            print "[-]Delaying:", cls.delay, "seconds"
        if cls.followred == True:
            print "[-]Follow 302 code:", "active"
            if cls.fli:
                print "[-]Limit to follow:", cls.fli
        else:
            print "[-]Follow 302 code:", "inactive"

        print "[-]Retries:", cls.retries, "\n"

    def answered(self, check):
        """
        Check for occurrence of a string in the payload from
        the latest operation.
        """
        return self.payload.find(check) >= 0

    def close(self):
        """
        Close the curl handle.
        """
        self.handle.close()
        self.header.close()
        self._closed = True

    def __del__(self):
        if not self._closed:
            self.close()
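A short usage sketch; note that __request imports XSSer.randomip when fake headers are set, so the XSSer package must be importable (the URL is an assumption):

c = Curl()
body = c.get("http://example.com/")    # assumed URL; requires network access
if body is not None:
    print c.info()['http-code'], len(body)
c.close()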
Example #33
def cluster(ctx, config):
    """
    Handle the creation and removal of a ceph cluster.

    On startup:
        Create directories needed for the cluster.
        Create remote journals for all osds.
        Create and set keyring.
        Copy the monmap to the test systems.
        Setup mon nodes.
        Setup mds nodes.
        Mkfs osd nodes.
        Add keyring information to monmaps
        Mkfs mon nodes.

    On exit:
        If errors occurred, extract a failure message and store in ctx.summary.
        Unmount all test files and temporary journaling files.
        Save the monitor information and archive all ceph logs.
        Cleanup the keyring setup, and remove all monitor map and data files left over.

    :param ctx: Context
    :param config: Configuration
    """
    if ctx.config.get('use_existing_cluster', False) is True:
        log.info("'use_existing_cluster' is true; skipping cluster creation")
        yield

    testdir = teuthology.get_testdir(ctx)
    log.info('Creating ceph cluster...')
    run.wait(
        ctx.cluster.run(
            args=[
                'install',
                '-d',
                '-m0755',
                '--',
                '{tdir}/data'.format(tdir=testdir),
            ],
            wait=False,
        ))

    run.wait(
        ctx.cluster.run(
            args=[
                'sudo',
                'install',
                '-d',
                '-m0777',
                '--',
                '/var/run/ceph',
            ],
            wait=False,
        ))

    devs_to_clean = {}
    remote_to_roles_to_devs = {}
    remote_to_roles_to_journals = {}
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    for remote, roles_for_host in osds.remotes.iteritems():
        devs = teuthology.get_scratch_devices(remote)
        roles_to_devs = {}
        roles_to_journals = {}
        if config.get('fs'):
            log.info('fs option selected, checking for scratch devs')
            log.info('found devs: %s' % (str(devs), ))
            devs_id_map = teuthology.get_wwn_id_map(remote, devs)
            iddevs = devs_id_map.values()
            roles_to_devs = assign_devs(
                teuthology.roles_of_type(roles_for_host, 'osd'), iddevs)
            if len(roles_to_devs) < len(iddevs):
                iddevs = iddevs[len(roles_to_devs):]
            devs_to_clean[remote] = []

        if config.get('block_journal'):
            log.info('block journal enabled')
            roles_to_journals = assign_devs(
                teuthology.roles_of_type(roles_for_host, 'osd'), iddevs)
            log.info('journal map: %s', roles_to_journals)

        if config.get('tmpfs_journal'):
            log.info('tmpfs journal enabled')
            roles_to_journals = {}
            remote.run(args=['sudo', 'mount', '-t', 'tmpfs', 'tmpfs', '/mnt'])
            for osd in teuthology.roles_of_type(roles_for_host, 'osd'):
                tmpfs = '/mnt/osd.%s' % osd
                roles_to_journals[osd] = tmpfs
                remote.run(args=['truncate', '-s', '1500M', tmpfs])
            log.info('journal map: %s', roles_to_journals)

        log.info('dev map: %s' % (str(roles_to_devs), ))
        remote_to_roles_to_devs[remote] = roles_to_devs
        remote_to_roles_to_journals[remote] = roles_to_journals

    log.info('Generating config...')
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [role_list for (remote, role_list) in remotes_and_roles]
    ips = [
        host for (host, port) in (remote.ssh.get_transport().getpeername()
                                  for (remote, role_list) in remotes_and_roles)
    ]
    conf = teuthology.skeleton_config(ctx, roles=roles, ips=ips)
    for remote, roles_to_journals in remote_to_roles_to_journals.iteritems():
        for role, journal in roles_to_journals.iteritems():
            key = "osd." + str(role)
            if key not in conf:
                conf[key] = {}
            conf[key]['osd journal'] = journal
    for section, keys in config['conf'].iteritems():
        for key, value in keys.iteritems():
            log.info("[%s] %s = %s" % (section, key, value))
            if section not in conf:
                conf[section] = {}
            conf[section][key] = value

    if config.get('tmpfs_journal'):
        conf['journal dio'] = False

    ctx.ceph = argparse.Namespace()
    ctx.ceph.conf = conf

    keyring_path = config.get('keyring_path', '/etc/ceph/ceph.keyring')

    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)

    firstmon = teuthology.get_first_mon(ctx, config)

    log.info('Setting up %s...' % firstmon)
    ctx.cluster.only(firstmon).run(args=[
        'sudo',
        'adjust-ulimits',
        'ceph-coverage',
        coverage_dir,
        'ceph-authtool',
        '--create-keyring',
        keyring_path,
    ], )
    ctx.cluster.only(firstmon).run(args=[
        'sudo',
        'adjust-ulimits',
        'ceph-coverage',
        coverage_dir,
        'ceph-authtool',
        '--gen-key',
        '--name=mon.',
        keyring_path,
    ], )
    ctx.cluster.only(firstmon).run(args=[
        'sudo',
        'chmod',
        '0644',
        keyring_path,
    ], )
    (mon0_remote, ) = ctx.cluster.only(firstmon).remotes.keys()
    fsid = teuthology.create_simple_monmap(
        ctx,
        remote=mon0_remote,
        conf=conf,
    )
    if 'global' not in conf:
        conf['global'] = {}
    conf['global']['fsid'] = fsid

    conf_path = config.get('conf_path', DEFAULT_CONF_PATH)
    log.info('Writing %s for FSID %s...' % (conf_path, fsid))
    write_conf(ctx, conf_path)

    log.info('Creating admin key on %s...' % firstmon)
    ctx.cluster.only(firstmon).run(args=[
        'sudo',
        'adjust-ulimits',
        'ceph-coverage',
        coverage_dir,
        'ceph-authtool',
        '--gen-key',
        '--name=client.admin',
        '--set-uid=0',
        '--cap',
        'mon',
        'allow *',
        '--cap',
        'osd',
        'allow *',
        '--cap',
        'mds',
        'allow *',
        keyring_path,
    ], )

    log.info('Copying monmap to all nodes...')
    keyring = teuthology.get_file(
        remote=mon0_remote,
        path=keyring_path,
    )
    monmap = teuthology.get_file(
        remote=mon0_remote,
        path='{tdir}/monmap'.format(tdir=testdir),
    )

    for rem in ctx.cluster.remotes.iterkeys():
        # copy mon key and initial monmap
        log.info('Sending monmap to node {remote}'.format(remote=rem))
        teuthology.sudo_write_file(remote=rem,
                                   path=keyring_path,
                                   data=keyring,
                                   perms='0644')
        teuthology.write_file(
            remote=rem,
            path='{tdir}/monmap'.format(tdir=testdir),
            data=monmap,
        )

    log.info('Setting up mon nodes...')
    mons = ctx.cluster.only(teuthology.is_type('mon'))
    run.wait(
        mons.run(
            args=[
                'adjust-ulimits',
                'ceph-coverage',
                coverage_dir,
                'osdmaptool',
                '-c',
                conf_path,
                '--clobber',
                '--createsimple',
                '{num:d}'.format(num=teuthology.num_instances_of_type(
                    ctx.cluster, 'osd'), ),
                '{tdir}/osdmap'.format(tdir=testdir),
                '--pg_bits',
                '2',
                '--pgp_bits',
                '4',
            ],
            wait=False,
        ), )

    log.info('Setting up mds nodes...')
    mdss = ctx.cluster.only(teuthology.is_type('mds'))
    for remote, roles_for_host in mdss.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, 'mds'):
            remote.run(args=[
                'sudo',
                'mkdir',
                '-p',
                '/var/lib/ceph/mds/ceph-{id}'.format(id=id_),
                run.Raw('&&'),
                'sudo',
                'adjust-ulimits',
                'ceph-coverage',
                coverage_dir,
                'ceph-authtool',
                '--create-keyring',
                '--gen-key',
                '--name=mds.{id}'.format(id=id_),
                '/var/lib/ceph/mds/ceph-{id}/keyring'.format(id=id_),
            ], )

    cclient.create_keyring(ctx)
    log.info('Running mkfs on osd nodes...')

    ctx.disk_config = argparse.Namespace()
    ctx.disk_config.remote_to_roles_to_dev = remote_to_roles_to_devs
    ctx.disk_config.remote_to_roles_to_journals = remote_to_roles_to_journals
    ctx.disk_config.remote_to_roles_to_dev_mount_options = {}
    ctx.disk_config.remote_to_roles_to_dev_fstype = {}

    log.info("ctx.disk_config.remote_to_roles_to_dev: {r}".format(
        r=str(ctx.disk_config.remote_to_roles_to_dev)))
    for remote, roles_for_host in osds.remotes.iteritems():
        roles_to_devs = remote_to_roles_to_devs[remote]
        roles_to_journals = remote_to_roles_to_journals[remote]

        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
            remote.run(args=[
                'sudo',
                'mkdir',
                '-p',
                '/var/lib/ceph/osd/ceph-{id}'.format(id=id_),
            ])
            log.info(str(roles_to_journals))
            log.info(id_)
            if roles_to_devs.get(id_):
                dev = roles_to_devs[id_]
                fs = config.get('fs')
                package = None
                mkfs_options = config.get('mkfs_options')
                mount_options = config.get('mount_options')
                if fs == 'btrfs':
                    #package = 'btrfs-tools'
                    if mount_options is None:
                        mount_options = ['noatime', 'user_subvol_rm_allowed']
                    if mkfs_options is None:
                        mkfs_options = [
                            '-m', 'single', '-l', '32768', '-n', '32768'
                        ]
                if fs == 'xfs':
                    #package = 'xfsprogs'
                    if mount_options is None:
                        mount_options = ['noatime']
                    if mkfs_options is None:
                        mkfs_options = ['-f', '-i', 'size=2048']
                if fs == 'ext4' or fs == 'ext3':
                    if mount_options is None:
                        mount_options = ['noatime', 'user_xattr']

                if mount_options is None:
                    mount_options = []
                if mkfs_options is None:
                    mkfs_options = []
                mkfs = ['mkfs.%s' % fs] + mkfs_options
                log.info('%s on %s on %s' % (mkfs, dev, remote))
                if package is not None:
                    remote.run(
                        args=['sudo', 'apt-get', 'install', '-y', package],
                        stdout=StringIO(),
                    )

                try:
                    remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs +
                               [dev])
                except run.CommandFailedError:
                    # Newer btrfs-tools doesn't prompt for overwrite; use -f
                    if '-f' not in mkfs_options:
                        mkfs_options.append('-f')
                        mkfs = ['mkfs.%s' % fs] + mkfs_options
                        log.info('%s on %s on %s' % (mkfs, dev, remote))
                    remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs +
                               [dev])

                log.info('mount %s on %s -o %s' %
                         (dev, remote, ','.join(mount_options)))
                remote.run(args=[
                    'sudo',
                    'mount',
                    '-t',
                    fs,
                    '-o',
                    ','.join(mount_options),
                    dev,
                    os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(
                        id=id_)),
                ])
                if remote not in ctx.disk_config.remote_to_roles_to_dev_mount_options:
                    ctx.disk_config.remote_to_roles_to_dev_mount_options[
                        remote] = {}
                ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][
                    id_] = mount_options
                if remote not in ctx.disk_config.remote_to_roles_to_dev_fstype:
                    ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {}
                ctx.disk_config.remote_to_roles_to_dev_fstype[remote][id_] = fs
                devs_to_clean[remote].append(
                    os.path.join('/var/lib/ceph/osd',
                                 'ceph-{id}'.format(id=id_)))

        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
            remote.run(args=[
                'sudo',
                'MALLOC_CHECK_=3',
                'adjust-ulimits',
                'ceph-coverage',
                coverage_dir,
                'ceph-osd',
                '--mkfs',
                '--mkkey',
                '-i',
                id_,
                '--monmap',
                '{tdir}/monmap'.format(tdir=testdir),
            ], )

    log.info('Reading keys from all nodes...')
    keys_fp = StringIO()
    keys = []
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        for type_ in ['mds', 'osd']:
            for id_ in teuthology.roles_of_type(roles_for_host, type_):
                data = teuthology.get_file(
                    remote=remote,
                    path='/var/lib/ceph/{type}/ceph-{id}/keyring'.format(
                        type=type_,
                        id=id_,
                    ),
                    sudo=True,
                )
                keys.append((type_, id_, data))
                keys_fp.write(data)
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        for type_ in ['client']:
            for id_ in teuthology.roles_of_type(roles_for_host, type_):
                data = teuthology.get_file(
                    remote=remote,
                    path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_))
                keys.append((type_, id_, data))
                keys_fp.write(data)

    log.info('Adding keys to all mons...')
    writes = mons.run(
        args=[
            'sudo',
            'tee',
            '-a',
            keyring_path,
        ],
        stdin=run.PIPE,
        wait=False,
        stdout=StringIO(),
    )
    keys_fp.seek(0)
    teuthology.feed_many_stdins_and_close(keys_fp, writes)
    run.wait(writes)
    for type_, id_, data in keys:
        run.wait(
            mons.run(
                args=[
                    'sudo',
                    'adjust-ulimits',
                    'ceph-coverage',
                    coverage_dir,
                    'ceph-authtool',
                    keyring_path,
                    '--name={type}.{id}'.format(
                        type=type_,
                        id=id_,
                    ),
                ] + list(teuthology.generate_caps(type_)),
                wait=False,
            ), )

    log.info('Running mkfs on mon nodes...')
    for remote, roles_for_host in mons.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, 'mon'):
            remote.run(args=[
                'sudo',
                'mkdir',
                '-p',
                '/var/lib/ceph/mon/ceph-{id}'.format(id=id_),
            ], )
            remote.run(args=[
                'sudo',
                'adjust-ulimits',
                'ceph-coverage',
                coverage_dir,
                'ceph-mon',
                '--mkfs',
                '-i',
                id_,
                '--monmap={tdir}/monmap'.format(tdir=testdir),
                '--osdmap={tdir}/osdmap'.format(tdir=testdir),
                '--keyring={kpath}'.format(kpath=keyring_path),
            ], )

    run.wait(
        mons.run(
            args=[
                'rm',
                '--',
                '{tdir}/monmap'.format(tdir=testdir),
                '{tdir}/osdmap'.format(tdir=testdir),
            ],
            wait=False,
        ), )

    try:
        yield
    except Exception:
        # we need to know this below
        ctx.summary['success'] = False
        raise
    finally:
        (mon0_remote, ) = ctx.cluster.only(firstmon).remotes.keys()

        log.info('Checking cluster log for badness...')

        def first_in_ceph_log(pattern, excludes):
            """
            Find the first occurrence of the specified pattern in the Ceph log.
            Returns None if none found.

            :param pattern: Pattern scanned for.
            :param excludes: Patterns to ignore.
            :return: First line of text (or None if not found)
            """
            args = [
                'sudo',
                'egrep',
                pattern,
                '/var/log/ceph/ceph.log',
            ]
            for exclude in excludes:
                args.extend([run.Raw('|'), 'egrep', '-v', exclude])
            args.extend([
                run.Raw('|'),
                'head',
                '-n',
                '1',
            ])
            r = mon0_remote.run(
                stdout=StringIO(),
                args=args,
            )
            stdout = r.stdout.getvalue()
            if stdout != '':
                return stdout
            return None

        if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]',
                             config['log_whitelist']) is not None:
            log.warning('Found errors (ERR|WRN|SEC) in cluster log')
            ctx.summary['success'] = False
            # use the most severe problem as the failure reason
            if 'failure_reason' not in ctx.summary:
                for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']:
                    match = first_in_ceph_log(pattern, config['log_whitelist'])
                    if match is not None:
                        ctx.summary['failure_reason'] = \
                            '"{match}" in cluster log'.format(
                            match=match.rstrip('\n'),
                            )
                        break

        for remote, dirs in devs_to_clean.iteritems():
            for dir_ in dirs:
                log.info('Unmounting %s on %s' % (dir_, remote))
                try:
                    remote.run(args=[
                        'sync',
                        run.Raw('&&'), 'sudo', 'umount', '-f', dir_
                    ])
                except Exception as e:
                    remote.run(args=[
                        'sudo',
                        run.Raw('PATH=/usr/sbin:$PATH'),
                        'lsof',
                        run.Raw(';'),
                        'ps',
                        'auxf',
                    ])
                    raise e

        if config.get('tmpfs_journal'):
            log.info('tmpfs journal enabled - unmounting tmpfs at /mnt')
            for remote, roles_for_host in osds.remotes.iteritems():
                remote.run(
                    args=['sudo', 'umount', '-f', '/mnt'],
                    check_status=False,
                )

        if ctx.archive is not None and \
           not (ctx.config.get('archive-on-error') and ctx.summary['success']):

            # archive mon data, too
            log.info('Archiving mon data...')
            path = os.path.join(ctx.archive, 'data')
            os.makedirs(path)
            for remote, roles in mons.remotes.iteritems():
                for role in roles:
                    if role.startswith('mon.'):
                        teuthology.pull_directory_tarball(
                            remote, '/var/lib/ceph/mon',
                            path + '/' + role + '.tgz')

        log.info('Cleaning ceph cluster...')
        run.wait(
            ctx.cluster.run(
                args=[
                    'sudo',
                    'rm',
                    '-rf',
                    '--',
                    conf_path,
                    keyring_path,
                    '{tdir}/data'.format(tdir=testdir),
                    '{tdir}/monmap'.format(tdir=testdir),
                ],
                wait=False,
            ), )
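The first_in_ceph_log helper above drives egrep over ssh; a rough local-file equivalent in plain Python, assuming a readable log path (the path and patterns below are placeholders):

import re

def first_in_log(path, pattern, excludes):
    """Return the first line matching pattern, skipping excluded patterns."""
    matcher = re.compile(pattern)
    skip = [re.compile(e) for e in excludes]
    with open(path) as logfile:
        for line in logfile:
            if matcher.search(line) and not any(s.search(line) for s in skip):
                return line
    return None

# e.g. first_in_log('/var/log/ceph/ceph.log', r'\[ERR\]|\[WRN\]|\[SEC\]', [])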
Example #34
0
    def upload_raw_array(self,
                         object_name,
                         array,
                         gzip=True,
                         acl=DEFAULT_ACL,
                         **metadata):
        """Upload a a binary representation of a np.ndarray

        This method reads the array content from memory to upload.
        It does not have any overhead.

        Parameters
        ----------
        object_name : str
        array : np.ndarray
        gzip : bool (default: True)
            Whether to gzip array content
        acl : str
            ACL for the object
        **metadata : optional

        Notes
        -----
        This method also uploads the array ``dtype``, ``shape``, and ``gzip``
        flag as metadata
        """
        if array.nbytes >= 2**31:
            # avoid zlib issues
            gzip = False

        order = 'F' if array.flags.f_contiguous else 'C'
        if not array.flags['%s_CONTIGUOUS' % order]:
            print(
                'array is a slice along a non-contiguous axis; copying it '
                'before saving (this will use extra memory)')
            array = np.array(array, order=order)

        meta = dict(dtype=array.dtype.str,
                    shape=','.join(map(str, array.shape)),
                    gzip=str(gzip),
                    order=order)

        # check that user metadata does not conflict with reserved keys
        metadata_keys = [k in meta for k in metadata]
        assert not any(metadata_keys)
        meta.update(metadata)

        if gzip:
            if six.PY3 and array.flags['F_CONTIGUOUS']:
                # eventually, array.data below should be changed to np.getbuffer(array)
                # (not yet working in python3 numpy)
                # F-contiguous arrays break gzip in python 3
                array = array.T
            zipdata = StringIO()
            gz = GzipFile(mode='wb', fileobj=zipdata)
            gz.write(array.data)
            gz.close()
            zipdata.seek(0)
            filestream = zipdata
            data_nbytes = get_fileobject_size(filestream)
        else:
            data_nbytes = array.nbytes
            filestream = StringIO(array.data)

        response = self.upload_object(object_name, filestream, acl=acl, **meta)

        return response
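The gzip branch of upload_raw_array spools compressed bytes into an in-memory buffer before upload; a minimal standalone sketch of that step, using Python 3's io.BytesIO in place of StringIO:

import gzip
import io

import numpy as np

def compress_array(array):
    """Gzip an ndarray's raw bytes into a rewound, in-memory buffer."""
    buf = io.BytesIO()
    with gzip.GzipFile(mode='wb', fileobj=buf) as gz:
        gz.write(array.tobytes())
    buf.seek(0)  # rewind so the consumer can stream from the start
    return buf

# stream = compress_array(np.arange(10))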
Example #35
0
all_elements = {}

df2 = pd.DataFrame()
#--------------------------------------
# get the bucket contents
#--------------------------------------
bucket = storage.get_bucket('ptone-experiments')

for k in bucket.list_blobs(prefix="working-files/clinical_metadata/"):

    if 'counts.txt' in k.name:
        disease_type = k.name.split("/")[2].split(".")[0]

        data = StringIO()
        k.download_to_file(data)
        data.seek(0)

        df = pd.read_csv(data, sep="\t")

        df['disease_type'] = disease_type

        if df2.empty:
            df2 = df
            continue

        frames = [df, df2]

        result = pd.concat(frames)
        df2 = result
        data.close()
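Growing df2 by re-concatenating on every iteration is quadratic in the number of frames; collecting the frames and calling pd.concat once is the usual fix. A sketch along those lines, reusing the snippet's own bucket API (Python 3 BytesIO assumed):

from io import BytesIO

import pandas as pd

frames = []
for blob in bucket.list_blobs(prefix="working-files/clinical_metadata/"):
    if 'counts.txt' not in blob.name:
        continue
    buf = BytesIO()
    blob.download_to_file(buf)  # same bucket API as the snippet above
    buf.seek(0)
    df = pd.read_csv(buf, sep="\t")
    df['disease_type'] = blob.name.split("/")[2].split(".")[0]
    frames.append(df)

# One concat at the end instead of one per iteration.
df2 = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()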
Example #36
0
            io.seek(0)
        return self

    def __close(self):
        self.__zf.close()
        self.__io.seek(0)

    def output(self):
        self.__close()
        return self.__io

    @property
    def size(self):
        self.__io.seek(0, 2)
        size = self.__io.tell()
        self.__io.seek(0)
        return size


if __name__ == '__main__':
    import glob
    zip = Zip()
    for filename in glob.glob('*.py'):
        with open(filename) as f:
            io = StringIO(f.read())
            io.seek(0)
        zip.add_file(io, filename)

    output_io = zip.output()
    with open('xxx.zip', 'wb') as f:
        f.write(output_io.getvalue())
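The Zip wrapper above is only partially shown; the standard library's zipfile covers the same in-memory use case. A sketch, assuming Python 3 and binary buffers:

import glob
import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED) as zf:
    for filename in glob.glob('*.py'):
        with open(filename, 'rb') as f:
            zf.writestr(filename, f.read())  # store contents under the file's own name

with open('xxx.zip', 'wb') as out:
    out.write(buf.getvalue())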
Example #37
0
def get_channel_image_file(experiment_id, channel_id):
    """
    .. http:get:: /api/experiments/(string:experiment_id)/channels/(string:channel_id)/image-files

        Get a specific image belonging to a channel.

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: image/png

        :query plate_name: name of the plate (required)
        :query cycle_index: cycle's index (required)
        :query well_name: name of the well (required)
        :query well_pos_x: x-coordinate of the site within the well (optional)
        :query well_pos_y: y-coordinate of the site within the well (optional)
        :query tpoint: time point (required)
        :query zplane: z-plane (required)
        :query illumcorr: correct image for illumination artifacts (optional)
        :query align: align image relative to reference cycle (optional)

        :reqheader Authorization: JWT token issued by the server
        :statuscode 200: no error
        :statuscode 404: no matching image found
        :statuscode 400: not all query parameters provided

    """
    logger.info('get image of channel %d from experiment %d', channel_id,
                experiment_id)
    plate_name = request.args.get('plate_name')
    well_name = request.args.get('well_name')
    x = request.args.get('well_pos_x', type=int)
    y = request.args.get('well_pos_y', type=int)
    cycle_index = request.args.get('cycle_index', type=int)
    tpoint = request.args.get('tpoint', type=int)
    zplane = request.args.get('zplane', type=int)
    illumcorr = is_true(request.args.get('correct'))
    align = is_true(request.args.get('align'))
    with tm.utils.MainSession() as session:
        experiment = session.query(tm.ExperimentReference).get(experiment_id)
        experiment_name = experiment.name
    with tm.utils.ExperimentSession(experiment_id) as session:
        site_id = session.query(tm.Site.id).\
            join(tm.Well).\
            join(tm.Plate).\
            filter(
                tm.Plate.name == plate_name,
                tm.Well.name == well_name,
                tm.Site.x == x, tm.Site.y == y
            ).\
            one()[0]
        channel = session.query(tm.Channel).get(channel_id)
        channel_name = channel.name
        image_file = session.query(tm.ChannelImageFile).\
            join(tm.Cycle).\
            filter(
                tm.Cycle.index == cycle_index,
                tm.ChannelImageFile.site_id == site_id,
                tm.ChannelImageFile.channel_id == channel_id,
                tm.ChannelImageFile.tpoint == tpoint,
                tm.ChannelImageFile.zplane == zplane
            ).\
            one()
        img = image_file.get()
        if illumcorr:
            # TODO: cache in Redis for a limited amount of time to avoid
            # loading the file repeatedly when the user downloads multiple
            # files of the same channel
            logger.info('correct image for illumination artefacts')
            illumstats_file = session.query(tm.IllumstatsFile).\
                filter_by(channel_id=channel_id).\
                one_or_none()
            if illumstats_file is None:
                raise ResourceNotFoundError(
                    'No illumination statistics file found for channel %d' %
                    channel_id)
            stats = illumstats_file.get()
            img = img.correct(stats)
    if align:
        img = img.align()

    pixels = img.png_encode()
    f = StringIO()
    f.write(pixels)
    f.seek(0)
    filename = '%s_%s_%s_y%.3d_x%.3d_z%.3d_t%.3d_%s.png' % (
        experiment_name, plate_name, well_name, y, x, zplane, tpoint,
        channel_name)
    return send_file(f,
                     attachment_filename=secure_filename(filename),
                     mimetype='image/png',
                     as_attachment=True)
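The response tail above (encode to PNG, spool into a buffer, send_file) is reusable on its own; a minimal sketch, assuming a pre-encoded PNG byte string and the older Flask attachment_filename keyword that the example itself uses:

from io import BytesIO

from flask import send_file

def send_png(png_bytes, filename):
    """Serve pre-encoded PNG bytes as a downloadable attachment."""
    buf = BytesIO(png_bytes)
    return send_file(buf,
                     attachment_filename=filename,  # download_name in Flask >= 2.0
                     mimetype='image/png',
                     as_attachment=True)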
Example #38
0
    def print_survey_result(self):
        survey_input = self.env['survey.user_input'].search(
            [('state', 'in', ['skip', 'done']),
             ('survey_id', '=', self.survey_id.id),
             ('partner_id', '=', self.employee_id.user_id.partner_id.id)],
            limit=1,
            order='create_date ASC')
        workbook = xlwt.Workbook()
        header1 = xlwt.easyxf(
            'font: bold on, color black, name Arial; align: wrap yes, vert bottom, horz centre'
        )
        title1 = xlwt.easyxf(
            'font: color black, name Arial; align: wrap yes, vert centre, horz centre'
        )
        title_total = xlwt.easyxf(
            'font: color black, name Arial; align: wrap yes, horz centre; pattern: pattern solid, fore_color gray40'
        )
        name_style = xlwt.easyxf(
            'font: color black, name Arial; align: wrap yes, vert centre, horz left'
        )

        worksheet = workbook.add_sheet('Sheet 1')
        name = ''
        employee_id = ''
        position = ''
        department = ''
        join_date = ''
        last_date = ''
        for record in survey_input:
            for rec in record.user_input_line_ids:
                if rec.question_id.question == 'Name' and not rec.skipped:
                    name = rec.value_free_text
                elif rec.question_id.question == 'Employee ID' and not rec.skipped:
                    employee_id = rec.value_free_text
                elif rec.question_id.question == 'Position' and not rec.skipped:
                    position = rec.value_free_text
                elif rec.question_id.question == 'Department / Section' and not rec.skipped:
                    department = rec.value_free_text
                elif rec.question_id.question == 'Start Working Date' and not rec.skipped:
                    join_date = rec.value_free_text
                elif rec.question_id.question == 'Last Working Date' and not rec.skipped:
                    last_date = rec.value_free_text
                # # name = [rec.value_free_text if rec.question_id.question == 'Name' and not rec.skipped else '-']
                # employee_id = [rec.value_free_text if rec.question_id.question == 'Employee ID' and not rec.skipped else '-']
                # position = [rec.value_free_text if rec.question_id.question == 'Position' and not rec.skipped else '-']
                # department = [rec.value_free_text if rec.question_id.question == 'Department / Section' and not rec.skipped else '-']
                # join_date = [rec.value_free_text if rec.question_id.question == 'Start Working Date' and not rec.skipped else '-']
                # last_date = [rec.value_free_text if rec.question_id.question == 'Last Working Date' and not rec.skipped else '-']
        print('name = ', name)
        print('employee_id = ', employee_id)
        print('position = ', position)
        print('department = ', department)
        print('join_date = ', join_date)
        print('last_date = ', last_date)
        worksheet.write_merge(1, 1, 0, 5, "Exit Interview Form", header1)
        worksheet.write(2, 4, 'Effective Date', name_style)
        worksheet.write_merge(3, 3, 0, 1, "Name :", title1)
        worksheet.write(3, 2, name, title1)
        worksheet.write_merge(3, 3, 3, 4, "Employee ID :", title1)
        worksheet.write(3, 5, employee_id, title1)
        worksheet.write_merge(4, 4, 0, 1, "Position :", title1)
        worksheet.write(4, 2, position, title1)
        worksheet.write_merge(4, 4, 3, 4, "Department/Section :", title1)
        worksheet.write(4, 5, department, title1)
        worksheet.write_merge(5, 5, 0, 1, "Start Working Date :", title1)
        worksheet.write(5, 2, join_date, title1)
        worksheet.write_merge(5, 5, 3, 4, "Last Working Date :", title1)
        worksheet.write(5, 5, last_date, title1)

        worksheet.write_merge(
            7, 8, 0, 5,
            "To continue to improve and make our company a better place to work, we ask you to kindly provide honest and true answers. \n Your answers will be kept confidential. Please select the answer that best applies to you.",
            title1)

        worksheet.write_merge(9, 10, 0, 3, "Description", title1)
        worksheet.write_merge(9, 9, 4, 8, "Satisfaction Level", title1)
        worksheet.write(10, 4, "Very High", title1)
        worksheet.write(10, 5, "High", title1)
        worksheet.write(10, 6, "Neutral", title1)
        worksheet.write(10, 7, "Low", title1)
        worksheet.write(10, 8, "Very Low", title1)
        worksheet.write_merge(11, 11, 0, 3, "Work Condition", title1)

        fp = StringIO()
        workbook.save(fp)
        fp.seek(0)
        excel_data = fp.read()
        fp.close()
        excel_data = base64.encodestring(excel_data)
        filename = 'Survey Result.xls'
        survey_wizard = self.env['survey.result.wizard'].create({
            'name': filename,
            'file': excel_data,
        })
        return {
            'name': _('Survey Result Report'),
            'res_id': survey_wizard.id,
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'survey.result.wizard',
            'type': 'ir.actions.act_window',
            'target': 'new',
        }
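The save-and-encode tail of print_survey_result is a reusable pattern; a compact sketch, with base64.b64encode standing in for the deprecated encodestring:

import base64
from io import BytesIO

import xlwt

def workbook_to_base64(workbook):
    """Serialize an xlwt workbook to base64 without touching disk."""
    buf = BytesIO()
    workbook.save(buf)  # xlwt accepts a file-like object as well as a path
    return base64.b64encode(buf.getvalue())

# Usage:
# book = xlwt.Workbook()
# book.add_sheet('Sheet 1').write(0, 0, 'hello')
# payload = workbook_to_base64(book)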
Example #39
0
    if zsql_brain is not None:
        try:
            class_file_, class_name_ = zsql_brain.rsplit('.', 1)
        except:
            #import pdb; pdb.post_mortem()
            raise
        brain = getBrain(class_file_, class_name_)
        # XXX remove this logging for performance
        LOG(__name__, INFO, "Using special brain: %r\n" % (brain, ))
    else:
        brain = getBrain(self.class_file_, self.class_name_)

    if type(result) is type(''):
        f = StringIO()
        f.write(result)
        f.seek(0)
        result = RDB.File(f, brain, p)
    else:
        result = Results(result, brain, p)
    columns = result._searchable_result_columns()
    if test__ and columns != self._col: self._col = columns

    # If run in test mode, return both the query and results so
    # that the template doesn't have to be rendered twice!
    if test__: return query, result

    return result


def DA_upgradeSchema(self,
                     connection_id=None,
Example #40
0
                sheet1.write(startRow + 1, 0, u'本期代理费用合计:')  # "Total agency fees for this period:"
                sheet1.write(startRow + 1, 1, subtotal - purchase_total)
                sheet1.write(startRow + 2, 0, u'本期其他费用合计:')  # "Total other fees for this period:"
                sheet1.write(startRow + 2, 1, cost_total)
                sheet1.write(startRow + 3, 0, u'应付金额合计:')  # "Total amount payable:"
                sheet1.write(startRow + 3, 1, purchase_total + cost_total)

                sheet1.write(startRow + 4, 0, u'备注:')  # "Remarks:"
                sheet1.write(startRow + 4, 1, u'库存数量为导出对账单时间的实时库存。')  # "Stock quantity is the real-time stock at the time the statement is exported."

        except Exception, ex:
            print Exception, ":", ex

        fp = StringIO()
        workbook.save(fp)
        fp.seek(0)
        data = fp.read()
        fp.close()
        return data

    def from_data(self, cr, uid, lines, sequence, context=None):
        supplier_mode_dict = {
            'Direct_Procurement': u'直采',  # "direct procurement"
            'Consign_stock_in': u'代售入仓',  # "consignment, stocked into warehouse"
            'Consign': u'代售不入仓',  # "consignment, not stocked"
            'Commission': u'佣金'  # "commission"
        }
        state_dict = {
            'draft': u'未对账',  # "not yet reconciled"
            'checked': u'已对账',  # "reconciled"
            'settled': u'已结算',  # "settled"
Example #41
0
class PythonWidget(HistoryConsoleWidget):
    """ A basic in-process Python interpreter.
    """

    # Emitted when a command has been executed in the interpreter.
    executed = QtCore.Signal()

    #--------------------------------------------------------------------------
    # 'object' interface
    #--------------------------------------------------------------------------

    def __init__(self, parent=None):
        super(PythonWidget, self).__init__(parent)

        # PythonWidget attributes.
        self.locals = dict(__name__='__console__', __doc__=None)
        self.interpreter = InteractiveInterpreter(self.locals)

        # PythonWidget protected attributes.
        self._buffer = StringIO()
        self._bracket_matcher = BracketMatcher(self._control)
        self._call_tip_widget = CallTipWidget(self._control)
        self._completion_lexer = CompletionLexer(PythonLexer())
        self._hidden = False
        self._highlighter = PythonWidgetHighlighter(self)
        self._last_refresh_time = 0

        # file-like object attributes.
        self.encoding = sys.stdin.encoding

        # Configure the ConsoleWidget.
        self.tab_width = 4
        self._set_continuation_prompt('... ')

        # Configure the CallTipWidget.
        self._call_tip_widget.setFont(self.font)
        self.font_changed.connect(self._call_tip_widget.setFont)

        # Connect signal handlers.
        document = self._control.document()
        document.contentsChange.connect(self._document_contents_change)

        # Display the banner and initial prompt.
        self.reset()

    #--------------------------------------------------------------------------
    # file-like object interface
    #--------------------------------------------------------------------------

    def flush(self):
        """ Flush the buffer by writing its contents to the screen.
        """
        self._buffer.seek(0)
        text = self._buffer.getvalue()
        self._buffer.close()
        self._buffer = StringIO()

        self._append_plain_text(text)
        self._control.moveCursor(QtGui.QTextCursor.End)

    def readline(self, prompt=None):
        """ Read and return one line of input from the user.
        """
        return self._readline(prompt)

    def write(self, text, refresh=True):
        """ Write text to the buffer, possibly flushing it if 'refresh' is set.
        """
        if not self._hidden:
            self._buffer.write(text)
            if refresh:
                current_time = time()
                if current_time - self._last_refresh_time > 0.05:
                    self.flush()
                    self._last_refresh_time = current_time

    def writelines(self, lines, refresh=True):
        """ Write a list of lines to the buffer.
        """
        for line in lines:
            self.write(line, refresh=refresh)

    #---------------------------------------------------------------------------
    # 'ConsoleWidget' abstract interface
    #---------------------------------------------------------------------------

    def _is_complete(self, source, interactive):
        """ Returns whether 'source' can be completely processed and a new
            prompt created. When triggered by an Enter/Return key press,
            'interactive' is True; otherwise, it is False.
        """
        if interactive:
            lines = source.splitlines()
            if len(lines) == 1:
                try:
                    return compile_command(source) is not None
                except:
                    # We'll let the interpreter handle the error.
                    return True
            else:
                return lines[-1].strip() == ''
        else:
            return True

    def _execute(self, source, hidden):
        """ Execute 'source'. If 'hidden', do not show any output.

        See parent class :meth:`execute` docstring for full details.
        """
        # Save the current std* and point them here
        old_stdin = sys.stdin
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        sys.stdin = sys.stdout = sys.stderr = self

        # Run the source code in the interpreter
        self._hidden = hidden
        try:
            more = self.interpreter.runsource(source)
        finally:
            self._hidden = False

            # Restore std* unless the executed code changed them
            if sys.stdin is self:
                sys.stdin = old_stdin
            if sys.stdout is self:
                sys.stdout = old_stdout
            if sys.stderr is self:
                sys.stderr = old_stderr

            self.executed.emit()
            self._show_interpreter_prompt()

    def _prompt_started_hook(self):
        """ Called immediately after a new prompt is displayed.
        """
        if not self._reading:
            self._highlighter.highlighting_on = True

    def _prompt_finished_hook(self):
        """ Called immediately after a prompt is finished, i.e. when some input
            will be processed and a new prompt displayed.
        """
        if not self._reading:
            self._highlighter.highlighting_on = False

    def _tab_pressed(self):
        """ Called when the tab key is pressed. Returns whether to continue
            processing the event.
        """
        # Perform tab completion if:
        # 1) The cursor is in the input buffer.
        # 2) There is a non-whitespace character before the cursor.
        text = self._get_input_buffer_cursor_line()
        if text is None:
            return False
        complete = bool(text[:self._get_input_buffer_cursor_column()].strip())
        if complete:
            self._complete()
        return not complete

    #---------------------------------------------------------------------------
    # 'ConsoleWidget' protected interface
    #---------------------------------------------------------------------------

    def _event_filter_console_keypress(self, event):
        """ Reimplemented for smart backspace.
        """
        if event.key() == QtCore.Qt.Key_Backspace and \
                not event.modifiers() & QtCore.Qt.AltModifier:
            # Smart backspace: remove four characters in one backspace if:
            # 1) everything left of the cursor is whitespace
            # 2) the four characters immediately left of the cursor are spaces
            col = self._get_input_buffer_cursor_column()
            cursor = self._control.textCursor()
            if col > 3 and not cursor.hasSelection():
                text = self._get_input_buffer_cursor_line()[:col]
                if text.endswith('    ') and not text.strip():
                    cursor.movePosition(QtGui.QTextCursor.Left,
                                        QtGui.QTextCursor.KeepAnchor, 4)
                    cursor.removeSelectedText()
                    return True

        return super(PythonWidget, self)._event_filter_console_keypress(event)

    def _insert_continuation_prompt(self, cursor):
        """ Reimplemented for auto-indentation.
        """
        super(PythonWidget, self)._insert_continuation_prompt(cursor)
        source = self.input_buffer
        space = 0
        for c in source.splitlines()[-1]:
            if c == '\t':
                space += 4
            elif c == ' ':
                space += 1
            else:
                break
        if source.rstrip().endswith(':'):
            space += 4
        cursor.insertText(' ' * space)

    #---------------------------------------------------------------------------
    # 'PythonWidget' public interface
    #---------------------------------------------------------------------------

    def execute_file(self, path, hidden=False):
        """ Attempts to execute file with 'path'. If 'hidden', no output is
            shown.
        """

        self.execute("exec(open(%s).read())" % repr(path), hidden=hidden)

    def reset(self):
        """ Resets the widget to its initial state. Similar to ``clear``, but
            also re-writes the banner.
        """
        self._reading = False
        self._highlighter.highlighting_on = False

        self._control.clear()
        self._append_plain_text(self._get_banner())
        self._show_interpreter_prompt()

    #---------------------------------------------------------------------------
    # 'PythonWidget' protected interface
    #---------------------------------------------------------------------------

    def _call_tip(self):
        """ Shows a call tip, if appropriate, at the current cursor location.
        """
        # Decide if it makes sense to show a call tip
        cursor = self._get_cursor()
        cursor.movePosition(QtGui.QTextCursor.Left)
        if cursor.document().characterAt(cursor.position()) != '(':
            return False
        context = self._get_context(cursor)
        if not context:
            return False

        # Look up the context and show a tip for it
        symbol, leftover = self._get_symbol_from_context(context)
        doc = getattr(symbol, '__doc__', None)
        if doc is not None and not leftover:
            self._call_tip_widget.show_call_info(doc=doc)
            return True
        return False

    def _complete(self):
        """ Performs completion at the current cursor location.
        """
        context = self._get_context()
        if context:
            symbol, leftover = self._get_symbol_from_context(context)
            if len(leftover) == 1:
                leftover = leftover[0]
                if symbol is None:
                    names = self.interpreter.locals.keys()
                    names += __builtin__.__dict__.keys()
                else:
                    names = dir(symbol)
                completions = [ n for n in names if n.startswith(leftover) ]
                if completions:
                    cursor = self._get_cursor()
                    cursor.movePosition(QtGui.QTextCursor.Left,
                                        n=len(context[-1]))
                    self._complete_with_items(cursor, completions)

    def _get_banner(self):
        """ Gets a banner to display at the beginning of a session.
        """
        banner = 'Python %s on %s\nType "help", "copyright", "credits" or ' \
            '"license" for more information.'
        return banner % (sys.version, sys.platform)

    def _get_context(self, cursor=None):
        """ Gets the context for the specified cursor (or the current cursor
            if none is specified).
        """
        if cursor is None:
            cursor = self._get_cursor()
        cursor.movePosition(QtGui.QTextCursor.StartOfBlock,
                            QtGui.QTextCursor.KeepAnchor)
        text = cursor.selection().toPlainText()
        return self._completion_lexer.get_context(text)

    def _get_symbol_from_context(self, context):
        """ Find a python object in the interpeter namespace from a context (a
            list of names).
        """
        context = map(str, context)
        if len(context) == 0:
            return None, context

        base_symbol_string = context[0]
        symbol = self.interpreter.locals.get(base_symbol_string, None)
        if symbol is None:
            symbol = __builtin__.__dict__.get(base_symbol_string, None)
        if symbol is None:
            return None, context

        context = context[1:]
        for i, name in enumerate(context):
            new_symbol = getattr(symbol, name, None)
            if new_symbol is None:
                return symbol, context[i:]
            else:
                symbol = new_symbol

        return symbol, []

    def _show_interpreter_prompt(self):
        """ Shows a prompt for the interpreter.
        """
        self.flush()
        self._show_prompt('>>> ')

    #------ Signal handlers ----------------------------------------------------

    def _document_contents_change(self, position, removed, added):
        """ Called whenever the document's content changes. Display a call tip
            if appropriate.
        """
        # Calculate where the cursor should be *after* the change:
        position += added

        document = self._control.document()
        if position == self._get_cursor().position():
            self._call_tip()
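Example #41's _execute works by pointing sys.stdin/stdout/stderr at the widget and restoring them afterwards; the same capture-and-restore idea in isolation, as a small sketch:

import sys
from io import StringIO

def capture_stdout(func, *args, **kwargs):
    """Run func while collecting everything it writes to stdout."""
    old_stdout = sys.stdout
    sys.stdout = buf = StringIO()
    try:
        result = func(*args, **kwargs)
    finally:
        sys.stdout = old_stdout  # always restore, even if func raises
    return result, buf.getvalue()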
Example #42
0
    def encode(self, data_source, title=None, as_stream=False, **attr):
        """
            Export data as a Microsoft Excel spreadsheet

            @param data_source: the source of the data that is to be encoded
                                as a spreadsheet, can be either of:
                                1) an S3Resource
                                2) an array of value dicts (dict of
                                   column labels as first item, list of
                                   field types as second item)
                                3) a dict like:
                                   {columns: [key, ...],
                                    headers: {key: label},
                                    types: {key: type},
                                    rows: [{key:value}],
                                    }
            @param title: the title for the output document
            @param as_stream: return the buffer (StringIO) rather than
                              its contents (str), useful when the output
                              is supposed to be stored locally
            @param attr: keyword parameters

            @keyword title: the main title of the report
            @keyword list_fields: fields to include in list views
            @keyword report_groupby: used to create a grouping of the result:
                                     either a Field object of the resource
                                     or a string which matches a value in
                                     the heading
            @keyword use_colour: True to add colour to the cells, default False
            @keyword evenodd: render different background colours
                              for even/odd rows ("stripes")
        """

        # Do not redirect from here!
        # ...but raise proper status code, which can be caught by caller
        try:
            import xlwt
        except ImportError:
            error = self.ERROR.XLWT_ERROR
            current.log.error(error)
            raise HTTP(503, body=error)
        try:
            from xlrd.xldate import xldate_from_date_tuple, \
                                    xldate_from_time_tuple, \
                                    xldate_from_datetime_tuple
        except ImportError:
            error = self.ERROR.XLRD_ERROR
            current.log.error(error)
            raise HTTP(503, body=error)

        import datetime

        MAX_CELL_SIZE = self.MAX_CELL_SIZE
        COL_WIDTH_MULTIPLIER = self.COL_WIDTH_MULTIPLIER

        # Get the attributes
        title = attr.get("title")
        if title is None:
            title = current.T("Report")
        list_fields = attr.get("list_fields")
        group = attr.get("dt_group")
        use_colour = attr.get("use_colour", False)
        evenodd = attr.get("evenodd", True)

        # Extract the data from the data_source
        if isinstance(data_source, dict):
            headers = data_source.get("headers", {})
            lfields = data_source.get("columns", list_fields)
            column_types = data_source.get("types")
            types = [column_types[col] for col in lfields]
            rows = data_source.get("rows")
        elif isinstance(data_source, (list, tuple)):
            headers = data_source[0]
            types = data_source[1]
            rows = data_source[2:]
            lfields = list_fields
        else:
            if not list_fields:
                list_fields = data_source.list_fields()
            (title, types, lfields, headers, rows) = self.extract(
                data_source,
                list_fields,
            )

        # Verify columns in items
        request = current.request
        if len(rows) > 0 and len(lfields) > len(rows[0]):
            msg = """modules/s3/codecs/xls: There is an error in the list items, a field doesn't exist
requesting url %s
Headers = %d, Data Items = %d
Headers     %s
List Fields %s""" % (request.url, len(lfields), len(rows[0]), headers, lfields)
            current.log.error(msg)

        # Grouping
        report_groupby = lfields[group] if group else None
        groupby_label = headers[report_groupby] if report_groupby else None

        # Date/Time formats from L10N deployment settings
        settings = current.deployment_settings
        date_format = settings.get_L10n_date_format()
        date_format_str = str(date_format)

        dt_format_translate = self.dt_format_translate
        date_format = dt_format_translate(date_format)
        time_format = dt_format_translate(settings.get_L10n_time_format())
        datetime_format = dt_format_translate(
            settings.get_L10n_datetime_format())

        title_row = settings.get_xls_title_row()

        # Get styles
        styles = self._styles(
            use_colour=use_colour,
            evenodd=evenodd,
            datetime_format=datetime_format,
        )

        # Create the workbook
        book = xlwt.Workbook(encoding="utf-8")

        # Add sheets
        sheets = []
        # XLS exports are limited to 65536 rows per sheet; we work around
        # this by creating multiple sheets
        row_limit = 65536
        sheetnum = len(rows) / row_limit
        # Can't have a / in the sheet_name, so replace any with a space
        sheet_name = str(title.replace("/", " "))
        if len(sheet_name) > 31:
            # Sheet name cannot be over 31 chars
            # (take sheet number suffix into account)
            sheet_name = sheet_name[:31] if sheetnum == 1 else sheet_name[:28]
        count = 1
        while len(sheets) <= sheetnum:
            sheets.append(book.add_sheet("%s-%s" % (sheet_name, count)))
            count += 1

        if callable(title_row):
            # Calling with sheet None to get the number of title rows
            title_row_length = title_row(None)
        else:
            title_row_length = 2

        # Add header row to all sheets, determine columns widths
        header_style = styles["header"]
        for sheet in sheets:
            # Move this down if a title row will be added
            if title_row:
                header_row = sheet.row(title_row_length)
            else:
                header_row = sheet.row(0)
            column_widths = []
            has_id = False
            col_index = 0
            for selector in lfields:
                if selector == report_groupby:
                    continue
                label = headers[selector]
                if label == "Id":
                    # Indicate to adjust col_index when writing out
                    has_id = True
                    column_widths.append(0)
                    col_index += 1
                    continue
                if label == "Sort":
                    continue
                if has_id:
                    # Adjust for the skipped column
                    write_col_index = col_index - 1
                else:
                    write_col_index = col_index
                header_row.write(write_col_index, str(label), header_style)
                width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
                width = min(width, 65535)  # USHRT_MAX
                column_widths.append(width)
                sheet.col(write_col_index).width = width
                col_index += 1

        title = s3_str(title)

        # Title row (optional, deployment setting)
        if title_row:
            T = current.T
            large_header_style = styles["large_header"]
            notes_style = styles["notes"]
            for sheet in sheets:
                if callable(title_row):
                    # Custom title rows
                    title_row(sheet)
                else:
                    # First row => Title (standard = "title_list" CRUD string)
                    current_row = sheet.row(0)
                    if col_index > 0:
                        sheet.write_merge(
                            0,
                            0,
                            0,
                            col_index,
                            title,
                            large_header_style,
                        )
                    current_row.height = 500
                    # Second row => Export date/time
                    current_row = sheet.row(1)
                    current_row.write(0, "%s:" % T("Date Exported"),
                                      notes_style)
                    current_row.write(1, request.now, notes_style)
                    # Fix the size of the last column to display the date
                    if 16 * COL_WIDTH_MULTIPLIER > width:
                        sheet.col(col_index).width = 16 * COL_WIDTH_MULTIPLIER

        # Initialize counters
        totalCols = col_index
        # Move the rows down if a title row is included
        if title_row:
            row_index = title_row_length
        else:
            row_index = 0

        # Helper function to get the current row
        def get_current_row(row_count, row_limit):

            sheet_count = int(row_count / row_limit)
            row_number = row_count - (sheet_count * row_limit)
            if sheet_count > 0:
                row_number += 1
            return sheets[sheet_count], sheets[sheet_count].row(row_number)

        # Write the table contents
        subheading = None
        odd_style = styles["odd"]
        even_style = styles["even"]
        subheader_style = styles["subheader"]
        for row in rows:
            # Current row
            row_index += 1
            current_sheet, current_row = get_current_row(row_index, row_limit)
            style = even_style if row_index % 2 == 0 else odd_style

            # Group headers
            if report_groupby:
                represent = s3_strip_markup(s3_unicode(row[report_groupby]))
                if subheading != represent:
                    # Start of new group - write group header
                    subheading = represent
                    current_sheet.write_merge(
                        row_index,
                        row_index,
                        0,
                        totalCols,
                        subheading,
                        subheader_style,
                    )
                    # Move on to next row
                    row_index += 1
                    current_sheet, current_row = get_current_row(
                        row_index, row_limit)
                    style = even_style if row_index % 2 == 0 else odd_style

            col_index = 0
            remaining_fields = lfields

            # Custom row style?
            row_style = None
            if "_style" in row:
                stylename = row["_style"]
                if stylename in styles:
                    row_style = styles[stylename]

            # Group header/footer row?
            if "_group" in row:
                group_info = row["_group"]
                label = group_info.get("label")
                totals = group_info.get("totals")
                if label:
                    label = s3_strip_markup(s3_unicode(label))
                    style = row_style or subheader_style
                    span = group_info.get("span")
                    if span == 0:
                        current_sheet.write_merge(
                            row_index,
                            row_index,
                            0,
                            totalCols - 1,
                            label,
                            style,
                        )
                        if totals:
                            # Write totals into the next row
                            row_index += 1
                            current_sheet, current_row = \
                                get_current_row(row_index, row_limit)
                    else:
                        current_sheet.write_merge(
                            row_index,
                            row_index,
                            0,
                            span - 1,
                            label,
                            style,
                        )
                        col_index = span
                        remaining_fields = lfields[span:]
                if not totals:
                    continue

            for field in remaining_fields:
                label = headers[field]
                if label == groupby_label:
                    continue
                if label == "Id":
                    # Skip the ID column from XLS exports
                    col_index += 1
                    continue

                if field not in row:
                    represent = ""
                else:
                    represent = s3_strip_markup(s3_unicode(row[field]))

                coltype = types[col_index]
                if coltype == "sort":
                    continue
                if len(represent) > MAX_CELL_SIZE:
                    represent = represent[:MAX_CELL_SIZE]
                value = represent
                if coltype == "date":
                    try:
                        cell_datetime = datetime.datetime.strptime(
                            value, date_format_str)
                        date_tuple = (cell_datetime.year, cell_datetime.month,
                                      cell_datetime.day)
                        value = xldate_from_date_tuple(date_tuple, 0)
                        style.num_format_str = date_format
                    except Exception:
                        pass
                elif coltype == "datetime":
                    try:
                        cell_datetime = datetime.datetime.strptime(
                            value, date_format_str)
                        date_tuple = (cell_datetime.year, cell_datetime.month,
                                      cell_datetime.day, cell_datetime.hour,
                                      cell_datetime.minute,
                                      cell_datetime.second)
                        value = xldate_from_datetime_tuple(date_tuple, 0)
                        style.num_format_str = datetime_format
                    except Exception:
                        pass
                elif coltype == "time":
                    try:
                        cell_datetime = datetime.datetime.strptime(
                            value, date_format_str)
                        date_tuple = (cell_datetime.hour, cell_datetime.minute,
                                      cell_datetime.second)
                        value = xldate_from_time_tuple(date_tuple)
                        style.num_format_str = time_format
                    except Exception:
                        pass
                elif coltype == "integer":
                    try:
                        value = int(value)
                        style.num_format_str = "0"
                    except Exception:
                        pass
                elif coltype == "double":
                    try:
                        value = float(value)
                        style.num_format_str = "0.00"
                    except Exception:
                        pass
                if has_id:
                    # Adjust for the skipped column
                    write_col_index = col_index - 1
                else:
                    write_col_index = col_index

                current_row.write(write_col_index, value, style)
                width = len(represent) * COL_WIDTH_MULTIPLIER
                if width > column_widths[col_index]:
                    column_widths[col_index] = width
                    current_sheet.col(write_col_index).width = width
                col_index += 1

        # Additional sheet settings
        for sheet in sheets:
            sheet.panes_frozen = True
            sheet.horz_split_pos = 1

        # Write output
        output = StringIO()
        book.save(output)
        output.seek(0)

        if as_stream:
            return output

        # Response headers
        filename = "%s_%s.xls" % (request.env.server_name, title)
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".xls")
        response.headers["Content-disposition"] = disposition

        return output.read()
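
The get_current_row helper above is what spreads a large export across several sheets. A minimal standalone sketch of the same arithmetic, using a toy row_limit of 5 for readability (real .xls sheets cap out at 65536 rows):

def split_row(row_count, row_limit):
    sheet_count = int(row_count / row_limit)
    row_number = row_count - (sheet_count * row_limit)
    if sheet_count > 0:
        row_number += 1  # continuation sheets repeat the header in row 0
    return sheet_count, row_number

assert split_row(3, 5) == (0, 3)
assert split_row(7, 5) == (1, 3)  # row 7 spills onto the second sheet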
Example #43
0
    def generate_excel_do(self):
        """
        Membuatkan journal untuk pembayran expense
        """
        self.ensure_one()
        style_default = xlwt.easyxf('font: height 240')
        style_header = xlwt.easyxf('font: height 280, bold on')
        style_bold = xlwt.easyxf(
            'font: height 240, bold on; align: horz center; '
            'borders: left thin, top thin, bottom thin, right thin')
        style_table = xlwt.easyxf(
            'font: height 240; borders: left thin, bottom thin, right thin')

        wb = xlwt.Workbook("UTF-8")
        ws = wb.add_sheet('Invoice')
        ws.footer_str = ''
        title = "D.O. GUDANG"

        y = 0
        x = 0

        ws.col(x).width = 5000
        ws.col(x + 1).width = 12000
        ws.col(x + 2).width = 6000

        ws.row(0).height_mismatch = 1
        ws.row(0).height = 300
        ws.row(1).height_mismatch = 1
        ws.row(1).height = 280
        ws.row(2).height_mismatch = 1
        ws.row(2).height = 280
        ws.row(3).height_mismatch = 1
        ws.row(3).height = 280
        ws.row(4).height_mismatch = 1
        ws.row(4).height = 280
        ws.row(5).height_mismatch = 1
        ws.row(5).height = 280

        # ws.col(x + 3).width = 4500
        # ws.col(x + 4).width = 6000

        ws.write(y,
                 x,
                 "{} {}".format(title, self.nomor_urut),
                 style=style_header)
        y += 1
        ws.write(y, x, "Tanggal Invoice", style=style_default)
        ws.write(y, x + 1, self.date_invoice, style=style_default)
        y += 1
        ws.write(y, x, "Customer", style=style_default)
        ws.write(y, x + 1, self.partner_id.name, style=style_default)
        y += 1
        street_name = ""
        if self.partner_id.street:
            street_name = self.partner_id.street
        ws.write(y, x, "Alamat", style=style_default)
        ws.write(y, x + 1, street_name, style=style_default)
        y += 2

        ws.write(y, x, "No", style=style_bold)
        ws.write(y, x + 1, "Nama Barang", style=style_bold)
        ws.write(y, x + 2, "QTY", style=style_bold)
        # ws.write(y, x + 3, "Harga/@", style=style_bold)
        # ws.write(y, x + 4, "Pajak", style=style_bold)
        # ws.write(y, x + 5, "Subtotal tanpa pajak", style=style_bold)
        y += 1

        idx = 0
        sum_qty = sum(self.invoice_line_ids.mapped("quantity"))
        for inv_line_id in self.invoice_line_ids:
            ws.row(y).height_mismatch = 1
            ws.row(y).height = 320
            idx += 1
            tax_name = ""
            for tax_id in inv_line_id.invoice_line_tax_ids:
                tax_name += tax_id.name
            ws.write(y, x, idx, style=style_table)
            ws.write(y, x + 1, inv_line_id.product_id.name, style=style_table)
            ws.write(y, x + 2, inv_line_id.quantity, style=style_table)
            # ws.write(y, x + 3, inv_line_id.price_unit, style=style_table)
            # ws.write(y, x + 4, tax_name, style=style_table)
            # ws.write(y, x + 5, inv_line_id.quantity * inv_line_id.price_unit, style=style_table)
            y += 1

        ws.row(y).height = 320
        ws.write(y,
                 x + 1,
                 "Jml. Qty:",
                 style=xlwt.easyxf('font: height 240; align: horiz right'))
        ws.write(y, x + 2, sum_qty, style=style_table)
        y += 3
        ws.write(
            y,
            x,
            "Adm. Penjualan,         Pengambil               Mengetahui, ",
            style=style_default)
        y += 3
        ws.write(y,
                 x,
                 "(____________)       (___________)         (___________)",
                 style=style_default)

        fp = StringIO()
        wb.save(fp)
        fp.seek(0)
        data = fp.read()
        fp.close()

        return self.env["ss.download"].download(
            "Invoice_{}.xls".format(self.nomor_urut), data)
Example #44
0
def safeCopyAndMd5(req, fileobj, destinationPath, repoId, uploader, b64=False, content_range=False):
    '''Copy a file to destinationPath in chunks and return its MD5 hex digest
    (or an IN_PROGRESS marker for partial uploads)'''

    destDir = path.dirname(destinationPath)
    extsp = destinationPath.split('.')
    if len(extsp) > 1:
        ext = extsp[-1]
    else:
        ext = 'raw'
    checkPath(destDir)

    ''' If an asset with the same content is already available, we symlink to
    it (when its uuid differs from this one) instead of keeping a copy! '''

    start_byte = 0
    in_progress = False
    if content_range:
        in_progress = True
        _bp = content_range.split()
        # a simple validation:
        if len(_bp) == 2 and len(_bp[1].split('/')) == 2:
            _bd = _bp[1].split('/')
            start_byte, eb = map(int, _bd[0].split('-'))
            tb = int(_bd[-1])
            if tb == eb + 1:
                in_progress = False

    if os.path.islink(destinationPath):
        os.remove(destinationPath)

    if not start_byte:  # if its a new file
        if os.path.isfile(destinationPath):
            os.remove(destinationPath)

    with open(destinationPath, 'a+') as f:
        md5 = hashlib.md5()
        if b64:
            b = StringIO()
            decode(fileobj, b)
            b.seek(0)
            fileobj = b
        while True:
            chunk = fileobj.read(2 ** 24)  # 16 MB per chunk
            # chunk = fileobj.read(1024) ## 1Kb
            if not chunk:
                break
            md5.update(chunk)
            f.write(chunk)
    if in_progress and not start_byte:
        return 'IN_PROGRESS_NEW'

    if in_progress:
        return 'IN_PROGRESS'

    dataMd5 = md5.hexdigest()
    # check if there is an asset with same key
    if not repoId:
        availableAsset = Asset.query.filter_by(key=dataMd5).first()
    else:
        availableAsset = req.session.query(Asset).filter_by(key=dataMd5).join(
            Collection).filter_by(repository_id=repoId).first()

    '''First, clean up the asset record if no file is linked to it'''
    if availableAsset:
        if not os.path.isfile(availableAsset.full_path):
            req.session.delete(availableAsset)

        elif not availableAsset.full_path == destinationPath:
            os.remove(destinationPath)  # we don't need it anymore
            os.symlink(availableAsset.full_path, destinationPath)
            # print 'Symlink: %s generated' % destinationPath

    return dataMd5
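
The chunked read/hash loop is the core of safeCopyAndMd5. Extracted as a self-contained helper (a sketch, keeping the original 16 MB chunk size):

import hashlib

def md5_of_stream(fileobj, chunk_size=2 ** 24):  # 16 MB per chunk
    md5 = hashlib.md5()
    while True:
        chunk = fileobj.read(chunk_size)
        if not chunk:
            break
        md5.update(chunk)
    return md5.hexdigest()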
Example #45
0
    def encode(self, resource, **attr):
        """
            API Method to encode a resource as cards

            @param resource: the S3Resource, or
                             - the data items as list [{fieldname: representation, ...}, ...], or
                             - a callable that produces such a list of items
            @param attr: additional encoding parameters (see below)

            @keyword layout: the layout (an S3PDFCardLayout subclass), overrides
                             the resource's pdf_card_layout setting
            @keyword orderby: orderby-expression for data extraction, overrides
                              the resource's orderby setting
            @keyword labels: the labels for the fields,
                             - a dict {colname: label}, or
                             - a callable that produces it,
                             - defaults to the labels of the extracted fields
            @keyword pagesize: the PDF page size,
                               - a string "A4" or "Letter", or
                               - a tuple (width, height), in points
                               - defaults to the layout's card size
            @keyword margins: the page margins,
                              - a tuple (N, E, S, W), in points, or
                              - a single number, in points
                              - will be computed if omitted
            @keyword spacing: the spacing between cards,
                              - a tuple (H, V), in points, or
                              - a single number, in points
                              - defaults to 18 points in both directions
            @keyword title: the document title,
                            - defaults to title_list crud string of the resource

            @return: a handle to the output
        """

        if not REPORTLAB:
            # FIXME is this the correct handling of a dependency failure?
            raise HTTP(503, "Python ReportLab library not installed")

        # Do we operate on a S3Resource?
        is_resource = isinstance(resource, S3Resource)

        # The card layout
        layout = attr.get("layout")
        if layout is None and is_resource:
            layout = resource.get_config("pdf_card_layout")
        if layout is None:
            layout = S3PDFCardLayout

        # Card (and hence page) orientation
        orientation = layout.orientation
        if orientation == "Landscape":
            orientation = landscape
        else:
            orientation = portrait

        # Card and page size
        cardsize = orientation(layout.cardsize)
        pagesize = attr.get("pagesize")
        if pagesize == "A4":
            pagesize = A4
        elif pagesize == "Letter":
            pagesize = LETTER
        elif not isinstance(pagesize, (tuple, list)):
            pagesize = cardsize
        pagesize = orientation(pagesize)

        # Extract the data
        if is_resource:
            # Extract the data items from the resource
            fields = layout.fields(resource)
            data = self.extract(resource, fields, orderby=attr.get("orderby"))
            items = data.rows
        elif callable(resource):
            # External getter => call with resource, returns the data items
            data = None
            items = resource()
        else:
            # The data items have been passed in in place of the resource
            data = None
            items = resource

        # Get the labels
        labels = attr.get("labels")
        if callable(labels):
            labels = labels(resource)
        elif not isinstance(labels, dict):
            if data and hasattr(data, "rfields"):
                # Collect the labels from rfields
                rfields = data.rfields
                labels = {rfield.colname: rfield.label for rfield in rfields}
            else:
                labels = {}

        # Document title
        title = attr.get("title")
        if not title and is_resource:
            crud_strings = current.response.s3.crud_strings[resource.tablename]
            if crud_strings:
                title = crud_strings["title_list"]

        # Instantiate the doc template
        doc = S3PDFCardTemplate(
            pagesize,
            cardsize,
            margins=attr.get("margins"),
            spacing=attr.get("spacing"),
            title=title,
        )

        # Produce the flowables
        flowables = self.get_flowables(
            layout,
            resource,
            items,
            labels=labels,
            cards_per_page=doc.cards_per_page,
        )

        # Build the doc
        output_stream = StringIO()
        doc.build(
            flowables,
            output_stream,
            #canvasmaker=canvas.Canvas,   # is default
        )

        output_stream.seek(0)
        return output_stream
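
A hedged usage sketch for the non-resource path of encode(), where the data items are passed in directly; the encoder instantiation and all field names here are illustrative, not taken from the original code:

exporter = S3PDFCard()  # hypothetical codec instance
items = [{"person.name": "Jane Doe", "person.id": "12345"}]
output = exporter.encode(items,
                         labels={"person.name": "Name",
                                 "person.id": "ID"},
                         pagesize="A4",
                         title="ID Cards")
pdf_bytes = output.read()  # encode() returns a rewound StringIO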
Example #46
0
def save_image(ax=None):
    # Render the figure owning *ax* (or the current figure) into an
    # in-memory SVG buffer.
    fig = ax.figure if ax is not None else plt.gcf()
    image_data = StringIO()
    fig.savefig(image_data, format='svg')
    image_data.seek(0)
    return image_data
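
Typical use of the returned buffer, e.g. to grab the SVG markup of the current figure (the plot itself is illustrative):

import matplotlib.pyplot as plt

plt.plot([1, 2, 3], [4, 5, 6])
svg_markup = save_image().read()  # the buffer comes back already rewound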
Example #47
0
    def __call__(self, REQUEST=None, __ick__=None, src__=0, test__=0, **kw):
        """Call the database method

        The arguments to the method should be passed via keyword
        arguments, or in a single mapping object. If no arguments are
        given, and if the method was invoked through the Web, then the
        method will try to acquire and use the Web REQUEST object as
        the argument mapping.

        The returned value is a sequence of record objects.
        """

        __traceback_supplement__ = (SQLMethodTracebackSupplement, self)

        if REQUEST is None:
            if kw:
                REQUEST = kw
            else:
                if hasattr(self, 'REQUEST'):
                    REQUEST = self.REQUEST
                else:
                    REQUEST = {}

        # connection hook
        c = self.connection_id
        # for backwards compatibility
        hk = self.connection_hook
        # go get the connection hook and call it
        if hk:
            c = getattr(self, hk)()

        try:
            dbc = getattr(self, c)
        except AttributeError:
            raise AttributeError, (
                "The database connection <em>%s</em> cannot be found." % (c))

        try:
            DB__ = dbc()
        except:
            raise DatabaseError, ('%s is not connected to a database' %
                                  self.id)

        if hasattr(self, 'aq_parent'):
            p = self.aq_parent
        else:
            p = None

        argdata = self._argdata(REQUEST)
        argdata['sql_delimiter'] = '\0'
        argdata['sql_quote__'] = dbc.sql_quote__

        security = getSecurityManager()
        security.addContext(self)
        try:
            try:
                query = apply(self.template, (p, ), argdata)
            except TypeError, msg:
                msg = str(msg)
                if string.find(msg, 'client') >= 0:
                    raise NameError("'client' may not be used as an " +
                                    "argument name in this context")
                else:
                    raise
        finally:
            security.removeContext(self)

        if src__:
            return query

        if self.cache_time_ > 0 and self.max_cache_ > 0:
            result = self._cached_result(DB__, query, self.max_rows_, c)
        else:
            result = DB__.query(query, self.max_rows_)

        if hasattr(self, '_v_brain'):
            brain = self._v_brain
        else:
            brain = self._v_brain = getBrain(self.class_file_,
                                             self.class_name_)

        if type(result) is type(''):
            f = StringIO()
            f.write(result)
            f.seek(0)
            result = File(f, brain, p, None)
        else:
            result = Results(result, brain, p, None)
        columns = result._searchable_result_columns()
        if test__ and columns != self._col:
            self._col = columns

        # If run in test mode, return both the query and results so
        # that the template doesn't have to be rendered twice!
        if test__:
            return query, result

        return result
Example #48
0
    def generate_excel(self):
        """
        Membuatkan journal untuk pembayran expense
        """
        self.ensure_one()
        style_default = xlwt.easyxf('font: height 240')
        style_header = xlwt.easyxf('font: height 280, bold on')
        style_bold = xlwt.easyxf(
            'font: bold on; align: horz center; '
            'borders: left thin, top thin, bottom thin, right thin')
        style_table = xlwt.easyxf(
            'font: height 240; borders: left thin, bottom thin, right thin')

        wb = xlwt.Workbook("UTF-8")
        ws = wb.add_sheet('Invoice')
        ws.footer_str = ''
        title = ""

        if self.jenis_inv == 'purchase':
            title = 'Pembelian'
        elif self.jenis_inv == 'invoice':
            title = 'Faktur Penjualan'
        elif self.jenis_inv == 'sangu':
            title = 'Sangu'
        elif self.jenis_inv == 'rent':
            title = 'Rent'
        elif self.jenis_inv == 'fee':
            title = 'Fee Sales'

        y = 0
        x = 0

        ws.col(x).width = 600
        ws.col(x + 1).width = 6200
        ws.col(x + 2).width = 2100
        ws.col(x + 3).width = 6000
        ws.col(x + 4).width = 4200
        ws.col(x + 5).width = 6000
        ws.row(0).height_mismatch = 1
        ws.row(0).height = 300
        ws.row(1).height_mismatch = 1
        ws.row(1).height = 280
        ws.row(2).height_mismatch = 1
        ws.row(2).height = 280
        ws.row(3).height_mismatch = 1
        ws.row(3).height = 280
        ws.row(4).height_mismatch = 1
        ws.row(4).height = 280
        ws.row(5).height_mismatch = 1
        ws.row(5).height = 280

        ws.write(y,
                 x,
                 "{} {}".format(title, self.nomor_urut),
                 style=style_header)
        y += 1
        ws.write(y, x, "Tanggal Invoice", style=style_default)
        ws.write(y, x + 2, self.date_invoice, style=style_default)
        y += 1
        ws.write(y, x, "Customer", style=style_default)
        ws.write(y, x + 2, self.partner_id.name, style=style_default)
        y += 1
        street_name = ""
        if self.partner_id.street:
            street_name = self.partner_id.street
        ws.write(y, x, "Alamat", style=style_default)
        ws.write(y, x + 2, street_name, style=style_default)
        y += 2

        ws.write(y, x, "No", style=style_bold)
        ws.write(y, x + 1, "Nama Barang", style=style_bold)
        ws.write(y, x + 2, "QTY", style=style_bold)
        ws.write(y, x + 3, "Harga/@", style=style_bold)
        ws.write(y, x + 4, "Pajak", style=style_bold)
        ws.write(y, x + 5, "Subtotal tanpa pajak", style=style_bold)
        y += 1

        idx = 0
        for inv_line_id in self.invoice_line_ids:
            ws.row(y).height_mismatch = 1
            ws.row(y).height = 320
            idx += 1
            tax_name = ""
            for tax_id in inv_line_id.invoice_line_tax_ids:
                tax_name += tax_id.name
            ws.write(y, x, idx, style=style_table)
            ws.write(y, x + 1, inv_line_id.product_id.name, style=style_table)
            ws.write(y, x + 2, inv_line_id.quantity, style=style_table)
            ws.write(y, x + 3, inv_line_id.price_unit, style=style_table)
            ws.write(y, x + 4, tax_name, style=style_table)
            ws.write(y,
                     x + 5,
                     inv_line_id.quantity * inv_line_id.price_unit,
                     style=style_table)
            y += 1

        ws.row(y).height = 320
        ws.write(y,
                 x,
                 "Pembayaran dgn cek/giro, dianggap sah jika telah diuangkan",
                 style=style_table)
        ws.write(y, x + 4, "Subtotal", style=style_table)
        ws.write(y, x + 5, self.amount_untaxed, style=style_table)
        y += 1
        ws.row(y).height = 320
        ws.write(y, x + 4, "Taxes", style=style_table)
        ws.write(y, x + 5, self.amount_tax, style=style_table)
        y += 1
        ws.row(y).height = 320
        ws.write(y, x + 4, "Total", style=style_table)
        ws.write(y, x + 5, self.amount_total, style=style_table)
        y += 3
        ws.write(y, x + 1, "Adm. Penjualan,", style=style_default)
        y += 3
        ws.write(y, x + 1, "(______________)", style=style_default)

        fp = StringIO()
        wb.save(fp)
        fp.seek(0)
        data = fp.read()
        fp.close()

        return self.env["ss.download"].download(
            "Invoice_{}.xls".format(self.nomor_urut), data)
Example #49
0
def magic_insert(curs,
                 tablename,
                 data,
                 fields=None,
                 use_insert=0,
                 quoted_table=False):
    r"""Copy/insert a list of dict/list data to database.

    If curs == None, then the copy or insert statements are returned
    as string.  For list of dict the field list is optional, as its
    possible to guess them from dict keys.

    Example:
    >>> magic_insert(None, 'tbl', [[1, '1'], [2, '2']], ['col1', 'col2'])
    'COPY public.tbl (col1,col2) FROM STDIN;\n1\t1\n2\t2\n\\.\n'
    """
    if len(data) == 0:
        return

    # decide how to process
    if hasattr(data[0], 'keys'):
        if fields is None:
            fields = data[0].keys()
        if use_insert:
            row_func = _gen_dict_insert
        else:
            row_func = _gen_dict_copy
    else:
        if fields is None:
            raise Exception("Non-dict data needs field list")
        if use_insert:
            row_func = _gen_list_insert
        else:
            row_func = _gen_list_copy

    qfields = [skytools.quote_ident(f) for f in fields]
    if quoted_table:
        qtablename = tablename
    else:
        qtablename = skytools.quote_fqident(tablename)

    # init processing
    buf = StringIO()
    if curs is None and use_insert == 0:
        fmt = "COPY %s (%s) FROM STDIN;\n"
        buf.write(fmt % (qtablename, ",".join(qfields)))

    # process data
    for row in data:
        buf.write(row_func(qtablename, row, fields, qfields))
        buf.write("\n")

    # if user needs only string, return it
    if curs is None:
        if use_insert == 0:
            buf.write("\\.\n")
        return buf.getvalue()

    # do the actual copy/inserts
    if use_insert:
        curs.execute(buf.getvalue())
    else:
        buf.seek(0)
        hdr = "%s (%s)" % (qtablename, ",".join(qfields))
        curs.copy_from(buf, hdr)
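
At its core this is the standard buffer-to-COPY handoff. A sketch assuming a psycopg2 cursor `curs` and a two-column table `tbl`:

from StringIO import StringIO

buf = StringIO()
buf.write("1\tfoo\n2\tbar\n")  # tab-separated rows, as _gen_list_copy emits
buf.seek(0)
curs.copy_from(buf, "tbl", columns=("col1", "col2"))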
Example #50
0
class Stats(object):
    """Wrapper around pstats.Stats class."""

    IGNORE_FUNC_NAMES = ['function', '']
    DEFAULT_SORT_ARG = 'cumulative'
    SORT_ARGS = {
        'ncalls': 'calls',
        'tottime': 'time',
        'cumtime': 'cumulative',
        'filename': 'module',
        'lineno': 'nfl',
    }

    STATS_LINE_REGEX = r'(.*)\((.*)\)$'
    HEADER_LINE_REGEX = r'ncalls|tottime|cumtime'

    def __init__(self, profile_output=None, profile_obj=None):
        self.profile = profile_output or profile_obj
        self.stream = StringIO()
        self.stats = pstats.Stats(self.profile, stream=self.stream)

    def read_stream(self):
        value = self.stream.getvalue()
        self.stream.seek(0)
        self.stream.truncate()
        return value

    def read(self):
        output = self.read_stream()
        lines = output.splitlines(True)
        return "".join(map(self.process_line, lines))

    @classmethod
    def process_line(cls, line):
        # Format header lines (such that clicking on a column header sorts by
        # that column).
        if re.search(cls.HEADER_LINE_REGEX, line):
            for key, val in cls.SORT_ARGS.items():
                url_link = bottle.template(
                    "<a href='{{ url }}'>{{ key }}</a>",
                    url=cls.get_updated_href(SORT_KEY, val),
                    key=key)
                line = line.replace(key, url_link)
        # Format stat lines (such that clicking on the function name drills into
        # the function call).
        match = re.search(cls.STATS_LINE_REGEX, line)
        if match:
            prefix = match.group(1)
            func_name = match.group(2)
            if func_name not in cls.IGNORE_FUNC_NAMES:
                url_link = bottle.template(
                    "<a href='{{ url }}'>{{ func_name }}</a>",
                    url=cls.get_updated_href(FUNC_NAME_KEY, func_name),
                    func_name=func_name)
                line = bottle.template(
                    "{{ prefix }}({{ !url_link }})\n",
                    prefix=prefix, url_link=url_link)
        return line

    @classmethod
    def get_updated_href(cls, key, val):
        href = '?'
        query = dict(bottle.request.query)
        query[key] = val
        for key in query.keys():
            href += '%s=%s&' % (key, query[key])
        return href[:-1]

    def show(self, restriction=''):
        self.stats.print_stats(restriction)
        return self

    def show_callers(self, func_name):
        self.stats.print_callers(func_name)
        return self

    def show_callees(self, func_name):
        self.stats.print_callees(func_name)
        return self

    def sort(self, sort=''):
        sort = sort or self.DEFAULT_SORT_ARG
        self.stats.sort_stats(sort)
        return self
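
A hedged sketch of driving the wrapper with cProfile; read_stream() is used instead of read() because read() rewrites lines into links via bottle's request context:

import cProfile

profiler = cProfile.Profile()
profiler.runcall(sum, range(1000))
stats = Stats(profile_obj=profiler)
print stats.sort('cumulative').show().read_stream()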
Example #51
0
def captcha_image(request, key, scale=1):
    try:
        store = CaptchaStore.objects.get(hashkey=key)
    except CaptchaStore.DoesNotExist:
        # HTTP 410 Gone status so that crawlers don't index these expired urls.
        return HttpResponse(status=410)

    text = store.challenge

    if isinstance(settings.CAPTCHA_FONT_PATH, six.string_types):
        fontpath = settings.CAPTCHA_FONT_PATH
    elif isinstance(settings.CAPTCHA_FONT_PATH, (list, tuple)):
        fontpath = random.choice(settings.CAPTCHA_FONT_PATH)
    else:
        raise ImproperlyConfigured(
            'settings.CAPTCHA_FONT_PATH needs to be a path to a font or list of paths to fonts'
        )

    if fontpath.lower().strip().endswith('ttf'):
        font = ImageFont.truetype(fontpath, settings.CAPTCHA_FONT_SIZE * scale)
    else:
        font = ImageFont.load(fontpath)

    if settings.CAPTCHA_IMAGE_SIZE:
        size = settings.CAPTCHA_IMAGE_SIZE
    else:
        size = getsize(font, text)
        size = (size[0] * 2, int(size[1] * 1.4))

    image = makeimg(size)

    try:
        PIL_VERSION = int(NON_DIGITS_RX.sub('', Image.VERSION))
    except Exception:
        PIL_VERSION = 116
    xpos = 2

    charlist = []
    for char in text:
        if char in settings.CAPTCHA_PUNCTUATION and len(charlist) >= 1:
            charlist[-1] += char
        else:
            charlist.append(char)
    for char in charlist:
        fgimage = Image.new('RGB', size, settings.CAPTCHA_FOREGROUND_COLOR)
        charimage = Image.new('L', getsize(font, ' %s ' % char), '#000000')
        chardraw = ImageDraw.Draw(charimage)
        chardraw.text((0, 0), ' %s ' % char, font=font, fill='#ffffff')
        if settings.CAPTCHA_LETTER_ROTATION:
            if PIL_VERSION >= 116:
                charimage = charimage.rotate(
                    random.randrange(*settings.CAPTCHA_LETTER_ROTATION),
                    expand=0,
                    resample=Image.BICUBIC)
            else:
                charimage = charimage.rotate(
                    random.randrange(*settings.CAPTCHA_LETTER_ROTATION),
                    resample=Image.BICUBIC)
        charimage = charimage.crop(charimage.getbbox())
        maskimage = Image.new('L', size)

        maskimage.paste(charimage, (xpos, from_top, xpos + charimage.size[0],
                                    from_top + charimage.size[1]))
        size = maskimage.size
        image = Image.composite(fgimage, image, maskimage)
        xpos = xpos + 2 + charimage.size[0]

    if settings.CAPTCHA_IMAGE_SIZE:
        # centering captcha on the image
        tmpimg = makeimg(size)
        tmpimg.paste(image,
                     (int((size[0] - xpos) / 2),
                      int((size[1] - charimage.size[1]) / 2 - from_top)))
        image = tmpimg.crop((0, 0, size[0], size[1]))
    else:
        image = image.crop((0, 0, xpos + 1, size[1]))
    draw = ImageDraw.Draw(image)

    for f in settings.noise_functions():
        draw = f(draw, image)
    for f in settings.filter_functions():
        image = f(image)

    out = StringIO()
    image.save(out, "PNG")
    out.seek(0)

    response = HttpResponse(content_type='image/png')
    response.write(out.read())
    response['Content-length'] = out.tell()

    return response
Example #52
0
class CopyPipe(object):
    "Splits one big COPY to chunks."

    def __init__(self,
                 dstcurs,
                 tablename=None,
                 limit=512 * 1024,
                 sql_from=None):
        self.tablename = tablename
        self.sql_from = sql_from
        self.dstcurs = dstcurs
        self.buf = StringIO()
        self.limit = limit
        # hook for new data; the hook func should return the (possibly
        # modified) data:
        #   def write_hook(obj, data):
        #       return data
        self.write_hook = None
        # hook called on flush; the hook func's result is discarded:
        #   def flush_hook(obj):
        #       return None
        self.flush_hook = None
        self.total_rows = 0
        self.total_bytes = 0

    def write(self, data):
        "New data from psycopg"
        if self.write_hook:
            data = self.write_hook(self, data)

        self.total_bytes += len(data)
        self.total_rows += data.count("\n")

        if self.buf.tell() >= self.limit:
            pos = data.find('\n')
            if pos >= 0:
                # split at newline
                p1 = data[:pos + 1]
                p2 = data[pos + 1:]
                self.buf.write(p1)
                self.flush()

                data = p2

        self.buf.write(data)

    def flush(self):
        "Send data out."

        if self.flush_hook:
            self.flush_hook(self)

        if self.buf.tell() <= 0:
            return

        self.buf.seek(0)
        if self.sql_from:
            self.dstcurs.copy_expert(self.sql_from, self.buf)
        else:
            self.dstcurs.copy_from(self.buf, self.tablename)
        self.buf.seek(0)
        self.buf.truncate()
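
Wiring the pipe between two psycopg2 cursors might look like this (a sketch; the cursors and table names are placeholders):

pipe = CopyPipe(dst_curs, tablename="target_tbl")
src_curs.copy_expert("COPY source_tbl TO STDOUT", pipe)  # psycopg2 feeds pipe.write()
pipe.flush()  # push out the final partial chunk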
Example #53
0
def Pack(folderPath, compressionLevel=compressionLevel, useTmpFile=useTmpFile):
    """Takes absolute folder path with folder ending on " FbRB" and creates an fbrb archive in the same directory."""

    folderPath = lp(folderPath)
    if not os.path.isdir(folderPath) or folderPath[-5:] != " FbRB": return
    PrintPath(folderPath)  ###################

    topLevelLength = len(folderPath) + 1  # For the RELATIVE pathnames to put in the FbRB.

    strings = ""  # The list of strings at the beginning of the metadata. To be filled out.
    filetypeDic = dict()  # filetype vs offset. Tracks filetypes to omit string duplicates in the metadata.
    entries = ""  # 24 bytes each, 6 ints. To be filled out.
    fileCount = 0
    payloadOffset = 0  # Where the uncompressed payload starts; sum of all file lengths so far.

    if useTmpFile: payload = tempfile.TemporaryFile()
    else: payload = StringIO()
    if compressionLevel:
        payloadHandle = gzip.GzipFile(fileobj=payload,
                                      mode="wb",
                                      compresslevel=compressionLevel,
                                      filename="")
    else:
        payloadHandle = payload  # uncompressed: write straight into the payload buffer

    for dir0, dirs, fnames in os.walk(folderPath):
        for fname in fnames:
            # Validate file.
            rawFilename, extension = os.path.splitext(fname)
            extension = extension[1:].lower()
            if extension not in validExtensions: continue
            fileCount += 1

            # Determine the filename (including relative path) to use inside the archive.
            # Restore filename extensions to res, dbx, bin, dbmanifest.
            relativeDir = dir0.replace("\\", "/")[topLevelLength:]
            if relativeDir: relativeDir += "/"
            if extension == "dbxdeleted":
                relativePath = relativeDir + fname[:-7]
            elif extension in ("dbx", "bin", "dbmanifest"):
                relativePath = relativeDir + fname
            else:
                relativePath = relativeDir + rawFilename + ".res"
            relativePath = str(relativePath)  # Fix unicode issues.

            # Write path to strings section.
            pathOffset = len(strings)  # Current offset in the strings section.
            strings += relativePath + "\0"

            # Write filetype to strings section.
            # Check if the filetype has been used before. If so, refer to the string already in use.
            filetype = validExtensions[extension]
            if filetype in filetypeDic:
                filetypeOffset = filetypeDic[filetype]
            else:
                filetypeOffset = len(strings)
                filetypeDic[filetype] = filetypeOffset
                strings += filetype + "\x00"

            undeleteFlag = 0 if extension.endswith("deleted") else 0x10000

            # Write to payload section and retrieve payload size along the way.
            f = open(dir0 + "\\" + fname, "rb")
            payloadHandle.write(f.read())
            payloadSize = f.tell()
            f.close()

            # Add the meta entry.
            entries += pack(">6I", pathOffset, undeleteFlag, payloadOffset,
                            payloadSize, payloadSize, filetypeOffset)
            payloadOffset += payloadSize

    if compressionLevel:
        zippedFlag = "\x01"
        # The payload stream itself now contains all the data, so the
        # additional gzip handle is not needed anymore.
        payloadHandle.close()
    else:
        zippedFlag = "\x00"

    # Make decompressed metadata, then compress it and store it as *metadata* variable.
    metaStream = StringIO()
    zippedMetaHandle = gzip.GzipFile(fileobj=metaStream,
                                     mode="wb",
                                     compresslevel=compressionLevel or 1)
    zippedMetaHandle.write("\x00\x00\x00\x02" + pack(">I", len(strings)) +
                           strings + pack(">I", fileCount) + entries +
                           zippedFlag + pack(">I", payloadOffset))
    zippedMetaHandle.close()
    metadata = metaStream.getvalue()
    metaStream.close()

    # Create the fbrb archive.
    out = open(folderPath[:-5] + ".fbrb", "wb")
    payload.seek(0)
    out.write("FbRB" + pack(">I", len(metadata)))
    out.write(metadata)
    while 1:
        buff = payload.read(BUFFSIZE)
        if buff: out.write(buff)
        else: break
    out.close()
    payload.close()
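
The gzip-into-a-memory-buffer idiom used above for both metadata and payload, in isolation (Python 2 StringIO assumed):

import gzip
from StringIO import StringIO

buf = StringIO()
gz = gzip.GzipFile(fileobj=buf, mode="wb", compresslevel=6, filename="")
gz.write("payload bytes")
gz.close()                   # flushes the gzip trailer into buf
compressed = buf.getvalue()  # buf itself stays open and readable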
Example #54
0
class DataWriter(object):
    def __init__(self, fp, debug=0):
        self.fp = fp
        self.bpos = 0
        self.buff = 0
        self.fpstack = []
        self.debug = debug
        return

    def push(self):
        self.fpstack.append(self.fp)
        self.fp = StringIO()
        return

    def pop(self):
        assert self.fpstack, 'empty fpstack'
        self.fp.seek(0)
        data = self.fp.read()
        self.fp = self.fpstack.pop()
        return data

    def close(self):
        self.finishbits()
        assert not self.fpstack, 'fpstack not empty'
        return

    # fixed bytes write

    def write(self, *args):
        for x in args:
            self.fp.write(x)
        return

    def writeui8(self, *args):
        for x in args:
            self.fp.write(chr(x))
        return

    def writesi8(self, *args):
        for x in args:
            self.fp.write(pack('<b', x))
        return

    def writeui16(self, *args):
        for x in args:
            self.fp.write(pack('<H', x))
        return

    def writeub16(self, *args):
        for x in args:
            self.fp.write(pack('>H', x))
        return

    def writesi16(self, *args):
        for x in args:
            self.fp.write(pack('<h', x))
        return

    def writeub24(self, *args):
        for x in args:
            self.fp.write(pack('>L', x)[1:4])
        return

    def writeui32(self, *args):
        for x in args:
            self.fp.write(pack('<L', x))
        return

    def writeub32(self, *args):
        for x in args:
            self.fp.write(pack('>L', x))
        return

    def writergb(self, (r, g, b)):
        self.writeui8(r, g, b)
        return
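
A hedged sketch of the push/pop capture pattern above: nested data is serialized into a side buffer, then spliced back into the outer stream:

w = DataWriter(StringIO())
w.push()             # redirect writes into a fresh in-memory buffer
w.writeui16(0x1234)
inner = w.pop()      # '\x34\x12' -- little-endian
w.write(inner)       # splice the captured bytes into the outer stream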
Example #55
0
 def dumps(self, arg, proto=0):
     f = StringIO()
     p = cPickle.Pickler(f, proto)
     p.dump(arg)
     f.seek(0)
     return f.read()
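
The matching read side for a round trip would be the mirror image (a sketch under the same Python 2 / cPickle assumptions):

 def loads(self, s):
     f = StringIO(s)
     p = cPickle.Unpickler(f)
     return p.load()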
Example #56
0
def Unpack(archivePath, targetFolder=unpackFolder):
    """Takes absolute file path of an fbrb archive and extracts all contents into the target folder, with an additional " FbRB" suffix.
    If targetFolder is an empty string, extract in the directory of the original files."""

    archivePath = lp(archivePath)
    # Check that archivePath leads to a valid FbRB archive.
    if archivePath[-5:].lower() != ".fbrb": return
    f = open(archivePath, "rb")
    if f.read(4) != "FbRB":
        f.close()
        return
    PrintPath(archivePath)  ###################

    # Determine the target folder for later on.
    if targetFolder:
        # Use the given unpackFolder.
        targetFolder = os.path.normpath(lp(targetFolder) + " FbRB")
    else:
        # Create the folder in the same directory as the archive.
        targetFolder = os.path.normpath(archivePath[:-5] + " FbRB")
    # Create the directory now in case the fbrb is completely empty.
    MakeLongDirs(targetFolder)

    # Read the archive. An fbrb archive contains two gzipped files (metadata and payload).
    compressedMetaSize = unpack(">I", f.read(4))[0]
    zippedMetadata = StringIO(f.read(compressedMetaSize))
    zippedPayload = StringIO(f.read())
    f.close()

    # Unzip meta in memory and make it accessible as a stream.
    metadataHandle = gzip.GzipFile(mode='rb', fileobj=zippedMetadata)
    metadata = StringIO(metadataHandle.read())
    metadataHandle.close()

    # Parse metadata.
    unknown2, stringSize = unpack(">II", metadata.read(8))
    assert unknown2 == 2
    metadata.seek(stringSize, 1)
    fileCount = unpack(">I", metadata.read(4))[0]
    entries = [
        FileMeta(unpack(">6I", metadata.read(24)))
        for fileNumber in xrange(fileCount)
    ]
    zipped = metadata.read(1) == "\x01"
    totalPayloadSize = unpack(">I", metadata.read(4))[0]

    # Make payload accessible as stream.
    if zipped:
        payloadHandle = gzip.GzipFile(mode='rb', fileobj=zippedPayload)
        payload = StringIO(payloadHandle.read())
        payloadHandle.close()
    else:
        payload = zippedPayload

    # Go through the file meta and write the files to the target folder.
    for entry in entries:
        path = ReadNullTerminatedString(metadata, entry.pathOffset)
        # Original file extensions: bin, res, dbmanifest, dbx.
        _, extension = os.path.splitext(path)
        # Lowercase file extensions are preferred.
        filetype = ReadNullTerminatedString(metadata, entry.filetypeOffset).lower()

        # Incorporate the file type into the file extension to make the unpacking lossless.
        if filetype == "*deleted*": path += "deleted"
        elif filetype == "<non-resource>":
            if extension == ".res": path = path[:-len(extension)] + ".nonres"
            else: pass  # Keep the original file extension.
        else: path = path[:-len(extension)] + "." + filetype

        # Write to file.
        out = open2(os.path.join(targetFolder, os.path.normpath(path)), "wb")
        payload.seek(entry.payloadOffset)
        out.write(payload.read(entry.payloadSize))
        out.close()
Example #57
0
	def rg_export(self):
		if self.date_from and self.date_to:
			
			# if not self.attachment_id:
			inv_ids = self.env['account.invoice'].sudo().search([('date_invoice','>=',self.date_from),('date_invoice','<=',self.date_to)]) #,('type','=','out_invoice')
			
			if not inv_ids:
				raise ValidationError('No invoices found in this date range')
			
			# File Name
			# file_name = self.name
			
			# Created Excel Workbook and Sheet
			workbook = xlwt.Workbook()
			worksheet = workbook.add_sheet('Sheet 1')
			
			main_style = xlwt.easyxf('font: bold on, height 400; align: wrap 1, vert centre, horiz center; borders: bottom thick, top thick, left thick, right thick')
			sp_style = xlwt.easyxf('font: bold on, height 350;')
			header_style = xlwt.easyxf('font: bold on; align: wrap 1,  horiz center; borders: bottom thin, top thin, left thin, right thin')
			base_style = xlwt.easyxf('align: wrap 1; borders: bottom thin, top thin, left thin, right thin')
			merge_style = xlwt.easyxf('font: bold on,height 200; align: wrap 1,  horiz center; borders: bottom thin, top thin, left thin, right thin')
			
			# worksheet.write_merge(0, 1, 0, 7, file_name, main_style)
			row_index = 0
			
			worksheet.col(0).width = 4000
			worksheet.col(1).width = 4000
			worksheet.col(2).width = 6000
			worksheet.col(3).width = 6000
			worksheet.col(4).width = 6000
			worksheet.col(5).width = 5000
			worksheet.col(6).width = 6000
			worksheet.col(7).width = 6000
			worksheet.col(8).width = 5000
			worksheet.col(9).width = 6000
			worksheet.col(10).width = 16000
			worksheet.col(11).width = 4000
			
			# print "CCCCCCCCCCCCCCCCC",self.company_id._get_address_data
			info = "R.G. - 1 DAILY STOCK ACCOUNT :" +"\n Name of Unit: "+ (str(self.company_id.name) or '') +"\n Address: "+ (str(self.company_id.street) or '') +" "+ (str(self.company_id.street2) or '') +" "+ (str(self.company_id.state_id.name) or '') +" "+ (str(self.company_id.zip) or '')+"\n"+ "C.Ex. Regn No: " + (str(self.company_id.company_registry) or '') + "\n"+ "Name of Comodity: "+ "\n"+ "LIFT CONTROLS BOXES/UNITS: " +"\n"+ "Unit of Quanity :"+"\n"+ "NOS:"
			worksheet.write_merge(0, 6, 0, 6, info,merge_style)
			
			# worksheet.write(row_index, 0, "R.G. - 1 DAILY STOCK ACCOUNT :", base_style)
			# worksheet.write(row_index, 1, "Name of Unit :", base_style)
			# worksheet.write(row_index, 2, self.company_id.name, base_style)
			# worksheet.write(row_index, 3, "Address :", base_style)
			# worksheet.write(row_index, 4, self.display_address, base_style)
			# worksheet.write(row_index, 5, "C.Ex. Regn No :", base_style)
			# worksheet.write(row_index, 6, self.company_id.company_registry, base_style)
			# worksheet.write(row_index, 7, "Name of Comodity :", base_style)
			# worksheet.write(row_index, 8, "LIFT CONTROLS BOXES/UNITS", base_style)
			# worksheet.write(row_index, 9, "Unit of Quanity :", base_style)
			# worksheet.write(row_index, 10, "NOS", base_style)
			
			
			if inv_ids:
				# Headers
				header_fields = ['Invoice Date','Qty of Boxes','Untaxed Invoice Amount','Excise Tax Rate %','Excise Tax Value','Freight Charges','Invoice Number','Total Invoice Amount','Location','GSTIN NO.','Customer','PO NO']
				# row_index += 1
				# worksheet.write_merge(row_index, row_index, 0, 4, "Name of Comodity :", sp_style)
				worksheet.row(row_index).height = 400
				row_index += 7
	
				sp_updates = []
				for index, value in enumerate(header_fields):
					worksheet.write(row_index, index, value, header_style)
				row_index += 1
				sn = 1
				for record in inv_ids:
					tax_ids = []
					taxes = ''
					if self.company_id.id == record.company_id.id:
						if record.company_id.id == 1:
							file_name ='RG1-U1'
						else:
							file_name ='RG1-U2'
						invoice_date = datetime.strptime(record.date_invoice, '%Y-%m-%d').strftime('%d-%b-%y')
						
						
						if record.type == "out_invoice" and self.voucher_type=="SO":
							box_qty = 0
							for box in  record.product_packaging_one2many:
								# if len(box):
									box_qty += box.qty
							quot_ids = self.env['sale.order'].search([('name','=',record.origin)])
							for tax_id in record.invoice_line_ids[0].invoice_line_tax_ids:
								tax_ids.append(str(tax_id.amount))
							taxes = ",".join(tax_ids)
							for rec in quot_ids:
								worksheet.write(row_index, 0, invoice_date, base_style)
								worksheet.write(row_index, 1, box_qty, base_style)
								worksheet.write(row_index, 2, record.amount_untaxed, base_style)
								worksheet.write(row_index, 3, taxes, base_style)
								worksheet.write(row_index, 4, record.amount_tax, base_style)
								worksheet.write(row_index, 5, record.amount_freight, base_style)
								worksheet.write(row_index, 6, record.number, base_style)
								worksheet.write(row_index, 7, record.amount_total, base_style)
								worksheet.write(row_index, 8, rec.partner_shipping_id.city, base_style)
								worksheet.write(row_index, 9, record.partner_id.gstin_no or '', base_style)
								worksheet.write(row_index, 10, record.partner_id.name, base_style)
								worksheet.write(row_index, 11, rec.name, base_style)
								
								sn +=1
								row_index += 1
								
						if record.type == "in_invoice" and self.voucher_type=="PO":
							rfq_ids = self.env['purchase.order'].search([('name','=',record.origin)])
							for rec in rfq_ids:
								worksheet.write(row_index, 0, invoice_date, base_style)
								worksheet.write(row_index, 1, 0, base_style)
								worksheet.write(row_index, 2, record.amount_untaxed, base_style)
								worksheet.write(row_index, 3, taxes, base_style)
								worksheet.write(row_index, 4, record.amount_tax, base_style)
								worksheet.write(row_index, 5, record.amount_freight, base_style)
								worksheet.write(row_index, 6, record.number, base_style)
								worksheet.write(row_index, 7, record.amount_total, base_style)
								worksheet.write(row_index, 8, rec.partner_id.city, base_style)
								worksheet.write(row_index, 9, record.partner_id.gstin_no or '', base_style)
								worksheet.write(row_index, 10, record.partner_id.name, base_style)
								worksheet.write(row_index, 11, rec.name, base_style)
	
								sn +=1
								row_index += 1
			
			
			fp = StringIO()
			workbook.save(fp)
			fp.seek(0)
			data = fp.read()
			fp.close()
			encoded_data = base64.encodestring(data)
			local_tz = pytz.timezone(self._context.get('tz') or 'UTC')
			attach_vals = {
				'name':file_name,
				'datas':encoded_data,
				'datas_fname':'%s.xls' % ( file_name ),
				'res_model':'rg.export.report',
			}
			doc_id = self.env['ir.attachment'].create(attach_vals)
			self.attachment_id = doc_id.id
		
		return {
			'type' : 'ir.actions.act_url',
			'url': '/web/binary/download_document?model=%s&field=%s&id=%s&filename=%s.xls' % (self.attachment_id.res_model,'datas',self.id,self.attachment_id.name),
			'target': 'self',
			}
Example #58
0
 def _get_excel_trial_data(self, cr, uid, context=None):
     period_name = context.get('period_id')
     workbook = xlwt.Workbook()
     worksheet = workbook.add_sheet('Sheet 1')
     font = xlwt.Font()
     font.bold = True
     header = xlwt.easyxf('font: bold 1, height 280')
     bottomheader = xlwt.easyxf('font: bold 1, height 200')
     header1 = xlwt.easyxf(
         'pattern: pattern solid, fore_colour white; borders: top double, bottom double, bottom_color black; font: bold on, height 180, color black; align: wrap off'
     )
     style = xlwt.easyxf('font: height 180')
     worksheet.col(0).width = 5000
     worksheet.col(1).width = 5000
     worksheet.col(2).width = 5000
     worksheet.row(0).height = 500
     worksheet.row(1).height = 500
     worksheet.row(2).height = 500
     company_name = self.pool.get('res.users').browse(cr, uid,
                                                      uid).company_id.name
     worksheet.write(0, 1, company_name, header)
     worksheet.write(1, 1, period_name, header)
     worksheet.write(2, 1, "Trial Balance", header)
     worksheet.write(4, 0, "Account", header1)
     worksheet.write(4, 1, "", header1)
     worksheet.write(4, 2, "Debit", header1)
     worksheet.write(4, 3, "Credit", header1)
     worksheet.write(4, 4, "YTD Debit", header1)
     worksheet.write(4, 5, "YTD Credit", header1)
     row = 5
     context['form'].update(
         {'fiscalyear_id': context['form']['fiscalyear_id'][0]})
     account_balance_inherit_obj = account_balance(cr, uid, '', context)
     acc_data = account_balance_inherit_obj.lines(context.get('form'), [1],
                                                  None)
     tot_deb = tot_cre = tot_ytd_deb = tot_ytd_cre = 0.00
     for acc in acc_data:
         worksheet.write(row, 0, acc['name'], style)
         worksheet.write(row, 2, round(acc['debit'] or 0.00, 2), style)
         worksheet.write(row, 3, round(acc['credit'] or 0.00, 2), style)
         worksheet.write(row, 4, round(acc['ytd_debit'] or 0.00, 2), style)
         worksheet.write(row, 5, round(acc['ytd_credit'] or 0.00, 2), style)
         tot_deb += acc['debit']
         tot_cre += acc['credit']
         tot_ytd_deb += acc['ytd_debit']
         tot_ytd_cre += acc['ytd_credit']
         row += 1
     row += 2
     worksheet.write(row, 0, 'Total', header1)
     worksheet.write(row, 1, "", header1)
     worksheet.write(row, 2, round(tot_deb or 0.00, 2), header1)
     worksheet.write(row, 3, round(tot_cre or 0.00, 2), header1)
     worksheet.write(row, 4, round(tot_ytd_deb or 0.00, 2), header1)
     worksheet.write(row, 5, round(tot_ytd_cre or 0.00, 2), header1)
     row += 2
     worksheet.write(row, 0, 'Difference', header1)
     worksheet.write(row, 1, "", header1)
     worksheet.write(row, 2, "", header1)
     worksheet.write(row, 3, round(tot_deb - tot_cre or 0.00, 2), header1)
     worksheet.write(row, 4, "", header1)
     worksheet.write(row, 5, round(tot_ytd_deb - tot_ytd_cre or 0.00, 2),
                     header1)
     fp = StringIO()
     workbook.save(fp)
     fp.seek(0)
     data = fp.read()
     fp.close()
     return base64.b64encode(data)
Example #59
0
    def test_different_resolutions(self, src_dimensions):
        """
        Test various resolutions of images to make thumbnails of.

        Note that our test sizes are small=(200, 100) and large=(400, 200).

        1. Images should won't be blown up if it's too small, so a (100, 50)
           resolution image will remain (100, 50).
        2. However, images *will* be converted using our format and quality
           settings (JPEG, 75% -- the PIL default). This is because images with
           relatively small dimensions not compressed properly.
        3. Image thumbnail naming will maintain the naming convention of the
           target resolution, even if the image was not actually scaled to that
           size (i.e. it was already smaller). This is mostly because it's
           simpler to be consistent, but it also lets us more easily tell which
           configuration a thumbnail was created under.
        """
        # Create a source image...
        image = Image.new("RGB", src_dimensions, "blue")
        image_buff = StringIO()
        image.save(image_buff, format="PNG")
        image_buff.seek(0)
        image_name = "src_course_image.png"

        course = CourseFactory.create(course_image=image_name)

        # Save the image to the contentstore...
        course_image_asset_key = StaticContent.compute_location(
            course.id, course.course_image)
        course_image_content = StaticContent(course_image_asset_key,
                                             image_name, 'image/png',
                                             image_buff)
        contentstore().save(course_image_content)

        # Now generate the CourseOverview...
        config = CourseOverviewImageConfig.current()
        course_overview = CourseOverview.get_from_id(course.id)
        image_urls = course_overview.image_urls

        for image_url, target in [(image_urls['small'], config.small),
                                  (image_urls['large'], config.large)]:
            image_key = StaticContent.get_location_from_path(image_url)
            image_content = AssetManager.find(image_key)
            image = Image.open(StringIO(image_content.data))

            # Naming convention for thumbnail
            self.assertTrue(
                image_url.endswith(
                    'src_course_image-png-{}x{}.jpg'.format(*target)))

            # Actual thumbnail data
            src_x, src_y = src_dimensions
            target_x, target_y = target
            image_x, image_y = image.size

            # I'm basically going to assume the image library knows how to do
            # the right thing in terms of handling aspect ratio. We're just
            # going to make sure that small images aren't blown up, and that
            # we never exceed our target sizes
            self.assertLessEqual(image_x, target_x)
            self.assertLessEqual(image_y, target_y)

            if src_x < target_x and src_y < target_y:
                self.assertEqual(src_x, image_x)
                self.assertEqual(src_y, image_y)
Example #60
0
class Context:
    def __init__(self, file, name, modules, flags=0):
        self.file = file
        self.name = name
        self.modules = modules
        self.flags = flags

        self.indent = 2
        self.__indentstr = '  '

        self.codeobjs = []
        self.try_blocks = []
        self.exc_blocks = 0
        self.loop_blocks = []
        self.finally_blocks = []
        self.labels = []

        self.stack_level_blocks = []

        self.buf = []
        self.i = 0

        self.codebuffer = StringIO()
        self.__decls = [
            # (type, name, default value, deref)
            ('err', 'Py_ssize_t', '0', False),
            ('retval', 'PyObject*', 'NULL', False),
            ('tmp', 'PyObject*', 'NULL', False),
            ('u', 'PyObject*', 'NULL', False),
            ('v', 'PyObject*', 'NULL', False),
            ('w', 'PyObject*', 'NULL', False),
            ('x', 'PyObject*', 'NULL', False),
            ('tb', 'PyObject*', 'NULL', True),
            ('val', 'PyObject*', 'NULL', True),
            ('exc', 'PyObject*', 'NULL', True)
        ]

        self._consts = []
        # Shared lock for register_const; a fresh Lock() created inline in
        # that method would guard nothing across threads.
        self._consts_lock = Lock()

    def finish(self):
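        # Emit the epilogue: reset the exception index, skip over the error
        # handler, and fall into the shared 'end' label, which releases any
        # tracked references before returning retval.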
        self.insert_line('f->f_exci = -1;')
        self.insert_line('goto end;')
        self.insert_line('error:')
        self.insert_line('  retval = NULL;')

        self.labels.append('end')
        self.insert_line('end:')
        for d in self.__decls:
            if d[3]:
                self.insert_line('  Py_XDECREF(%s);' % d[0])

        self.insert_line('f->f_stacktop = stack_pointer;')
        self.insert_line('f->f_lasti = -2;')
        self.insert_line('return retval;')

        self.file.add_common_header('PyObject* %s(PypperoniFrame* f);' % self.name)
        self.file.write('PyObject* %s(PypperoniFrame* f) {\n' % self.name)
        for d in self.__decls:
            if d[2] is not None:
                self.file.write('  %s %s = %s;\n' % (d[1], d[0], d[2]))

            else:
                self.file.write('  %s %s;\n' % (d[1], d[0]))

        self.file.write('  register PyObject** stack_pointer = f->f_stacktop;\n')

        if self.flags & CO_GENERATOR:
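            # Generator re-entry: f->f_lasti records the last label reached
            # before yielding, so emit a dispatch chain that resumes
            # execution at the label that follows it.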
            for i in xrange(len(self.labels) - 1):
                _this = int(self.labels[i])
                _next = self.labels[i + 1]
                if _next != 'end':
                    _next = 'label_%d' % _next

                self.file.write('  if (f->f_lasti == %d) '
                                'goto %s;\n' % (_this, _next))

        self.codebuffer.seek(0)
        self.file.write(self.codebuffer.read() + '}\n')
        self.file.consider_next()

    def begin_block(self):
        self.insert_line('{')
        self.indent += 2
        self.__indentstr += '  '

    def end_block(self):
        self.indent -= 2
        self.__indentstr = self.__indentstr[:-2]
        self.insert_line('}')

    def insert_line(self, line):
        self.codebuffer.write(self.__indentstr)
        self.codebuffer.write(line)
        self.codebuffer.write('\n')

    def insert_handle_error(self, line, label):
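        # Record where the exception happened, then jump to the innermost
        # active try block, or to the function-level error handler if no
        # try block is open.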
        self.insert_line('f->f_exci = %d;' % label)
        self.insert_line('f->f_excline = %d;' % line)
        if self.try_blocks:
            self.insert_line('goto label_%d;' % self.try_blocks[-1])

        else:
            self.insert_line('goto error;')

    def add_decl(self, name, type='PyObject*', val='NULL', deref=True):
        self.__decls.append((name, type, val, deref))

    def add_decl_once(self, name, type='PyObject*', val='NULL', deref=True):
        for n, _, _, _ in self.__decls:
            if n == name:
                return

        self.__decls.append((name, type, val, deref))

    def setup_stack_block(self, label):
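        # Record the current value-stack depth, keyed by label, in
        # f->f_stacklevel so it can be restored when the block exits or
        # unwinds.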
        self.stack_level_blocks.append(label)
        self.insert_line('PyDict_SetItem(f->f_stacklevel, __pypperoni_pyint(%d), '
                         '__pypperoni_pyint(STACK_LEVEL()));' % label)

    def pop_stack_block(self):
        label = self.stack_level_blocks.pop()
        self.insert_restore_stack_label(label)

    def insert_restore_stack_label(self, label):
        levelstr = 'PyInt_AS_LONG(PyDict_GetItem(f->f_stacklevel, __pypperoni_pyint(%d)))' % label
        self.insert_restore_stack(levelstr)

    def insert_restore_stack(self, levelstr):
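        # Emit a C loop that pops and DECREFs values until the stack is back
        # at the given level, mirroring CPython's block unwinding.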
        self.insert_line('while (STACK_LEVEL() > %s)' % levelstr)
        self.begin_block()
        self.insert_line('v = POP();')
        self.insert_line('Py_DECREF(v);')
        self.end_block()

    def insert_label(self, label):
        self.insert_line('label_%d:' % label)
        self.labels.append(label)

    def register_const(self, value):
        with self._consts_lock:
            self._consts.append(value)
            ret = '__%s_get_const(%d)' % (self.file.uid, len(self._consts) - 1)

        return ret

    def register_literal(self, value):
        getter = self.register_const(value)
        return '__pypperoni_const2str(%s) /* %s */' % (getter, value)

    def dumpconsts(self):
        return marshal.dumps(tuple(self._consts))

    def flushconsts(self):
        blob = self.dumpconsts()
        blobsize = len(blob)
        blobptr = '__data_blob_%s' % self.name

        self.file.write('const char %s[%d] = {\n  ' % (blobptr, blobsize))

        for i, c in enumerate(blob, 1):
            self.file.write('%d, ' % ord(c))
            if i % 16 == 0:
                self.file.write('\n  ')

        self.file.write('};\n\n')
        self.file.add_common_header('PyObject* __%s_get_const(Py_ssize_t index);\n' % self.file.uid)
        self.file.write('PyObject* __%s_get_const(Py_ssize_t index) {\n' % self.file.uid)
        self.file.write('  PyObject* it;\n')
        self.file.write('  static PyObject* page = NULL;\n')
        self.file.write('  if (page == NULL) {\n')
        self.file.write('     page = PyMarshal_ReadObjectFromString((char*)%s, %d);\n' % (blobptr, blobsize))
        self.file.write('  }\n')
        self.file.write('  it = PyTuple_GET_ITEM(page, index);\n')
        self.file.write('  Py_INCREF(it);\n')
        self.file.write('  return it;\n')
        self.file.write('}\n\n')
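
A minimal usage sketch for Context, assuming the file argument only needs
write(), add_common_header(), consider_next(), and a uid attribute, and that
CO_GENERATOR and marshal are available in the module as the class's own code
implies. DummyFile is a hypothetical stand-in, not part of Pypperoni:

class DummyFile:
    uid = 'demo'

    def __init__(self):
        self.headers = []
        self.body = []

    def add_common_header(self, line):
        # The real implementation collects prototypes for a shared header.
        self.headers.append(line.rstrip('\n'))

    def write(self, text):
        self.body.append(text)

    def consider_next(self):
        pass

outfile = DummyFile()
ctx = Context(outfile, 'demo_func', modules={})
getter = ctx.register_const(42)      # returns '__demo_get_const(0)'
ctx.insert_line('x = %s;' % getter)
ctx.insert_line('PUSH(x);')
ctx.finish()        # assembles the C function into outfile.body
ctx.flushconsts()   # appends the marshalled constant blob and its getter
print '\n'.join(outfile.headers)
print ''.join(outfile.body)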