def test_create(self):
        #mock file-like objects to read and write
        mock_output = BytesIO()
        test_bytes = b"Testing file!"
        test_bytes_length = len(test_bytes)
        mock_input = BytesIO(test_bytes)
        
        create(output_file=mock_output,
               input_file=mock_input,
               input_file_length=test_bytes_length,
               input_file_name="Testing.file",
               piece_length=512000,
               announce_url="http://localhost:8000",
               comment="Test torrent file",
               created_by="Martin Ashby's Test script...")

        mock_output.seek(0)
        output_dict = bdecode_file(mock_output)

        #check the output has the appropriate keys.
        info_dict = output_dict.get(b"info")
        self.assertTrue(len(info_dict))
        self.assertTrue(info_dict.get(b"piece length") == 512000)
        self.assertTrue(info_dict.get(b"name") == b"Testing.file")
        self.assertTrue(info_dict.get(b"length") == test_bytes_length)
        self.assertTrue(info_dict.get(b"pieces"))
        self.assertTrue(output_dict.get(b"announce") == b"http://localhost:8000")
        self.assertTrue(type(output_dict.get(b"creation date")) == int)
        self.assertTrue(output_dict.get(b"comment") == b"Test torrent file")
        self.assertTrue(output_dict.get(b"created by") == b"Martin Ashby's Test script...")
Example #2
def zip_layer_folder(dir_path, layer_name):
    """
    Create a zip archive with the content of the folder located at `dir_path`
    and name it with `layer_name`.

    Parameters
    ----------
    dir_path: str
        The path to the temporary folder in which are located the files to
        be zipped.

    layer_name: str
        The name of the concerned layer (will be used as file name for the
        zip archive).

    Returns
    -------
    raw_content: bytes
        The zip archive
    archive_name: str
        The name of the archive (used later in the header of the response).
    """
    filenames = os.listdir(dir_path)
    zip_stream = BytesIO()
    myZip = ZipFile(zip_stream, "w", compression=ZIP_DEFLATED)
    for filename in filenames:
        if not filename.endswith('.geojson'):
            f_name = path_join(dir_path, filename)
            myZip.write(f_name, filename, ZIP_DEFLATED)
    myZip.close()
    zip_stream.seek(0)
    return zip_stream.read(), ''.join([layer_name, ".zip"])
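A minimal usage sketch (the directory contents and layer name below are assumptions, not from the original project): write a non-geojson file into a temporary folder and zip it with the helper above.

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    # any file that does not end in .geojson gets included in the archive
    with open(os.path.join(tmp_dir, "layer.shp"), "wb") as fh:
        fh.write(b"dummy shapefile bytes")
    raw_content, archive_name = zip_layer_folder(tmp_dir, "my_layer")
    assert archive_name == "my_layer.zip"
    assert raw_content.startswith(b"PK")  # zip magic bytes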
Example #3
File: csv.py Project: dav009/dask
def bytes_read_csv(b, header, kwargs, dtypes=None, columns=None):
    """ Convert a block of bytes to a Pandas DataFrame

    Parameters
    ----------
    b: bytestring
        The content to be parsed with pandas.read_csv
    header: bytestring
        An optional header to prepend to b
    kwargs: dict
        A dictionary of keyword arguments to be passed to pandas.read_csv
    dtypes: dict
        DTypes to assign to columns

    See Also:
        dask.dataframe.csv.read_csv_from_bytes
    """
    bio = BytesIO()
    if not b.startswith(header.rstrip()):
        bio.write(header)
    bio.write(b)
    bio.seek(0)
    df = pd.read_csv(bio, **kwargs)
    if dtypes:
        coerce_dtypes(df, dtypes)

    if columns and (list(df.columns) != list(columns)):
        raise ValueError("Columns do not match", df.columns, columns)
    return df
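For illustration, a hedged sketch of how a block plus header would be parsed (the column names and values are made up; the function relies on pandas being imported as pd in its module):

header = b"a,b,c\n"
block = b"1,2,3\n4,5,6\n"

# the header is prepended because the block itself does not start with it
df = bytes_read_csv(block, header, kwargs={})
assert list(df.columns) == ["a", "b", "c"]
assert len(df) == 2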
Example #4
def test_read_opts():
    # tests if read is seeing option sets, at initialization and after
    # initialization
    arr = np.arange(6).reshape(1,6)
    stream = BytesIO()
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    back_dict = rdr.get_variables()
    rarr = back_dict['a']
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, squeeze_me=True)
    assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
    rdr.squeeze_me = False
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, byte_order=boc.native_code)
    assert_array_equal(rdr.get_variables()['a'], arr)
    # inverted byte code leads to error on read because of swapped
    # header etc
    rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
    assert_raises(Exception, rdr.get_variables)
    rdr.byte_order = boc.native_code
    assert_array_equal(rdr.get_variables()['a'], arr)
    arr = np.array(['a string'])
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    assert_array_equal(rdr.get_variables()['a'], arr)
    rdr = MatFile5Reader(stream, chars_as_strings=False)
    carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
    assert_array_equal(rdr.get_variables()['a'], carr)
    rdr.chars_as_strings = True
    assert_array_equal(rdr.get_variables()['a'], arr)
Example #5
def test_str_round():
    # from report by Angus McMorland on mailing list 3 May 2010
    stream = BytesIO()
    in_arr = np.array(['Hello', 'Foob'])
    out_arr = np.array(['Hello', 'Foob '])
    savemat(stream, dict(a=in_arr))
    res = loadmat(stream)
    # resulted in ['HloolFoa', 'elWrdobr']
    assert_array_equal(res['a'], out_arr)
    stream.truncate(0)
    stream.seek(0)
    # Make Fortran ordered version of string
    in_str = in_arr.tostring(order='F')
    in_from_str = np.ndarray(shape=in_arr.shape,
                             dtype=in_arr.dtype,
                             order='F',
                             buffer=in_str)
    savemat(stream, dict(a=in_from_str))
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr)
    # unicode save did lead to buffer too small error
    stream.truncate(0)
    stream.seek(0)
    in_arr_u = in_arr.astype('U')
    out_arr_u = out_arr.astype('U')
    savemat(stream, {'a': in_arr_u})
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr_u)
Example #6
def test_empty_string():
    # make sure reading empty string does not raise error
    estring_fname = pjoin(test_data_path, 'single_empty_string.mat')
    fp = open(estring_fname, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    # empty string round trip.  Matlab cannot distinguish
    # between a string array that is empty, and a string array
    # containing a single empty string, because it stores strings as
    # arrays of char.  There is no way of having an array of char that
    # is not empty, but contains an empty string.
    stream = BytesIO()
    savemat(stream, {'a': np.array([''])})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': np.array([], dtype='U1')})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.close()
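The same savemat/loadmat round trip through an in-memory stream, outside of the test harness, looks roughly like this (a minimal sketch using the public scipy.io API):

import numpy as np
from io import BytesIO
from scipy.io import savemat, loadmat

stream = BytesIO()
savemat(stream, {'a': np.arange(6).reshape(1, 6)})
stream.seek(0)
print(loadmat(stream)['a'])  # [[0 1 2 3 4 5]]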
Example #7
    def send_tryton_url(self, path):
        self.send_response(300)
        hostname = (config.get('jsonrpc', 'hostname')
            or unicode(socket.getfqdn(), 'utf8'))
        hostname = '.'.join(encodings.idna.ToASCII(part) for part in
            hostname.split('.'))
        values = {
            'hostname': hostname,
            'path': path,
            }
        content = BytesIO()

        def write(str_):
            content.write(str_.encode('utf-8'))
        write('<html>')
        write('<head>')
        write('<meta http-equiv="Refresh" '
            'content="0;url=tryton://%(hostname)s%(path)s"/>' % values)
        write('<title>Moved</title>')
        write('</head>')
        write('<body>')
        write('<h1>Moved</h1>')
        write('<p>This page has moved to '
            '<a href="tryton://%(hostname)s%(path)s">'
            'tryton://%(hostname)s%(path)s</a>.</p>' % values)
        write('</body>')
        write('</html>')
        length = content.tell()
        content.seek(0)
        self.send_header('Location', 'tryton://%(hostname)s%(path)s' % values)
        self.send_header('Content-type', 'text/html')
        self.send_header('Content-Length', str(length))
        self.end_headers()
        self.copyfile(content, self.wfile)
        content.close()
Example #8
def test_save_dict():
    # Test that dict can be saved (as recarray), loaded as matstruct
    dict_types = ((dict, False),)
    try:
        from collections import OrderedDict
    except ImportError:
        pass
    else:
        dict_types += ((OrderedDict, True),)
    ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
    ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])
    for dict_type, is_ordered in dict_types:
        # Initialize with tuples to keep order for OrderedDict
        d = dict_type([('a', 1), ('b', 2)])
        stream = BytesIO()
        savemat(stream, {'dict': d})
        stream.seek(0)
        vals = loadmat(stream)['dict']
        assert_equal(set(vals.dtype.names), set(['a', 'b']))
        if is_ordered:  # Input was ordered, output in ab order
            assert_array_equal(vals, ab_exp)
        else:  # Not ordered input, either order output
            if vals.dtype.names[0] == 'a':
                assert_array_equal(vals, ab_exp)
            else:
                assert_array_equal(vals, ba_exp)
Example #9
def export(request):
    path = request.GET.get("path")
    if not path:
        raise Http404

    stores = Store.objects.live().filter(pootle_path__startswith=path)
    num_items = stores.count()

    if not num_items:
        raise Http404

    if num_items == 1:
        store = stores.get()
        contents = BytesIO(store.serialize())
        name = os.path.basename(store.pootle_path)
        contents.seek(0)
        return download(contents.read(), name, "application/octet-stream")

    # zip all the stores together
    f = BytesIO()
    prefix = path.strip("/").replace("/", "-")
    if not prefix:
        prefix = "export"
    with BytesIO() as f:
        with ZipFile(f, "w") as zf:
            for store in stores:
                try:
                    data = store.serialize()
                except Exception as e:
                    logging.error("Could not serialize %r: %s",
                                  store.pootle_path, e)
                    continue
                zf.writestr(prefix + store.pootle_path, data)

        return download(f.getvalue(), "%s.zip" % (prefix), "application/zip")
Example #10
    def open(self, name, mode='rb'):
        resp = self.b2.download_file(name)

        output = BytesIO()
        output.write(resp)
        output.seek(0)
        return File(output, name)
Example #11
class TestFileUploadParser(TestCase):
    def setUp(self):
        class MockRequest(object):
            pass
        from io import BytesIO
        self.stream = BytesIO(
            "Test text file".encode('utf-8')
        )
        request = MockRequest()
        request.upload_handlers = (MemoryFileUploadHandler(),)
        request.META = {
            'HTTP_CONTENT_DISPOSITION': 'Content-Disposition: inline; filename=file.txt'.encode('utf-8'),
            'HTTP_CONTENT_LENGTH': 14,
        }
        self.parser_context = {'request': request, 'kwargs': {}}

    def test_parse(self):
        """ Make sure the `QueryDict` works OK """
        parser = FileUploadParser()
        self.stream.seek(0)
        data_and_files = parser.parse(self.stream, None, self.parser_context)
        file_obj = data_and_files.files['file']
        self.assertEqual(file_obj._size, 14)

    def test_get_filename(self):
        parser = FileUploadParser()
        filename = parser.get_filename(self.stream, None, self.parser_context)
        self.assertEqual(filename, 'file.txt'.encode('utf-8'))
Example #12
    def __init__(self, strings=()):
        self.strings = OrderedDict((s, 0) for s in strings)

        self.records = []
        offset = 0
        buf = BytesIO()
        for key in tuple(self.strings.iterkeys()):
            utf8 = utf8_text(key[:self.MAX_STRING_LENGTH])
            l = len(utf8)
            sz_bytes = encint(l)
            raw = sz_bytes + utf8
            if 0xfbf8 - buf.tell() < 6 + len(raw):
                # Records in PDB files cannot be larger than 0x10000, so we
                # stop well before that.
                pad = 0xfbf8 - buf.tell()
                buf.write(b'\0' * pad)
                self.records.append(buf.getvalue())
                buf.seek(0), buf.truncate(0)
                offset = len(self.records) * 0x10000
            buf.write(raw)
            self.strings[key] = offset
            offset += len(raw)

        val = buf.getvalue()
        if val:
            self.records.append(align_block(val))
Example #13
def deserialize(schema, binary):
    bytes_writer = BytesIO()
    bytes_writer.write(binary)
    bytes_writer.seek(0)

    res = fastavro.schemaless_reader(bytes_writer, schema)
    return res
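A matching serialize step and round trip might look like this (a sketch using fastavro's schemaless_writer; the schema and record are assumptions for illustration):

import fastavro
from io import BytesIO

schema = {
    "type": "record",
    "name": "User",
    "fields": [{"name": "name", "type": "string"}],
}

buf = BytesIO()
fastavro.schemaless_writer(buf, schema, {"name": "alice"})

assert deserialize(schema, buf.getvalue()) == {"name": "alice"}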
Example #14
class FakePayload(object):
    """
    A wrapper around BytesIO that restricts what can be read since data from
    the network can't be seeked and cannot be read outside of its content
    length. This makes sure that views can't do anything under the test client
    that wouldn't work in Real Life.
    """
    def __init__(self, content=None):
        self.__content = BytesIO()
        self.__len = 0
        self.read_started = False
        if content is not None:
            self.write(content)

    def __len__(self):
        return self.__len

    def read(self, num_bytes=None):
        if not self.read_started:
            self.__content.seek(0)
            self.read_started = True
        if num_bytes is None:
            num_bytes = self.__len or 0
        assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
        content = self.__content.read(num_bytes)
        self.__len -= num_bytes
        return content

    def write(self, content):
        if self.read_started:
            raise ValueError("Unable to write a payload after he's been read")
        content = force_bytes(content)
        self.__content.write(content)
        self.__len += len(content)
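A quick illustration of the restriction it enforces (assumes Django is installed, since force_bytes comes from django.utils.encoding):

payload = FakePayload(b"hello")
assert len(payload) == 5
assert payload.read(2) == b"he"
assert payload.read() == b"llo"
# payload.read(1) would now fail the "Cannot read more..." assertion,
# and payload.write(b"x") raises ValueError once reading has started.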
Example #15
def _save(im, fp, filename):
    fp.write(_MAGIC)  # (2+2)
    sizes = im.encoderinfo.get("sizes",
                               [(16, 16), (24, 24), (32, 32), (48, 48),
                                (64, 64), (128, 128), (255, 255)])
    width, height = im.size
    # drop sizes larger than the source image or the 255-pixel ICO limit
    sizes = [size for size in sizes
             if size[0] <= width and size[1] <= height
             and size[0] <= 255 and size[1] <= 255]
    sizes = sorted(sizes, key=lambda x: x[0])
    fp.write(struct.pack("H", len(sizes)))  # idCount(2)
    offset = fp.tell() + len(sizes)*16
    for size in sizes:
        width, height = size
        fp.write(struct.pack("B", width))  # bWidth(1)
        fp.write(struct.pack("B", height))  # bHeight(1)
        fp.write(b"\0")  # bColorCount(1)
        fp.write(b"\0")  # bReserved(1)
        fp.write(b"\0\0")  # wPlanes(2)
        fp.write(struct.pack("H", 32))  # wBitCount(2)

        image_io = BytesIO()
        tmp = im.copy()
        tmp.thumbnail(size, Image.ANTIALIAS)
        tmp.save(image_io, "png")
        image_io.seek(0)
        image_bytes = image_io.read()
        bytes_len = len(image_bytes)
        fp.write(struct.pack("I", bytes_len))  # dwBytesInRes(4)
        fp.write(struct.pack("I", offset))  # dwImageOffset(4)
        current = fp.tell()
        fp.seek(offset)
        fp.write(image_bytes)
        offset = offset + bytes_len
        fp.seek(current)
Example #16
class ZippedStoreShaWriter(Sha1Writer):

    """Remembers everything someone writes to it and generates a sha"""
    __slots__ = ('buf', 'zip')

    def __init__(self):
        Sha1Writer.__init__(self)
        self.buf = BytesIO()
        self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)

    def __getattr__(self, attr):
        return getattr(self.buf, attr)

    def write(self, data):
        alen = Sha1Writer.write(self, data)
        self.buf.write(self.zip.compress(data))

        return alen

    def close(self):
        self.buf.write(self.zip.flush())

    def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
        """Seeking currently only supports to rewind written data
        Multiple writes are not supported"""
        if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
            raise ValueError("Can only seek to position 0")
        # END handle offset
        self.buf.seek(0)

    def getvalue(self):
        """:return: string value from the current stream position to the end"""
        return self.buf.getvalue()
Example #17
    def to_pptx(self):
        logger.info('Converting svg -> html -> png -> pptx')
        content = None
        try:
            # convert to png
            png_fn = self._rasterize_png()

            # create blank presentation slide layout
            pres = Presentation()
            blank_slidelayout = pres.slide_layouts[6]
            slide = pres.slides.add_slide(blank_slidelayout)

            self._pptx_add_title(slide)
            self._pptx_add_url(slide)
            self._pptx_add_png(slide, png_fn)
            self._pptx_add_hawc_logo(slide)

            # save as object
            content = BytesIO()
            pres.save(content)
            content.seek(0)

        except Exception as e:
            logger.error(e, exc_info=True)
        finally:
            self.cleanup()

        return content
Example #18
class ChunkBuffer:
    BUFFER_SIZE = 1 * 1024 * 1024

    def __init__(self, key, chunker_params=CHUNKER_PARAMS):
        self.buffer = BytesIO()
        self.packer = msgpack.Packer(unicode_errors='surrogateescape')
        self.chunks = []
        self.key = key
        self.chunker = Chunker(self.key.chunk_seed, *chunker_params)

    def add(self, item):
        self.buffer.write(self.packer.pack(StableDict(item)))
        if self.is_full():
            self.flush()

    def write_chunk(self, chunk):
        raise NotImplementedError

    def flush(self, flush=False):
        if self.buffer.tell() == 0:
            return
        self.buffer.seek(0)
        chunks = list(bytes(s) for s in self.chunker.chunkify(self.buffer))
        self.buffer.seek(0)
        self.buffer.truncate(0)
        # Leave the last partial chunk in the buffer unless flush is True
        end = None if flush or len(chunks) == 1 else -1
        for chunk in chunks[:end]:
            self.chunks.append(self.write_chunk(chunk))
        if end == -1:
            self.buffer.write(chunks[-1])

    def is_full(self):
        return self.buffer.tell() > self.BUFFER_SIZE
Example #19
 def test_simple_bytesio(self):
     f = BytesIO()
     c = Commit()
     c.committer = c.author = b"Jelmer <*****@*****.**>"
     c.commit_time = c.author_time = 1271350201
     c.commit_timezone = c.author_timezone = 0
     c.message = b"This is the first line\nAnd this is the second line.\n"
     c.tree = Tree().id
     write_commit_patch(f, c, b"CONTENTS", (1, 1), version="custom")
     f.seek(0)
     lines = f.readlines()
     self.assertTrue(lines[0].startswith(b"From 0b0d34d1b5b596c928adc9a727a4b9e03d025298"))
     self.assertEqual(lines[1], b"From: Jelmer <*****@*****.**>\n")
     self.assertTrue(lines[2].startswith(b"Date: "))
     self.assertEqual(
         [
             b"Subject: [PATCH 1/1] This is the first line\n",
             b"And this is the second line.\n",
             b"\n",
             b"\n",
             b"---\n",
         ],
         lines[3:8],
     )
     self.assertEqual([b"CONTENTS-- \n", b"custom\n"], lines[-2:])
     if len(lines) >= 12:
         # diffstat may not be present
         self.assertEqual(lines[8], b" 0 files changed\n")
Example #20
    def test_commit_serialization(self):
        assert_commit_serialization(self.gitrwrepo, self.gitrwrepo.head, True)

        rwrepo = self.gitrwrepo
        make_object = rwrepo.odb.store
        # direct serialization - deserialization can be tested afterwards
        # serialization is probably limited on IO
        hc = rwrepo.commit(rwrepo.head)

        nc = 5000
        st = time()
        for i in xrange(nc):
            cm = Commit(rwrepo, Commit.NULL_BIN_SHA, hc.tree,
                        hc.author, hc.authored_date, hc.author_tz_offset,
                        hc.committer, hc.committed_date, hc.committer_tz_offset,
                        str(i), parents=hc.parents, encoding=hc.encoding)

            stream = BytesIO()
            cm._serialize(stream)
            slen = stream.tell()
            stream.seek(0)

            cm.binsha = make_object(IStream(Commit.type, slen, stream)).binsha
        # END commit creation
        elapsed = time() - st

        print("Serialized %i commits to loose objects in %f s ( %f commits / s )"
              % (nc, elapsed, nc / elapsed), file=sys.stderr)
Example #21
def create_dummy_image():
    file = BytesIO()
    image = Image.new('RGBA', size=(50, 50), color=(155, 0, 0))
    image.save(file, 'png')
    file.name = 'test_image.png'
    file.seek(0)
    return file
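One way this helper tends to be used in Django tests (a hedged sketch; SimpleUploadedFile is Django's real helper, but the form or view it would be posted to is not shown here):

from django.core.files.uploadedfile import SimpleUploadedFile

image_file = create_dummy_image()
upload = SimpleUploadedFile(image_file.name,
                            image_file.read(),
                            content_type="image/png")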
Example #22
 def _process(self):
     f = request.files['logo']
     try:
         img = Image.open(f)
     except IOError:
         flash(_('You cannot upload this file as a logo.'), 'error')
         return jsonify_data(content=None)
     if img.format.lower() not in {'jpeg', 'png', 'gif'}:
         flash(_('The file has an invalid format ({format})').format(format=img.format), 'error')
         return jsonify_data(content=None)
     if img.mode == 'CMYK':
         flash(_('The logo you uploaded is using the CMYK colorspace and has been converted to RGB. Please check if '
                 'the colors are correct and convert it manually if necessary.'), 'warning')
         img = img.convert('RGB')
     image_bytes = BytesIO()
     img.save(image_bytes, 'PNG')
     image_bytes.seek(0)
     content = image_bytes.read()
     self.event.logo = content
     self.event.logo_metadata = {
         'hash': crc32(content),
         'size': len(content),
         'filename': os.path.splitext(secure_filename(f.filename, 'logo'))[0] + '.png',
         'content_type': 'image/png'
     }
     flash(_('New logo saved'), 'success')
     logger.info("New logo '%s' uploaded by %s (%s)", f.filename, session.user, self.event)
     return jsonify_data(content=get_logo_data(self.event))
Example #23
	def upload(self, path, filename,buffersize=None,callback=None, local_path=None):
		try:
			self.error = None
			#print('Upload = ' + path + filename)
			#print('Size = %.1f kB' % (os.path.getsize(filename) / 1024.0))
			#print('start upload')
			if local_path:
				file_obj = open(local_path+filename, 'rb')
			else:
				file_obj = open(filename, 'rb')
			offset = 0
			while True:
				if not buffersize:
					filesize = self.conn.storeFile(self.service_name, path+filename, file_obj)
					break
				else:	
					buffer_obj = file_obj.read(buffersize)			
					if buffer_obj:
						buffer_fileobj = BytesIO()
						buffer_fileobj.write(buffer_obj)
						buffer_fileobj.seek(0)
						offset_new = self.conn.storeFileFromOffset(self.service_name, path+filename, buffer_fileobj, offset=offset, truncate=False)
						#return the file position where the next byte will be written.
						offset = offset_new
						if callback:
							callback(offset)
					else:
						break
			file_obj.close()
			#print('upload finished')
		except Exception as e:
			if self.print_errors:
				print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)
			else:
				self.error = 'Error on line {}'.format(sys.exc_info()[-1].tb_lineno) + str(type(e).__name__) + str(e)
Example #24
	def download(self, path, filename,buffersize=None,callback=None, local_path=None):
		try:
			self.error = None
			#print('Download = ' + path + filename)
			attr = self.conn.getAttributes(self.service_name, path+filename)
			#print('Size = %.1f kB' % (attr.file_size / 1024.0))
			#print('start download')
			file_obj = BytesIO()
			if local_path:
				fw = open(local_path+filename, 'wb')
			else:
				fw = open(filename, 'wb')
			offset = 0
			transmit =0
			while True:
				if not buffersize:
					file_attributes, filesize = self.conn.retrieveFile(self.service_name, path+filename, file_obj)
				else:
					file_attributes, filesize = self.conn.retrieveFileFromOffset(self.service_name, path+filename, file_obj,offset=offset,max_length=buffersize)
					if callback:
						transmit = transmit + filesize
						callback(transmit)
				file_obj.seek(offset)
				for line in file_obj:
					fw.write(line)
				offset = offset + filesize
				if (not buffersize) or (filesize == 0):
					break
			fw.close()
			#print('download finished')
		except Exception as e:
			if self.print_errors:
				print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)
			else:
				self.error = 'Error on line {}'.format(sys.exc_info()[-1].tb_lineno) + str(type(e).__name__) + str(e)
Example #25
    def test_read_atom(self):
        """Test that all fields are read from PDB ATOM records"""
        s = BytesIO()
        # PDB is fixed-format; we should be able to read coordinates even
        # without spaces between them
        s.write(b'ATOM      1  N   ALA A   5    3000.0001000.4002000.600'
                b'  2.00  6.40           N\n')
        s.seek(0)

        m = IMP.Model()
        pdb = IMP.atom.read_pdb(s, m)
        atoms = IMP.atom.get_by_type(pdb, IMP.atom.ATOM_TYPE)
        self.assertEqual(len(atoms), 1)
        a = IMP.atom.Atom(atoms[0])
        r = IMP.atom.Residue(a.get_parent())
        c = IMP.atom.Chain(r.get_parent())
        self.assertEqual(a.get_input_index(), 1)
        self.assertEqual(a.get_atom_type().get_string(), 'N')
        # Note: currently don't read alternate location or insertion code
        self.assertEqual(r.get_residue_type().get_string(), 'ALA')
        self.assertEqual(c.get_id(), 'A')
        self.assertEqual(r.get_index(), 5)
        coord = IMP.core.XYZ(a).get_coordinates()
        self.assertAlmostEqual(coord[0], 3000.000, delta=0.001)
        self.assertAlmostEqual(coord[1], 1000.400, delta=0.001)
        self.assertAlmostEqual(coord[2], 2000.600, delta=0.001)
        self.assertAlmostEqual(a.get_occupancy(), 2.00, delta=0.01)
        self.assertAlmostEqual(a.get_temperature_factor(), 6.40, delta=0.01)
Example #26
    def write_data(self, item, last=False, stream_id=None):
        """
        Send a DATA frame that is tracked by the local state machine.

        Write a DATA frame using the H2 Connection object, will only work if the stream is in a state to send
        DATA frames. Uses flow control to split data into multiple data frames if it exceeds the size that can
        be in a single frame.

        :param item: The content of the DATA frame
        :param last: Flag to signal if this is the last frame in stream.
        :param stream_id: Id of stream to send frame on. Will use the request stream ID if None
        """
        if isinstance(item, (text_type, binary_type)):
            data = BytesIO(self.encode(item))
        else:
            data = item

        # Find the length of the data
        data.seek(0, 2)
        data_len = data.tell()
        data.seek(0)

        # If the data is longer than max payload size, need to write it in chunks
        payload_size = self.get_max_payload_size()
        while data_len > payload_size:
            self.write_data_frame(data.read(payload_size), False, stream_id)
            data_len -= payload_size
            payload_size = self.get_max_payload_size()

        self.write_data_frame(data.read(), last, stream_id)
Example #27
 def _download_part( self, part_num ):
     """
     Download a part from the source URL. Returns a BytesIO buffer. The buffer's tell() method
     will return the size of the downloaded part, which may be less than the requested part
     size if the part is the last one for the URL.
     """
     buf = BytesIO( )
     with closing( pycurl.Curl( ) ) as c:
         c.setopt( c.URL, self.url )
         c.setopt( c.WRITEDATA, buf )
         c.setopt( c.FAILONERROR, 1 )
         start, end = self._get_part_range( part_num )
         c.setopt( c.RANGE, "%i-%i" % (start, end - 1) )
         try:
             c.perform( )
         except pycurl.error as e:
             error_code, message = e
             if error_code == c.E_BAD_DOWNLOAD_RESUME:  # bad range for FTP
                 pass
             elif error_code == c.E_HTTP_RETURNED_ERROR:
                 # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.17
                 if c.getinfo( c.RESPONSE_CODE ) == 416:
                     pass
                 else:
                     raise
             else:
                 raise
             buf.truncate( 0 )
             buf.seek( 0 )
     return buf
Example #28
def tar_stream(repo, tree, mtime, format=''):
    """
    Returns a generator that lazily assembles a .tar.gz archive, yielding it in
    pieces (bytestrings). To obtain the complete .tar.gz binary file, simply
    concatenate these chunks.

    'repo' and 'tree' are the dulwich Repo and Tree objects the archive shall be
    created from. 'mtime' is a UNIX timestamp that is assigned as the modification
    time of all files in the resulting .tar.gz archive.
    """
    buf = BytesIO()
    with closing(tarfile.open(None, "w:%s" % format, buf)) as tar:
        for entry_abspath, entry in walk_tree(repo, tree):
            try:
                blob = repo[entry.sha]
            except KeyError:
                # Entry probably refers to a submodule, which we don't yet support.
                continue
            data = ListBytesIO(blob.chunked)

            info = tarfile.TarInfo()
            info.name = entry_abspath
            info.size = blob.raw_length()
            info.mode = entry.mode
            info.mtime = mtime

            tar.addfile(info, data)
            yield buf.getvalue()
            buf.truncate(0)
            buf.seek(0)
    yield buf.getvalue()
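A hedged usage sketch: stream the archive to disk from an existing local repository. It assumes dulwich is installed and "." is a git repository with at least one commit; walk_tree and ListBytesIO are defined in the generator's own module.

import time
from dulwich.repo import Repo

repo = Repo(".")
commit = repo[repo.head()]
tree = repo[commit.tree]

with open("archive.tar.gz", "wb") as out:
    for chunk in tar_stream(repo, tree, mtime=int(time.time()), format="gz"):
        out.write(chunk)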
Example #29
 def get(self, path, mode='rb'):
     name = self.get_name(path)
     key = self.bucket.get_key(name)
     f = BytesIO()
     key.get_contents_to_file(f)
     f.seek(0)
     return f
Example #30
def unbind(filedata):
    """This is old code I didn't bother refactoring, sorry for the mess"""
    binded = BytesIO(filedata)

    binded.seek(-4, os.SEEK_END)
    indexsize = binded.read(4)
    indexsize = struct.unpack('<I', indexsize)[0]

    binded.seek(0 - (indexsize + 4), os.SEEK_END)
    endofdata = binded.tell()

    indexdata = binded.read(indexsize - 1)

    binded.seek(endofdata, os.SEEK_SET)

    files = []
    for line in reversed(indexdata.splitlines()):
        file = line.split(b'/')
        if (file[0] != b"__END_OF_SERIES_OF_BINDED_FILES__"):
            fstart = int(file[1], 10)
            fend = binded.tell()
            fsize = (fend - fstart)
            binded.seek(-fsize, os.SEEK_CUR)
            buffer = binded.read(fsize + 1)
            name = file[0].decode('utf-8') + getfileext(buffer)
            if (fstart > 0):
                binded.seek(fstart - 1, os.SEEK_SET)
            files.append((name, buffer))
    return files
Example #31
async def draw_herostatstable(table_args, hero_stat_categories,
                              leveled_hero_stats):
    category = None
    for cat in hero_stat_categories:
        if any(stat["stat"] == table_args.stat for stat in cat["stats"]):
            category = cat
            break
    if category is None:
        raise UserError("Couldn't find referenced stat")

    stats = category["stats"]

    # sort / get data
    hero_data = leveled_hero_stats[table_args.hero_level]
    hero_data = sorted(hero_data,
                       key=lambda hero: hero.get(table_args.stat),
                       reverse=not table_args.reverse)
    hero_data = hero_data[0:table_args.hero_count]

    table = Table(border_size=10)

    stat_highlight_color = discord_color1
    table_background = discord_color2
    table_border_color = discord_color1

    header_row = [TextCell("")]
    for stat in stats:
        header_row.append(
            SlantedTextCell(stat["name"],
                            font_size=20,
                            background=stat_highlight_color if stat["stat"]
                            == table_args.stat else table_background,
                            border_color=table_border_color,
                            border_size=2,
                            rotation=45))

    header_height = max(cell.height for cell in header_row)
    padding_right = int(header_height / math.tan(header_row[-1].rotation_rad))
    table.border_size[1] = padding_right

    table.add_row(header_row)

    i = 0
    for hero in hero_data:
        cell_background = table_background
        #cell_background = stat_highlight_color if i % 2 else table_background
        new_row = [
            ImageCell(img=await get_hero_icon(hero.get("id")),
                      padding=3,
                      border_color=table_border_color,
                      border_size=2,
                      background=table_background)
        ]
        for stat in stats:
            value = hero.get(stat["stat"])
            if stat.get("display") == "resistance_percentage":
                value = 100 * (1 - value)
            if stat.get("display") == "int":
                value = round(value)
            value = f"{value:.2f}"
            value = re.sub(r"\.0+$", "", value)
            if stat.get("display") == "resistance_percentage":
                value += "%"
            new_row.append(
                TextCell(value,
                         font_size=16,
                         padding=10,
                         border_color=table_border_color,
                         border_size=2,
                         background=stat_highlight_color if stat["stat"]
                         == table_args.stat else cell_background))
        table.add_row(new_row)
        i += 1

    image = table.render()

    fp = BytesIO()
    image.save(fp, format="PNG")
    fp.seek(0)

    return fp
Example #32
def create_aww_performance_excel_file(excel_data,
                                      data_type,
                                      month,
                                      state,
                                      district=None,
                                      block=None):
    aggregation_level = 3 if block else (2 if district else 1)
    export_info = excel_data[1][1]
    excel_data = [line[aggregation_level:] for line in excel_data[0][1]]
    thin_border = Border(left=Side(style='thin'),
                         right=Side(style='thin'),
                         top=Side(style='thin'),
                         bottom=Side(style='thin'))
    warp_text_alignment = Alignment(wrap_text=True)
    bold_font = Font(bold=True)
    blue_fill = PatternFill("solid", fgColor="B3C5E5")
    grey_fill = PatternFill("solid", fgColor="BFBFBF")

    workbook = Workbook()
    worksheet = workbook.active
    worksheet.title = "AWW Performance Report"
    worksheet.sheet_view.showGridLines = False
    # sheet title
    worksheet.merge_cells('B2:{0}2'.format("K" if aggregation_level == 3 else (
        "L" if aggregation_level == 2 else "M")))
    title_cell = worksheet['B2']
    title_cell.fill = PatternFill("solid", fgColor="4472C4")
    title_cell.value = "AWW Performance Report for the month of {}".format(
        month)
    title_cell.font = Font(size=18, color="FFFFFF")
    title_cell.alignment = Alignment(horizontal="center")

    # sheet header
    header_cells = ["B3", "C3", "D3", "E3", "F3", "G3", "H3", "I3", "J3", "K3"]
    if aggregation_level < 3:
        header_cells.append("L3")
    if aggregation_level < 2:
        header_cells.append("M3")

    for cell in header_cells:
        worksheet[cell].fill = blue_fill
        worksheet[cell].font = bold_font
        worksheet[cell].alignment = warp_text_alignment
    worksheet.merge_cells('B3:C3')
    worksheet['B3'].value = "State: {}".format(state)
    if district:
        worksheet['D3'].value = "District: {}".format(district)
    worksheet.merge_cells('E3:F3')
    if block:
        worksheet['E3'].value = "Block: {}".format(block)

    # table header
    table_header_position_row = 5
    headers = ["S.No"]
    if aggregation_level < 2:
        headers.append("District")
    if aggregation_level < 3:
        headers.append("Block")

    headers.extend([
        'Supervisor', 'AWC', 'AWW Name', 'AWW Contact Number',
        'Home Visits Conducted', 'Weighing Efficiency',
        'AWW Eligible for Incentive', 'Number of Days AWC was Open',
        'AWH Eligible for Incentive'
    ])
    columns = ['B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
    if aggregation_level < 3:
        columns.append('L')
    if aggregation_level < 2:
        columns.append('M')

    table_header = {}
    for col, header in zip(columns, headers):
        table_header[col] = header
    for column, value in table_header.items():
        cell = "{}{}".format(column, table_header_position_row)
        worksheet[cell].fill = grey_fill
        worksheet[cell].border = thin_border
        worksheet[cell].font = bold_font
        worksheet[cell].alignment = warp_text_alignment
        worksheet[cell].value = value

    # table contents
    row_position = table_header_position_row + 1

    for enum, row in enumerate(excel_data[1:], start=1):
        for column_index in range(len(columns)):
            column = columns[column_index]
            cell = "{}{}".format(column, row_position)
            worksheet[cell].border = thin_border
            if column_index == 0:
                worksheet[cell].value = enum
            else:
                worksheet[cell].value = row[column_index - 1]
        row_position += 1

    # sheet dimensions
    title_row = worksheet.row_dimensions[2]
    title_row.height = 23
    worksheet.row_dimensions[table_header_position_row].height = 46
    widths = {}
    widths_columns = ['A']
    widths_columns.extend(columns)
    standard_widths = [4, 7, 15]
    standard_widths.extend([15] * (3 - aggregation_level))
    standard_widths.extend([13, 12, 13, 15, 11, 14, 14])
    standard_widths.append(14)

    for col, width in zip(widths_columns, standard_widths):
        widths[col] = width
    widths['C'] = max(widths['C'], len(state) * 4 // 3 if state else 0)
    widths['D'] = 13 + (len(district) * 4 // 3 if district else 0)
    widths['F'] = max(widths['F'], len(block) * 4 // 3 if block else 0)
    for column in ["C", "E", "G"]:
        if widths[column] > 25:
            worksheet.row_dimensions[3].height = max(
                16 * ((widths[column] // 25) + 1),
                worksheet.row_dimensions[3].height)
            widths[column] = 25
    columns = columns[1:]
    # column widths based on table contents
    for column_index in range(len(columns)):
        widths[columns[column_index]] = max(
            widths[columns[column_index]],
            max(
                len(row[column_index].decode('utf-8') if isinstance(
                    row[column_index], bytes) else six.
                    text_type(row[column_index])) for row in excel_data[1:]) *
            4 // 3 if len(excel_data) >= 2 else 0)

    for column, width in widths.items():
        worksheet.column_dimensions[column].width = width

    # export info
    worksheet2 = workbook.create_sheet("Export Info")
    worksheet2.column_dimensions['A'].width = 14
    for n, export_info_item in enumerate(export_info, start=1):
        worksheet2['A{0}'.format(n)].value = export_info_item[0]
        worksheet2['B{0}'.format(n)].value = export_info_item[1]

    # saving file
    key = get_performance_report_blob_key(state, district, block, month,
                                          'xlsx')
    export_file = BytesIO()
    icds_file, _ = IcdsFile.objects.get_or_create(blob_id=key,
                                                  data_type=data_type)
    workbook.save(export_file)
    export_file.seek(0)
    icds_file.store_file_in_blobdb(export_file, expired=None)
    icds_file.save()
    return key
Example #33
def make_random_data():
    # leading lines reconstructed for this truncated snippet; the uniform-draw
    # parameters below are an assumption
    x = np.random.uniform(low=-2, high=4, size=200)
    y = []
    for t in x:
        r = np.random.normal(loc=0.0, scale=(0.5 + t * t / 3), size=None)
        y.append(r)
    return x, 1.726 * x - 0.84 + np.array(y)


x, y = make_random_data()

plt.plot(x, y, 'o')
#plt.savefig('images/14_03.png', dpi=300)
#plt.show()

img = BytesIO()
plt.savefig(img, dpi=300)
plt.close()
img.seek(0)

ml14_plot_url1 = b64encode(img.getvalue()).decode('ascii')

# ### Executing objects in a TensorFlow graph using their names

# In[24]:

## train/test splits:
x_train, y_train = x[:100], y[:100]
x_test, y_test = x[100:], y[100:]

## training the model
n_epochs = 500
training_costs = []
with tf.Session(graph=g) as sess:
Example #34
    @staticmethod
    def _generate_card(buffer: BytesIO, content: dict) -> BytesIO:
        """
        Generate a card from snake information.

        Written by juan and Someone during the first code jam.
        """
        snake = Image.open(buffer)

        # Get the size of the snake icon, configure the height of the image box (yes, it changes)
        icon_width = 347  # Hardcoded, not much i can do about that
        icon_height = int((icon_width / snake.width) * snake.height)
        frame_copies = icon_height // CARD['frame'].height + 1
        snake.thumbnail((icon_width, icon_height))

        # Get the dimensions of the final image
        main_height = icon_height + CARD['top'].height + CARD['bottom'].height
        main_width = CARD['frame'].width

        # Start creating the foreground
        foreground = Image.new("RGBA", (main_width, main_height), (0, 0, 0, 0))
        foreground.paste(CARD['top'], (0, 0))

        # Generate the frame borders to the correct height
        for offset in range(frame_copies):
            position = (0, CARD['top'].height + offset * CARD['frame'].height)
            foreground.paste(CARD['frame'], position)

        # Add the image and bottom part of the image
        foreground.paste(snake, (36, CARD['top'].height))  # Also hardcoded :(
        foreground.paste(CARD['bottom'], (0, CARD['top'].height + icon_height))

        # Setup the background
        back = random.choice(CARD['backs'])
        back_copies = main_height // back.height + 1
        full_image = Image.new("RGBA", (main_width, main_height), (0, 0, 0, 0))

        # Generate the tiled background
        for offset in range(back_copies):
            full_image.paste(back, (16, 16 + offset * back.height))

        # Place the foreground onto the final image
        full_image.paste(foreground, (0, 0), foreground)

        # Get the first two sentences of the info
        description = '.'.join(content['info'].split(".")[:2]) + '.'

        # Setup positioning variables
        margin = 36
        offset = CARD['top'].height + icon_height + margin

        # Create blank rectangle image which will be behind the text
        rectangle = Image.new("RGBA", (main_width, main_height), (0, 0, 0, 0))

        # Draw a semi-transparent rectangle on it
        rect = ImageDraw.Draw(rectangle)
        rect.rectangle(
            (margin, offset, main_width - margin, main_height - margin),
            fill=(63, 63, 63, 128))

        # Paste it onto the final image
        full_image.paste(rectangle, (0, 0), mask=rectangle)

        # Draw the text onto the final image
        draw = ImageDraw.Draw(full_image)
        for line in textwrap.wrap(description, 36):
            draw.text([margin + 4, offset], line, font=CARD['font'])
            offset += CARD['font'].getsize(line)[1]

        # Get the image contents as a BytesIO object
        buffer = BytesIO()
        full_image.save(buffer, 'PNG')
        buffer.seek(0)

        return buffer
Example #35
def fig_to_base64(figure):
    plt_bytes = BytesIO()
    figure.savefig(plt_bytes, format='png')
    plt_bytes.seek(0)

    return plt_bytes
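Despite the name, the helper returns a BytesIO of PNG bytes; the base64 step happens at the call site. A minimal sketch (the Agg backend line is an assumption to keep it headless):

import base64
import matplotlib
matplotlib.use("Agg")  # assumption: render off-screen
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])

encoded = base64.b64encode(fig_to_base64(fig).read()).decode("ascii")
html = '<img src="data:image/png;base64,{}"/>'.format(encoded)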
Example #36
    def run_test(self):
        url = urlparse.urlparse(self.nodes[0].url)
        print "Mining blocks..."

        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[2].generate(100)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 500)

        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        self.nodes[2].generate(1)
        self.sync_all()
        bb_hash = self.nodes[0].getbestblockhash()

        assert_equal(self.nodes[1].getbalance(),
                     Decimal("0.1"))  #balance now should be 0.1 on node 1

        # load the latest 0.1 tx over the REST API
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/tx/' + txid + self.FORMAT_SEPARATOR + "json")
        json_obj = json.loads(json_string)
        vintx = json_obj['vin'][0][
            'txid']  # get the vin to later check for utxo (should be spent by then)
        # get n of 0.1 outpoint
        n = 0
        for vout in json_obj['vout']:
            if vout['value'] == 0.1:
                n = vout['n']

        ######################################
        # GETUTXOS: query a unspent outpoint #
        ######################################
        json_request = '/checkmempool/' + txid + '-' + str(n)
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)

        #check chainTip response
        assert_equal(json_obj['chaintipHash'], bb_hash)

        #make sure there is one utxo
        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['utxos'][0]['value'], 0.1)

        ################################################
        # GETUTXOS: now query a already spent outpoint #
        ################################################
        json_request = '/checkmempool/' + vintx + '-0'
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)

        #check chainTip response
        assert_equal(json_obj['chaintipHash'], bb_hash)

        #make sure there is no utxo in the response because this outpoint has been spent
        assert_equal(len(json_obj['utxos']), 0)

        #check bitmap
        assert_equal(json_obj['bitmap'], "0")

        ##################################################
        # GETUTXOS: now check both with the same request #
        ##################################################
        json_request = '/checkmempool/' + txid + '-' + str(
            n) + '/' + vintx + '-0'
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['bitmap'], "10")

        #test binary response
        bb_hash = self.nodes[0].getbestblockhash()

        binaryRequest = b'\x01\x02'
        binaryRequest += hex_str_to_bytes(txid)
        binaryRequest += pack("i", n)
        binaryRequest += hex_str_to_bytes(vintx)
        binaryRequest += pack("i", 0)

        bin_response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos' + self.FORMAT_SEPARATOR + 'bin', binaryRequest)
        output = BytesIO()
        output.write(bin_response)
        output.seek(0)
        chainHeight = unpack("i", output.read(4))[0]
        hashFromBinResponse = hex(
            deser_uint256(output))[2:].zfill(65).rstrip("L")

        assert_equal(
            bb_hash, hashFromBinResponse
        )  #check if getutxo's chaintip during calculation was fine
        assert_equal(chainHeight, 102)  #chain height must be 102

        ############################
        # GETUTXOS: mempool checks #
        ############################

        # do a tx and don't sync
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/tx/' + txid + self.FORMAT_SEPARATOR + "json")
        json_obj = json.loads(json_string)
        vintx = json_obj['vin'][0][
            'txid']  # get the vin to later check for utxo (should be spent by then)
        # get n of 0.1 outpoint
        n = 0
        for vout in json_obj['vout']:
            if vout['value'] == 0.1:
                n = vout['n']

        json_request = '/' + txid + '-' + str(n)
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        assert_equal(
            len(json_obj['utxos']), 0
        )  #no outpoint is returned because checkmempool was not requested and the tx is still unconfirmed

        json_request = '/checkmempool/' + txid + '-' + str(n)
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        assert_equal(
            len(json_obj['utxos']), 1
        )  #there should be an outpoint because it has just been added to the mempool

        #do some invalid requests
        json_request = '{"checkmempool'
        response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos' + self.FORMAT_SEPARATOR + 'json', json_request,
            True)
        assert_equal(
            response.status,
            500)  #must be a 500 because we sent an invalid json request

        json_request = '{"checkmempool'
        response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos' + self.FORMAT_SEPARATOR + 'bin', json_request,
            True)
        assert_equal(response.status,
                     500)  #must be a 500 because we sent an invalid bin request

        response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos/checkmempool' + self.FORMAT_SEPARATOR + 'bin', '',
            True)
        assert_equal(response.status,
                     500)  #must be a 500 because we sent an invalid bin request

        #test limits
        json_request = '/checkmempool/'
        for x in range(0, 20):
            json_request += txid + '-' + str(n) + '/'
        json_request = json_request.rstrip("/")
        response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json',
            '', True)
        assert_equal(response.status,
                     500)  #must be a 500 because we are exceeding the limits

        json_request = '/checkmempool/'
        for x in range(0, 15):
            json_request += txid + '-' + str(n) + '/'
        json_request = json_request.rstrip("/")
        response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json',
            '', True)
        assert_equal(response.status,
                     200)  #must be a 200 because we are within the limits

        self.nodes[0].generate(1)  #generate block to not affect upcoming tests
        self.sync_all()

        ################
        # /rest/block/ #
        ################

        # check binary format
        response = http_get_call(
            url.hostname, url.port,
            '/rest/block/' + bb_hash + self.FORMAT_SEPARATOR + "bin", True)
        assert_equal(response.status, 200)
        assert_greater_than(int(response.getheader('content-length')), 80)
        response_str = response.read()

        # compare with block header
        response_header = http_get_call(
            url.hostname, url.port,
            '/rest/headers/1/' + bb_hash + self.FORMAT_SEPARATOR + "bin", True)
        assert_equal(response_header.status, 200)
        assert_equal(int(response_header.getheader('content-length')), 80)
        response_header_str = response_header.read()
        assert_equal(response_str[0:80], response_header_str)

        # check block hex format
        response_hex = http_get_call(
            url.hostname, url.port,
            '/rest/block/' + bb_hash + self.FORMAT_SEPARATOR + "hex", True)
        assert_equal(response_hex.status, 200)
        assert_greater_than(int(response_hex.getheader('content-length')), 160)
        response_hex_str = response_hex.read()
        assert_equal(
            encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])

        # compare with hex block header
        response_header_hex = http_get_call(
            url.hostname, url.port,
            '/rest/headers/1/' + bb_hash + self.FORMAT_SEPARATOR + "hex", True)
        assert_equal(response_header_hex.status, 200)
        assert_greater_than(
            int(response_header_hex.getheader('content-length')), 160)
        response_header_hex_str = response_header_hex.read()
        assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
        assert_equal(
            encode(response_header_str, "hex_codec")[0:160],
            response_header_hex_str[0:160])

        # check json format
        block_json_string = http_get_call(
            url.hostname, url.port,
            '/rest/block/' + bb_hash + self.FORMAT_SEPARATOR + 'json')
        block_json_obj = json.loads(block_json_string)
        assert_equal(block_json_obj['hash'], bb_hash)

        # compare with json block header
        response_header_json = http_get_call(
            url.hostname, url.port,
            '/rest/headers/1/' + bb_hash + self.FORMAT_SEPARATOR + "json",
            True)
        assert_equal(response_header_json.status, 200)
        response_header_json_str = response_header_json.read().decode('utf-8')
        json_obj = json.loads(response_header_json_str, parse_float=Decimal)
        assert_equal(len(json_obj),
                     1)  #ensure that there is one header in the json response
        assert_equal(json_obj[0]['hash'],
                     bb_hash)  #request/response hash should be the same

        #compare with normal RPC block response
        rpc_block_json = self.nodes[0].getblock(bb_hash)
        assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
        assert_equal(json_obj[0]['confirmations'],
                     rpc_block_json['confirmations'])
        assert_equal(json_obj[0]['height'], rpc_block_json['height'])
        assert_equal(json_obj[0]['version'], rpc_block_json['version'])
        assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
        assert_equal(json_obj[0]['time'], rpc_block_json['time'])
        assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
        assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
        assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
        assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
        assert_equal(json_obj[0]['previousblockhash'],
                     rpc_block_json['previousblockhash'])

        #see if we can get 5 headers in one response
        self.nodes[1].generate(5)
        self.sync_all()
        response_header_json = http_get_call(
            url.hostname, url.port,
            '/rest/headers/5/' + bb_hash + self.FORMAT_SEPARATOR + "json",
            True)
        assert_equal(response_header_json.status, 200)
        response_header_json_str = response_header_json.read().decode('utf-8')
        json_obj = json.loads(response_header_json_str)
        assert_equal(len(json_obj), 5)  #now we should have 5 header objects

        # do tx test
        tx_hash = block_json_obj['tx'][0]['txid']
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/tx/' + tx_hash + self.FORMAT_SEPARATOR + "json")
        json_obj = json.loads(json_string)
        assert_equal(json_obj['txid'], tx_hash)

        # check hex format response
        hex_string = http_get_call(
            url.hostname, url.port,
            '/rest/tx/' + tx_hash + self.FORMAT_SEPARATOR + "hex", True)
        assert_equal(hex_string.status, 200)
        assert_greater_than(int(response.getheader('content-length')), 10)

        # check block tx details
        # let's make 3 tx and mine them on node 1
        txs = []
        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),
                                               11))
        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),
                                               11))
        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),
                                               11))
        self.sync_all()

        # check that there are exactly 3 transactions in the TX memory pool before generating the block
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/mempool/info' + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        assert_equal(json_obj['size'], 3)
        # the size of the memory pool should be greater than 3x ~100 bytes
        assert_greater_than(json_obj['bytes'], 300)

        # check that there are our submitted transactions in the TX memory pool
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/mempool/contents' + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        for tx in txs:
            assert_equal(tx in json_obj, True)

        # now mine the transactions
        newblockhash = self.nodes[1].generate(1)
        self.sync_all()

        #check if the 3 tx show up in the new block
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/block/' + newblockhash[0] + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        for tx in json_obj['tx']:
            if 'coinbase' not in tx['vin'][0]:  # exclude coinbase
                assert_equal(tx['txid'] in txs, True)

        #check the same but without tx details
        json_string = http_get_call(
            url.hostname, url.port, '/rest/block/notxdetails/' +
            newblockhash[0] + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        for tx in txs:
            assert_equal(tx in json_obj['tx'], True)

        #test rest bestblock
        bb_hash = self.nodes[0].getbestblockhash()

        json_string = http_get_call(url.hostname, url.port,
                                    '/rest/chaininfo.json')
        json_obj = json.loads(json_string)
        assert_equal(json_obj['bestblockhash'], bb_hash)
Example #37
0
        def do_POST(self):
            """Serve a POST request."""
            # First, we save the post data
            r, info = self.deal_post_data()
            print((r, info, "by: ", self.client_address))

            # And write the response web page
            f = BytesIO()
            f.write(
                b"<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\"><html>"
            )
            f.write(b"<title>qr-filetransfer</title>")
            f.write(
                b"<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">"
            )
            f.write(
                b"<link href=\"https://fonts.googleapis.com/css?family=Comfortaa\" rel=\"stylesheet\">"
            )
            f.write(
                b"<link rel=\"icon\" href=\"https://raw.githubusercontent.com/sdushantha/qr-filetransfer/master/logo.png\" type=\"image/png\">"
            )
            f.write(b"<center>")
            f.write(b"<body>")
            f.write(
                b"<h2 style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">Upload Result Page</h2>"
            )
            f.write(b"<hr>")

            if r:
                f.write(
                    b"<strong style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">Success: </strong>"
                )
            else:
                f.write(
                    b"<strong style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">Failed: </strong>"
                )

            f.write((
                "<span style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">%s</span><br>"
                % info).encode())
            f.write((
                "<br><a href=\"%s\" style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">back</a>"
                % self.headers['referer']).encode())
            f.write(
                b"<hr><small style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">Powerd By: "
            )
            f.write(b"<a href=\"https://github.com/sdushantha/\">")
            f.write(b"sdushantha</a> and \n")
            f.write(b"<a href=\"https://github.com/npes87184/\">")
            f.write(b"npes87184</a>, check new version at \n")
            f.write(b"<a href=\"https://pypi.org/project/qr-filetransfer/\">")
            f.write(b"here</a>.</small></body>\n</html>\n")
            length = f.tell()
            f.seek(0)
            self.send_response(200)
            self.send_header("Content-type", "text/html; charset=utf-8")
            self.send_header("Content-Length", str(length))
            self.end_headers()
            if f:
                self.copyfile(f, self.wfile)
                f.close()
Example #38
0
 def write_response(self, socket_file, data):
     buf = BytesIO()
     self._write(buf, data)
     buf.seek(0)
     socket_file.write(buf.getvalue())
     socket_file.flush()
Example #39
0
def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
    """Parse a DICOM file until a condition is met.

    Parameters
    ----------
    fileobj : a file-like object
        Note that the file will not close when the function returns.
    stop_when :
        Stop condition. See ``read_dataset`` for more info.
    defer_size : int, str, None, optional
        See ``read_file`` for parameter info.
    force : boolean
        See ``read_file`` for parameter info.

    Notes
    -----
    Use ``read_file`` unless you need to stop on some condition
    other than reaching pixel data.

    Returns
    -------
    FileDataset instance or DicomDir instance.

    See Also
    --------
    read_file
        More generic file reading function.
    """
    # Read preamble -- raise an exception if missing and force=False
    preamble = read_preamble(fileobj, force)
    file_meta_dataset = Dataset()
    # Assume a transfer syntax, correct it as necessary
    is_implicit_VR = True
    is_little_endian = True
    if preamble:
        file_meta_dataset = _read_file_meta_info(fileobj)
        transfer_syntax = file_meta_dataset.get("TransferSyntaxUID")
        if transfer_syntax is None:  # issue 258
            pass
        elif transfer_syntax == pydicom.uid.ImplicitVRLittleEndian:
            pass
        elif transfer_syntax == pydicom.uid.ExplicitVRLittleEndian:
            is_implicit_VR = False
        elif transfer_syntax == pydicom.uid.ExplicitVRBigEndian:
            is_implicit_VR = False
            is_little_endian = False
        elif transfer_syntax == pydicom.uid.DeflatedExplicitVRLittleEndian:
            # See PS3.6-2008 A.5 (p 71)
            # when written, the entire dataset following
            #     the file metadata was prepared the normal way,
            #     then "deflate" compression applied.
            #  All that is needed here is to decompress and then
            #     use as normal in a file-like object
            zipped = fileobj.read()
            # -MAX_WBITS part is from comp.lang.python answer:
            # groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799
            unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS)
            fileobj = BytesIO(unzipped)  # a file-like object
            is_implicit_VR = False
        else:
            # Any other syntax should be Explicit VR Little Endian,
            #   e.g. all Encapsulated (JPEG etc) are ExplVR-LE
            #        by Standard PS 3.5-2008 A.4 (p63)
            is_implicit_VR = False
    else:  # no header -- use the is_little_endian, implicit assumptions
        file_meta_dataset.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian
        endian_chr = "<"
        element_struct = Struct(endian_chr + "HH2sH")
        # Try reading first 8 bytes
        group, elem, VR, length = element_struct.unpack(fileobj.read(8))
        # Rewind file object
        fileobj.seek(0)
        # If the VR is a valid VR, assume Explicit VR transfer syntax
        from pydicom.values import converters
        if not in_py2:
            VR = VR.decode(default_encoding)
        if VR in converters.keys():
            is_implicit_VR = False
            # Determine if group in low numbered range (Little vs Big Endian)
            if group == 0:  # got (0,0) group length. Not helpful.
                # XX could use similar to http://www.dclunie.com/medical-image-faq/html/part2.html code example
                msg = ("Not able to guess transfer syntax when first item "
                       "is group length")
                raise NotImplementedError(msg)
            if group < 2000:
                file_meta_dataset.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian
            else:
                file_meta_dataset.TransferSyntaxUID = pydicom.uid.ExplicitVRBigEndian
                is_little_endian = False

    try:
        dataset = read_dataset(fileobj,
                               is_implicit_VR,
                               is_little_endian,
                               stop_when=stop_when,
                               defer_size=defer_size)
    except EOFError:
        pass  # error already logged in read_dataset

    class_uid = file_meta_dataset.get("MediaStorageSOPClassUID", None)
    if class_uid and class_uid == "Media Storage Directory Storage":
        return DicomDir(fileobj, dataset, preamble, file_meta_dataset,
                        is_implicit_VR, is_little_endian)
    else:
        return FileDataset(fileobj, dataset, preamble, file_meta_dataset,
                           is_implicit_VR, is_little_endian)
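A minimal usage sketch (not part of the snippet above), assuming pydicom invokes the stop_when callback with (tag, VR, length); the file path is a placeholder:
# Hedged usage sketch: parse only the metadata of an in-memory DICOM file and
# stop as soon as the PixelData element (7FE0,0010) is reached.
from io import BytesIO

def _stop_before_pixel_data(tag, VR, length):
    return tag == 0x7FE00010  # PixelData tag

with open("example.dcm", "rb") as f:  # "example.dcm" is a placeholder path
    header_only = read_partial(BytesIO(f.read()), stop_when=_stop_before_pixel_data)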
Example #40
0
def create_lady_supervisor_excel_file(excel_data, data_type, month,
                                      aggregation_level):
    export_info = excel_data[1][1]
    state = export_info[1][1] if aggregation_level > 0 else ''
    district = export_info[2][1] if aggregation_level > 1 else ''
    block = export_info[3][1] if aggregation_level > 2 else ''
    excel_data = [line[aggregation_level:] for line in excel_data[0][1]]
    thin_border = Border(left=Side(style='thin'),
                         right=Side(style='thin'),
                         top=Side(style='thin'),
                         bottom=Side(style='thin'))
    warp_text_alignment = Alignment(wrap_text=True)
    bold_font = Font(bold=True)
    blue_fill = PatternFill("solid", fgColor="B3C5E5")
    grey_fill = PatternFill("solid", fgColor="BFBFBF")

    workbook = Workbook()
    worksheet = workbook.active
    worksheet.title = "LS Performance Report"
    worksheet.sheet_view.showGridLines = False
    # sheet title
    amount_of_columns = 9 - aggregation_level
    last_column = string.ascii_uppercase[amount_of_columns]
    worksheet.merge_cells('B2:{0}2'.format(last_column))
    title_cell = worksheet['B2']
    title_cell.fill = PatternFill("solid", fgColor="4472C4")
    title_cell.value = "Lady Supervisor Performance Report for the {}".format(
        month)
    title_cell.font = Font(size=18, color="FFFFFF")
    title_cell.alignment = Alignment(horizontal="center")

    columns = [
        string.ascii_uppercase[i] for i in range(1, amount_of_columns + 1)
    ]

    # sheet header
    header_cells = ['{0}3'.format(column) for column in columns]
    for cell in header_cells:
        worksheet[cell].fill = blue_fill
        worksheet[cell].font = bold_font
        worksheet[cell].alignment = warp_text_alignment
    if state:
        worksheet['B3'].value = "State: {}".format(state)
        worksheet.merge_cells('B3:C3')
    if district:
        worksheet['D3'].value = "District: {}".format(district)
    if block:
        worksheet['E3'].value = "Block: {}".format(block)
    date_cell = '{0}3'.format(last_column)
    date_description_cell = '{0}3'.format(
        string.ascii_uppercase[amount_of_columns - 1])
    worksheet[date_description_cell].value = "Date when downloaded:"
    worksheet[date_description_cell].alignment = Alignment(horizontal="right")
    utc_now = datetime.now(pytz.utc)
    now_in_india = utc_now.astimezone(india_timezone)
    worksheet[date_cell].value = custom_strftime('{S} %b %Y', now_in_india)
    worksheet[date_cell].alignment = Alignment(horizontal="right")

    # table header
    table_header_position_row = 5
    header_data = excel_data[0]
    headers = ["S.No"]
    headers.extend(header_data)

    table_header = {}
    for col, header in zip(columns, headers):
        table_header[col] = header
    for column, value in table_header.items():
        cell = "{}{}".format(column, table_header_position_row)
        worksheet[cell].fill = grey_fill
        worksheet[cell].border = thin_border
        worksheet[cell].font = bold_font
        worksheet[cell].alignment = warp_text_alignment
        worksheet[cell].value = value

    # table contents
    row_position = table_header_position_row + 1

    for enum, row in enumerate(excel_data[1:], start=1):
        for column_index in range(len(columns)):
            column = columns[column_index]
            cell = "{}{}".format(column, row_position)
            worksheet[cell].border = thin_border
            if column_index == 0:
                worksheet[cell].value = enum
            else:
                worksheet[cell].value = row[column_index - 1]
        row_position += 1

    # sheet dimensions
    title_row = worksheet.row_dimensions[2]
    title_row.height = 23
    worksheet.row_dimensions[table_header_position_row].height = 46
    widths = {}
    widths_columns = ['A']
    widths_columns.extend(columns)
    standard_widths = [4, 7]
    standard_widths.extend([15] * (4 - aggregation_level))
    standard_widths.extend([25, 15, 25, 15])
    for col, width in zip(widths_columns, standard_widths):
        widths[col] = width
    widths['C'] = max(widths['C'], len(state) * 4 // 3 if state else 0)
    widths['D'] = 9 + (len(district) * 4 // 3 if district else 0)
    widths['E'] = 8 + (len(block) * 4 // 3 if block else 0)

    columns = columns[1:]
    # column widths based on table contents
    for column_index in range(len(columns)):
        widths[columns[column_index]] = max(
            widths[columns[column_index]],
            max(
                len(row[column_index].decode('utf-8')
                    if isinstance(row[column_index], bytes)
                    else six.text_type(row[column_index]))
                for row in excel_data[1:]) * 4 // 3 if len(excel_data) >= 2 else 0)

    for column, width in widths.items():
        worksheet.column_dimensions[column].width = width

    # export info
    worksheet2 = workbook.create_sheet("Export Info")
    worksheet2.column_dimensions['A'].width = 14
    for n, export_info_item in enumerate(export_info, start=1):
        worksheet2['A{0}'.format(n)].value = export_info_item[0]
        worksheet2['B{0}'.format(n)].value = export_info_item[1]

    # saving file
    file_hash = uuid.uuid4().hex
    export_file = BytesIO()
    icds_file = IcdsFile(blob_id=file_hash, data_type=data_type)
    workbook.save(export_file)
    export_file.seek(0)
    icds_file.store_file_in_blobdb(export_file, expired=ONE_DAY)
    icds_file.save()
    return file_hash
Example #41
0
def df_to_bytes_np_records_(df: pd.DataFrame) -> bytes:
    b = BytesIO()
    rec = df.to_records()
    np.save(b, rec, allow_pickle=False)
    b.seek(0)
    return b.read()
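A hedged companion sketch (not part of the original example) for the reverse direction; it assumes the same imports as above (pandas as pd, numpy as np, io.BytesIO) and the default to_records() index field name:
# Hedged sketch: decode the bytes produced by df_to_bytes_np_records_.
# Assumes the index was stored under the default field name "index".
def df_from_bytes_np_records_(data: bytes) -> pd.DataFrame:
    rec = np.load(BytesIO(data), allow_pickle=False)
    return pd.DataFrame.from_records(rec, index="index")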
Example #42
0
def saveArray(array, key):
    bio = BytesIO()
    numpy.save(bio, array)
    bio.flush()
    bio.seek(0)
    saveBytes(bio.getvalue(), key)
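A hedged counterpart for loading the array back; loadBytes is a hypothetical inverse of the saveBytes helper used above:
# Hedged sketch: loadBytes is assumed (hypothetical) to return the bytes
# previously stored by saveBytes under the same key.
def loadArray(key):
    bio = BytesIO(loadBytes(key))
    return numpy.load(bio)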
Example #43
0
def saveImageToFile(image, filename):
    print('saves image ', filename)
    # save full image
    image.save(filename)


#Main loop
while True:
    # Restart shooting thread
    if not liveViewThread.is_alive():
        liveViewThread = CameraThread()
        liveViewThread.start()

    # View Loop
    stream.seek(0)
    for bug in camera.capture_continuous(stream,
                                         format='jpeg',
                                         use_video_port=True,
                                         splitter_port=1):
        t1 = time.time()
        stream.seek(
            0
        )  # "Rewind" the stream to the beginning so we can read its content
        print('capture')

        # take a picture
        if GPIO.event_detected(SHOT_PIN):
            liveViewThread.exit = True
            image = Image.open(stream)
Example #44
0
 def df_to_bytes_parquet_(df: pd.DataFrame) -> bytes:
     b = BytesIO()
     table = pyarrow.Table.from_pandas(df)
     parquet.write_table(table, b)
     b.seek(0)
     return b.read()
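A hedged companion sketch for the reverse direction; it assumes the same imports as above (pandas as pd, pyarrow.parquet as parquet, io.BytesIO):
# Hedged sketch: read the Parquet bytes back into a DataFrame.
def df_from_bytes_parquet_(data: bytes) -> pd.DataFrame:
    return parquet.read_table(BytesIO(data)).to_pandas()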
Example #45
0
    def export(self, **kwargs: Any) -> Response:
        """Export dashboards
        ---
        get:
          description: >-
            Exports multiple Dashboards and downloads them as YAML files.
          parameters:
          - in: query
            name: q
            content:
              application/json:
                schema:
                  $ref: '#/components/schemas/get_export_ids_schema'
          responses:
            200:
              description: Dashboard export
              content:
                text/plain:
                  schema:
                    type: string
            400:
              $ref: '#/components/responses/400'
            401:
              $ref: '#/components/responses/401'
            404:
              $ref: '#/components/responses/404'
            422:
              $ref: '#/components/responses/422'
            500:
              $ref: '#/components/responses/500'
        """
        requested_ids = kwargs["rison"]

        if is_feature_enabled("VERSIONED_EXPORT"):
            timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
            root = f"dashboard_export_{timestamp}"
            filename = f"{root}.zip"

            buf = BytesIO()
            with ZipFile(buf, "w") as bundle:
                try:
                    for file_name, file_content in ExportDashboardsCommand(
                        requested_ids
                    ).run():
                        with bundle.open(f"{root}/{file_name}", "w") as fp:
                            fp.write(file_content.encode())
                except DashboardNotFoundError:
                    return self.response_404()
            buf.seek(0)

            return send_file(
                buf,
                mimetype="application/zip",
                as_attachment=True,
                attachment_filename=filename,
            )

        query = self.datamodel.session.query(Dashboard).filter(
            Dashboard.id.in_(requested_ids)
        )
        query = self._base_filters.apply_all(query)
        ids = [item.id for item in query.all()]
        if not ids:
            return self.response_404()
        export = Dashboard.export_dashboards(ids)
        resp = make_response(export, 200)
        resp.headers["Content-Disposition"] = generate_download_headers("json")[
            "Content-Disposition"
        ]
        return resp
Example #46
0
def testppt():
    out_file = BytesIO()

    imagefile = BytesIO()
    # imagefile = StringIO()
    c = create_chart_object()
    c.save(imagefile, 'png', scale_factor=1.9)
    imagefile.seek(0)

    path = 'template.pptx'
    prs = Presentation(path)
    # blank_slide_layout = prs.slide_layouts[6]
    # slide = prs.slides.add_slide(blank_slide_layout)
    title_only_slide_layout = prs.slide_layouts[4]
    slide = prs.slides.add_slide(title_only_slide_layout)
    shapes = slide.shapes
    shapes.title.text = 'Adding an AutoShape'

    left = Pt(50)
    top = Pt(300)
    width = Pt(700)
    height = Pt(350)
    txBox = shapes.add_textbox(left, top, width, height)
    tf = txBox.text_frame
    tf.auto_size = MSO_AUTO_SIZE.SHAPE_TO_FIT_TEXT
    tf.word_wrap = True
    tf.clear()  # remove any existing paragraphs, leaving one empty one
    p = tf.paragraphs[0]
    p.alignment = PP_ALIGN.LEFT
    p.text = "LS 1234567"
    p.font.bold = True
    p = tf.add_paragraph()
    p.alignment = PP_ALIGN.LEFT
    p.text = "This is a second paragraph containing some comment"
    # top = Inches(1.5)
    # left = Inches(5)
    top = Pt(300)
    left = Pt(900)
    pic = shapes.add_picture(imagefile, left, top, height=Pt(500))

    symptoms = [{
        "abbr": "A",
        "symp": "bubu"
    }, {
        "abbr": "B",
        "symp": "bibi"
    }, {
        "abbr": "CA",
        "symp": "lala"
    }]
    rows = len(symptoms) + 1
    cols = 2
    left = Pt(50)
    top = Pt(650)
    width = Pt(350)
    height = Pt(150)

    table = shapes.add_table(rows, cols, left, top, width, height).table
    # set column widths
    table.columns[0].width = Pt(70)
    table.columns[1].width = Pt(300)

    table.cell(0, 0).text = 'Abbreviation'
    table.cell(0, 1).text = 'Symptom'

    # data rows start below the header row (row 0)
    for i in range(rows - 1):
        table.cell(i + 1, 0).text = symptoms[i]['abbr']
        table.cell(i + 1, 1).text = symptoms[i]['symp']

    prs.save(out_file)
    out_file.seek(0)
    return send_file(out_file,
                     attachment_filename="testing.pptx",
                     as_attachment=True)
Example #47
0
def test_complete():
    fig = plt.figure('Figure with a label?', figsize=(10, 6))

    plt.suptitle('Can you fit any more in a figure?')

    # make some arbitrary data
    x, y = np.arange(8), np.arange(10)
    data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
    v = np.sin(v * -0.6)

    # Ensure lists also pickle correctly.
    plt.subplot(3, 3, 1)
    plt.plot(list(range(10)))

    plt.subplot(3, 3, 2)
    plt.contourf(data, hatches=['//', 'ooo'])
    plt.colorbar()

    plt.subplot(3, 3, 3)
    plt.pcolormesh(data)

    plt.subplot(3, 3, 4)
    plt.imshow(data)

    plt.subplot(3, 3, 5)
    plt.pcolor(data)

    ax = plt.subplot(3, 3, 6)
    ax.set_xlim(0, 7)
    ax.set_ylim(0, 9)
    plt.streamplot(x, y, u, v)

    ax = plt.subplot(3, 3, 7)
    ax.set_xlim(0, 7)
    ax.set_ylim(0, 9)
    plt.quiver(x, y, u, v)

    plt.subplot(3, 3, 8)
    plt.scatter(x, x**2, label='$x^2$')
    plt.legend(loc='upper left')

    plt.subplot(3, 3, 9)
    plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4)

    #
    # plotting is done, now test its pickle-ability
    #
    result_fh = BytesIO()
    pickle.dump(fig, result_fh, pickle.HIGHEST_PROTOCOL)

    plt.close('all')

    # make doubly sure that there are no figures left
    assert plt._pylab_helpers.Gcf.figs == {}

    # wind back the fh and load in the figure
    result_fh.seek(0)
    fig = pickle.load(result_fh)

    # make sure there is now a figure manager
    assert plt._pylab_helpers.Gcf.figs != {}

    assert fig.get_label() == 'Figure with a label?'
Example #48
0
def prediction_interval_plot_as_png_base64(df,
                                           time,
                                           signal,
                                           estimator,
                                           lower,
                                           upper,
                                           name=None,
                                           max_length=1000):
    assert df.shape[0] > 0
    assert df.shape[1] > 0
    assert time in df.columns
    assert signal in df.columns
    assert estimator in df.columns
    assert lower in df.columns
    assert upper in df.columns

    df1 = df.tail(max_length).copy()
    lMin = np.mean(df1[signal]) - np.std(df1[signal]) * 3
    lMax = np.mean(df1[signal]) + np.std(df1[signal]) * 3
    df1[lower] = df1[lower].apply(lambda x: x
                                  if (np.isnan(x) or x >= lMin) else np.nan)
    df1[upper] = df1[upper].apply(lambda x: x
                                  if (np.isnan(x) or x <= lMax) else np.nan)

    # last value of the signal
    lLastSignalPos = df1[signal].dropna().tail(1).index
    lEstimtorValue = df1[estimator][lLastSignalPos]
    df1.loc[lLastSignalPos, lower] = lEstimtorValue
    df1.loc[lLastSignalPos, upper] = lEstimtorValue

    import matplotlib

    # matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    plt.switch_backend("Agg")
    fig, axs = plt.subplots(ncols=1, figsize=(16, 8))
    df1.plot.line(
        time,
        [signal, estimator, lower, upper],
        color=[SIGNAL_COLOR, FORECAST_COLOR, FORECAST_COLOR, FORECAST_COLOR],
        ax=axs,
        grid=True,
        legend=False,
    )
    add_patched_legend(axs, [signal, estimator, lower, upper])

    x = df1[time]
    type1 = np.dtype(x)
    if type1.kind == "M":
        x = x.apply(lambda t: t.date())
    axs.fill_between(x.values,
                     df1[lower],
                     df1[upper],
                     color=SHADED_COLOR,
                     alpha=0.5)

    figfile = BytesIO()
    fig.savefig(figfile, format="png")
    plt.close(fig)
    figfile.seek(0)  # rewind to beginning of file
    figdata_png = base64.b64encode(figfile.getvalue())
    return figdata_png.decode("utf8")
Example #49
0
def wav_info_to_csv():
    '''
    To save wav files details to csv of a directory
    '''
    wav_info_tags = ["Filename", "Operator", "DeviceID", "Battery_Voltage", "Battery_Percentage",
                     "Network_status", "Firmare_Revision", "Time_Stamp", "Latitude", "Longitude",
                     "ChunkID", "TotalSize", "Format", "SubChunk1ID", "SubChunk1Size",
                     "AudioFormat", "NumChannels", "SampleRate", "ByteRate", "BlockAlign",
                     "BitsPerSample", "SubChunk2ID", "SubChunk2Size"]
    initial_number_of_devices = 0
    count = 0
    DICT = {}
    wav_file_name = "wav_file_extra_information_"+CURRENT_PATH.split("/")[-1]+".csv"
    print(wav_file_name)
    with open(wav_file_name, "w") as file_object:
        wav_information_object = csv.writer(file_object)
        wav_information_object.writerow(wav_info_tags)
        file_object.flush()

        for name in NAMES:
            if (name[-3:] == 'wav') or (name[-3:] == 'WAV'):
                time1 = ftp.voidcmd("MDTM " + name)
                count += 1
                DICT[name] = time1[4:]
        sorted_wav_files_list = sorted((value,key) for (key,value) in DICT.items())
        sorted_file_name_list = [element[1] for element in sorted_wav_files_list]

        for name in sorted_file_name_list:
            if (name[-3:] == 'wav') or (name[-3:] == 'WAV'):

                file_header_info = BytesIO(FtpFile(ftp, name).read(264))

                riff, size, fformat = struct.unpack('<4sI4s', file_header_info.read(12))
                chunkoffset = file_header_info.tell()

                chunk_header = file_header_info.read(8)
                subchunkid, subchunksize = struct.unpack('<4sI', chunk_header)
                chunkoffset = file_header_info.tell()

                aformat, channels, samplerate, byterate, blockalign, bps = struct.unpack('HHIIHH', file_header_info.read(16))
                chunkoffset = file_header_info.tell()

                struct.unpack('<4sI', file_header_info.read(8))
                struct.unpack('<4s4sI', file_header_info.read(12))
                chunkoffset = file_header_info.tell()

                extra_header = file_header_info.read(200)
                chunkoffset = file_header_info.tell()

                file_header_info.seek(chunkoffset)
                subchunk2id, subchunk2size = struct.unpack('<4sI', file_header_info.read(8))
                chunkoffset = file_header_info.tell()

                wav_header = riff, size, fformat, subchunkid, subchunksize, aformat, \
                channels, samplerate, byterate, blockalign, bps, subchunk2id, subchunk2size

                #Getting the wav information and writing the csv file rows
                wav_information = extra_header.decode("ascii").split(',')
                information_value = [name]
                for index_value, each_tag_value in enumerate(wav_information):
                    try:
                        _, corresponding_value = each_tag_value.split(":")
                    except ValueError:
                        corresponding_value = "".join(each_tag_value.split(":")[1:])
                    information_value.append(corresponding_value)
                for info in wav_header:
                    information_value.append(info)    
                wav_information_object.writerow(information_value)
                file_object.flush()
        print "wav file information saved to csv.."
Example #50
0
    def receive(self, command=None, prompts=None, answer=None, newline=True, prompt_retry_check=False, check_all=False):
        '''
        Handles receiving of output from command
        '''
        self._matched_prompt = None
        self._matched_cmd_prompt = None
        recv = BytesIO()
        handled = False
        command_prompt_matched = False
        matched_prompt_window = window_count = 0

        command_timeout = self.get_option('persistent_command_timeout')
        self._validate_timeout_value(command_timeout, "persistent_command_timeout")

        buffer_read_timeout = self.get_option('persistent_buffer_read_timeout')
        self._validate_timeout_value(buffer_read_timeout, "persistent_buffer_read_timeout")

        while True:
            if command_prompt_matched:
                try:
                    signal.signal(signal.SIGALRM, self._handle_buffer_read_timeout)
                    signal.setitimer(signal.ITIMER_REAL, buffer_read_timeout)
                    data = self._ssh_shell.recv(256)
                    signal.alarm(0)
                    # if data is still received on channel it indicates the prompt string
                    # is wrongly matched in between response chunks, continue to read
                    # remaining response.
                    command_prompt_matched = False

                    # restart command_timeout timer
                    signal.signal(signal.SIGALRM, self._handle_command_timeout)
                    signal.alarm(command_timeout)

                except AnsibleCmdRespRecv:
                    return self._command_response
            else:
                data = self._ssh_shell.recv(256)

            # when a channel stream is closed, received data will be empty
            if not data:
                break

            recv.write(data)
            offset = recv.tell() - 256 if recv.tell() > 256 else 0
            recv.seek(offset)

            window = self._strip(recv.read())
            window_count += 1

            if prompts and not handled:
                handled = self._handle_prompt(window, prompts, answer, newline, False, check_all)
                matched_prompt_window = window_count
            elif prompts and handled and prompt_retry_check and matched_prompt_window + 1 == window_count:
                # check again even when handled, if same prompt repeats in next window
                # (like in the case of a wrong enable password, etc) indicates
                # value of answer is wrong, report this as error.
                if self._handle_prompt(window, prompts, answer, newline, prompt_retry_check, check_all):
                    raise AnsibleConnectionFailure("For matched prompt '%s', answer is not valid" % self._matched_cmd_prompt)

            if self._find_prompt(window):
                self._last_response = recv.getvalue()
                resp = self._strip(self._last_response)
                self._command_response = self._sanitize(resp, command)
                if buffer_read_timeout == 0.0:
                    return self._command_response
                else:
                    command_prompt_matched = True
Example #51
0
def testplot(names, showChannels, infile, outfile, approved):
    x = []
    ch1 = []
    ch2 = []
    ch3 = []
    ch4 = []
    ch5 = []
    ch6 = []
    ch7 = []
    ch8 = []

    with open(infile, 'r') as csvfile:
        plots = csv.reader(csvfile, delimiter=',')
        fig, ax = plt.subplots()

        df = pd.read_csv(infile,
                         sep=r'\s*,\s*',
                         encoding='ascii',
                         engine='python')
        #df.set_index("Time", inplace=True)
        #print(df.head())
        headers = df.columns.tolist()
        #print(headers)
        #for row in plots:
        #   ch1.append(int(row[1]))
        test = df[headers[0]]
        print(df[headers[0]].head())

    if showChannels[0] == 'true':
        plt.plot(df[headers[0]], df[headers[1]], label=headers[1])
    if showChannels[1] == 'true':
        plt.plot(df[headers[0]], df[headers[2]], label=headers[2])
    if showChannels[2] == 'true':
        plt.plot(df[headers[0]], df[headers[3]], label=headers[3])
    if showChannels[3] == 'true':
        plt.plot(df[headers[0]], df[headers[4]], label=headers[4])
    if showChannels[4] == 'true':
        plt.plot(df[headers[0]], df[headers[5]], label=headers[5])
    if showChannels[5] == 'true':
        plt.plot(df[headers[0]], df[headers[6]], label=headers[6])
    if showChannels[6] == 'true':
        plt.plot(df[headers[0]], df[headers[7]], label=headers[7])
    if showChannels[7] == 'true':
        plt.plot(df[headers[0]], df[headers[8]], label=headers[8])

    ax.xaxis.set_major_locator(plt.MaxNLocator(3))
    ax.set_ylim(0, 1000)
    plt.setp(ax.get_xticklabels())
    plt.xlabel('time')
    plt.grid()
    plt.ylabel('mbar')
    plt.legend()

    imgdata = BytesIO()
    plt.savefig(imgdata, format='png', dpi=1200)
    imgdata.seek(0)

    im = ImageReader(imgdata)
    logo = ImageReader('/home/larskf/TestReportGenerator/DB_long.jpg')

    c = canvas.Canvas(outfile)
    w, h = c._pagesize
    c.drawImage(im, 0, 120, w, h, preserveAspectRatio=True)
    c.drawImage(logo, 100, h / 2 - 50, w - 200, h, preserveAspectRatio=True)

    start = datetime.datetime.strptime(df[headers[0]].values[0],
                                       '%d.%m.%Y %H:%M:%S')
    stop = datetime.datetime.strptime(df[headers[0]].values[-1],
                                      '%d.%m.%Y %H:%M:%S')
    dur = stop - start

    c.setFont('Helvetica-Bold', 13)
    str = "Pressure leak test"
    c.drawString(50, 725, str)
    c.setFont('Helvetica', 11)

    str = "Started logging at:"
    c.drawString(50, 300, str)
    str = "     {}".format(start)
    c.drawString(50, 280, str)
    str = "Test duration: {}".format(dur)
    c.drawString(50, 260, str)

    c.drawString(
        50, 255,
        "_______________________________________________________________________________"
    )
    c.setFont('Helvetica', 9)
    if showChannels[0] == 'true':
        maxval = max(df[headers[1]])
        minval = min(df[headers[1]])
        avgval = statistics.mean(df[headers[1]])
        str = "{}:  Max: {:.0f} mbar     Min: {:.0f} mbar     Avg: {:.0f} mbar      Drop: {:.0f} mbar".format(
            headers[1], maxval, minval, avgval, maxval - minval)
        #str = "%s:  Max: %f.2  Min: %f.2 Avg: %f.2 " % (names[0], maxval, minval, avgval)
        c.drawString(50, 240, str)

    if showChannels[1] == 'true':
        maxval = max(df[headers[2]])
        minval = min(df[headers[2]])
        avgval = statistics.mean(df[headers[2]])
        str = "{}:  Max: {:.0f} mbar     Min: {:.0f} mbar     Avg: {:.0f} mbar      Drop: {:.0f} mbar".format(
            headers[2], maxval, minval, avgval, maxval - minval)
        c.drawString(50, 220, str)
    if showChannels[2] == 'true':
        maxval = max(df[headers[3]])
        minval = min(df[headers[3]])
        avgval = statistics.mean(df[headers[3]])
        str = "{}:  Max: {:.0f} mbar     Min: {:.0f} mbar     Avg: {:.0f} mbar      Drop: {:.0f} mbar".format(
            headers[3], maxval, minval, avgval, maxval - minval)
        c.drawString(50, 200, str)
    if showChannels[3] == 'true':
        maxval = max(df[headers[4]])
        minval = min(df[headers[4]])
        avgval = statistics.mean(df[headers[4]])
        str = "{}:  Max: {:.0f} mbar     Min: {:.0f} mbar     Avg: {:.0f} mbar      Drop: {:.0f} mbar".format(
            headers[4], maxval, minval, avgval, maxval - minval)
        c.drawString(50, 180, str)
    if showChannels[4] == 'true':
        maxval = max(df[headers[5]])
        minval = min(df[headers[5]])
        avgval = statistics.mean(df[headers[5]])
        str = "{}:  Max: {:.0f} mbar     Min: {:.0f} mbar     Avg: {:.0f} mbar      Drop: {:.0f} mbar".format(
            headers[5], maxval, minval, avgval, maxval - minval)
        c.drawString(50, 160, str)
    if showChannels[5] == 'true':
        maxval = max(df[headers[6]])
        minval = min(df[headers[6]])
        avgval = statistics.mean(df[headers[6]])
        str = "{}:  Max: {:.0f} mbar     Min: {:.0f} mbar     Avg: {:.0f} mbar      Drop: {:.0f} mbar".format(
            headers[6], maxval, minval, avgval, maxval - minval)
        c.drawString(50, 140, str)
    if showChannels[6] == 'true':
        maxval = max(df[headers[7]])
        minval = min(df[headers[7]])
        avgval = statistics.mean(df[headers[7]])
        str = "{}:  Max: {:.0f} mbar     Min: {:.0f} mbar     Avg: {:.0f} mbar      Drop: {:.0f} mbar".format(
            headers[7], maxval, minval, avgval, maxval - minval)
        c.drawString(50, 120, str)
    if showChannels[7] == 'true':
        maxval = max(df[headers[8]])
        minval = min(df[headers[8]])
        avgval = statistics.mean(df[headers[8]])
        str = "{}:  Max: {:.0f} mbar     Min: {:.0f} mbar     Avg: {:.0f} mbar      Drop: {:.0f} mbar".format(
            headers[8], maxval, minval, avgval, maxval - minval)
        c.drawString(50, 100, str)

    c.drawString(
        50, 95,
        "_______________________________________________________________________________"
    )

    c.setFont('Helvetica', 11)
    str = "Approved by: %s" % approved
    c.drawString(50, 75, str)
    c.drawString(50, 30, "_____________________________________")

    c.save()
Example #52
0
def plot_ROC(X, y, classifier, cv):
    from sklearn.metrics import roc_curve, auc
    from sklearn.model_selection import StratifiedKFold
    from scipy import interp
    cv = StratifiedKFold(n_splits=cv)

    tprs = []
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)

    i = 0
    for train, test in cv.split(X, y):
        probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
        # Compute ROC curve and area under the curve
        fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
        tprs.append(interp(mean_fpr, fpr, tpr))
        tprs[-1][0] = 0.0
        roc_auc = auc(fpr, tpr)
        aucs.append(roc_auc)
        plt.plot(fpr,
                 tpr,
                 lw=1,
                 alpha=0.3,
                 label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))

        i += 1
    #figure = plt.figure()
    plt.gcf().clear()
    plt.plot([0, 1], [0, 1],
             linestyle='--',
             lw=2,
             color='r',
             label='Luck',
             alpha=.8)

    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(aucs)
    plt.plot(mean_fpr,
             mean_tpr,
             color='b',
             label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
             lw=2,
             alpha=.8)

    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    plt.fill_between(mean_fpr,
                     tprs_lower,
                     tprs_upper,
                     color='grey',
                     alpha=.2,
                     label=r'$\pm$ 1 std. dev.')

    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC')
    plt.legend(loc="lower right")
    from io import BytesIO
    figfile = BytesIO()
    plt.savefig(figfile, format='png')
    figfile.seek(0)  # rewind to beginning of file
    import base64
    figdata_png = base64.b64encode(figfile.getvalue())
    return figdata_png
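A hedged usage note (X, y and classifier are assumed to be provided by the caller): the returned base64-encoded bytes can be embedded straight into an HTML report.
# Hedged usage sketch: plot_ROC returns base64-encoded PNG bytes.
png_b64 = plot_ROC(X, y, classifier, cv=5)
html_img = '<img src="data:image/png;base64,{}"/>'.format(png_b64.decode("utf-8"))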
Example #53
0
def iterate_over_test_chunk(pyarrow_type,
                            column_meta,
                            source_data_generator,
                            expected_data_transformer=None):
    stream = BytesIO()

    assert len(pyarrow_type) == len(column_meta)

    column_size = len(pyarrow_type)
    batch_row_count = 10
    batch_count = 9

    fields = []
    for i in range(column_size):
        fields.append(
            pyarrow.field(f"column_{i}", pyarrow_type[i], True,
                          column_meta[i]))
    schema = pyarrow.schema(fields)

    expected_data = []
    writer = RecordBatchStreamWriter(stream, schema)

    for i in range(batch_count):
        column_arrays = []
        py_arrays = []
        for j in range(column_size):
            column_data = []
            not_none_cnt = 0
            while not_none_cnt == 0:
                column_data = []
                for _ in range(batch_row_count):
                    data = (None if bool(random.getrandbits(1)) else
                            source_data_generator())
                    if data is not None:
                        not_none_cnt += 1
                    column_data.append(data)
            column_arrays.append(column_data)
            py_arrays.append(pyarrow.array(column_data, type=pyarrow_type[j]))

        if expected_data_transformer:
            for i in range(len(column_arrays)):
                column_arrays[i] = [
                    expected_data_transformer(_data)
                    if _data is not None else None
                    for _data in column_arrays[i]
                ]
        expected_data.append(column_arrays)

        column_names = [f"column_{i}" for i in range(column_size)]
        rb = RecordBatch.from_arrays(py_arrays, column_names)
        writer.write_batch(rb)

    writer.close()

    # seek stream to beginning so that we can read from stream
    stream.seek(0)
    context = ArrowConverterContext()
    it = PyArrowIterator(None, stream, context, False, False, False)
    it.init(IterUnit.ROW_UNIT.value)

    count = 0
    while True:
        try:
            val = next(it)
            for i in range(column_size):
                batch_index = int(count / batch_row_count)
                assert (val[i] == expected_data[batch_index][i][
                    count - batch_row_count * batch_index])
            count += 1
        except StopIteration:
            assert count == (batch_count * batch_row_count)
            break
Example #54
0
def _rt_check_case(name, expected, format):
    mat_stream = BytesIO()
    savemat(mat_stream, expected, format=format)
    mat_stream.seek(0)
    _load_check_case(name, [mat_stream], expected)
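A hedged, self-contained illustration of the same in-memory round-trip using scipy.io.loadmat instead of the test helpers (which come from the surrounding test module):
# Hedged standalone sketch of a savemat/loadmat round-trip through BytesIO.
import numpy as np
from io import BytesIO
from scipy.io import savemat, loadmat

buf = BytesIO()
savemat(buf, {'x': np.eye(3)}, format='5')
buf.seek(0)
assert np.array_equal(loadmat(buf)['x'], np.eye(3))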
Example #55
0
class THeaderTransport(TTransportBase, CReadableTransport):
    """Transport that sends headers.  Also understands framed/unframed/HTTP
    transports and will do the right thing"""

    __max_frame_size = MAX_FRAME_SIZE

    # Defaults to current user, but there is also a setter below.
    __identity = None
    __first_request = True
    IDENTITY_HEADER = "identity"
    ID_VERSION_HEADER = "id_version"
    ID_VERSION = "1"
    CLIENT_METADATA_HEADER = "client_metadata"

    def __init__(self, trans, client_types=None, client_type=None):
        self.__trans = trans
        self.__rbuf = StringIO()
        self.__rbuf_frame = False
        self.__wbuf = StringIO()
        self.seq_id = 0
        self.__flags = 0
        self.__read_transforms = []
        self.__write_transforms = []
        self.__supported_client_types = set(client_types or (CLIENT_TYPE.HEADER,))
        self.__proto_id = T_COMPACT_PROTOCOL  # default to compact like c++
        self.__client_type = client_type or CLIENT_TYPE.HEADER
        self.__read_headers = {}
        self.__read_persistent_headers = {}
        self.__write_headers = {}
        self.__write_persistent_headers = {}

        if self.__client_type in (
            CLIENT_TYPE.UNFRAMED_DEPRECATED,
            CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED,
            CLIENT_TYPE.FRAMED_DEPRECATED,
            CLIENT_TYPE.FRAMED_COMPACT,
        ):
            self.__client_type = CLIENT_TYPE.HEADER

        self.__supported_client_types.add(self.__client_type)

        # If we support unframed binary / framed binary also support compact
        if CLIENT_TYPE.UNFRAMED_DEPRECATED in self.__supported_client_types:
            self.__supported_client_types.add(CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED)
        if CLIENT_TYPE.FRAMED_DEPRECATED in self.__supported_client_types:
            self.__supported_client_types.add(CLIENT_TYPE.FRAMED_COMPACT)

    def set_header_flag(self, flag):
        self.__flags |= flag

    def clear_header_flag(self, flag):
        self.__flags &= ~flag

    def header_flags(self):
        return self.__flags

    def set_max_frame_size(self, size):
        if size > MAX_BIG_FRAME_SIZE:
            raise TTransportException(
                TTransportException.INVALID_FRAME_SIZE,
                "Cannot set max frame size > %s" % MAX_BIG_FRAME_SIZE,
            )
        if size > MAX_FRAME_SIZE and self.__client_type != CLIENT_TYPE.HEADER:
            raise TTransportException(
                TTransportException.INVALID_FRAME_SIZE,
                "Cannot set max frame size > %s for clients other than HEADER"
                % MAX_FRAME_SIZE,
            )
        self.__max_frame_size = size

    def get_peer_identity(self):
        if self.IDENTITY_HEADER in self.__read_headers:
            if self.__read_headers[self.ID_VERSION_HEADER] == self.ID_VERSION:
                return self.__read_headers[self.IDENTITY_HEADER]
        return None

    def set_identity(self, identity):
        self.__identity = identity

    def get_protocol_id(self):
        return self.__proto_id

    def set_protocol_id(self, proto_id):
        self.__proto_id = proto_id

    def set_header(self, str_key, str_value):
        self.__write_headers[str_key] = str_value

    def get_write_headers(self):
        return self.__write_headers

    def get_headers(self):
        return self.__read_headers

    def clear_headers(self):
        self.__write_headers.clear()

    def set_persistent_header(self, str_key, str_value):
        self.__write_persistent_headers[str_key] = str_value

    def get_write_persistent_headers(self):
        return self.__write_persistent_headers

    def clear_persistent_headers(self):
        self.__write_persistent_headers.clear()

    def add_transform(self, trans_id):
        self.__write_transforms.append(trans_id)

    def _reset_protocol(self):
        # HTTP calls that are one way need to flush here.
        if self.__client_type == CLIENT_TYPE.HTTP_SERVER:
            self.flush()
        # set to anything except unframed
        self.__client_type = CLIENT_TYPE.UNKNOWN
        # Read header bytes to check which protocol to decode
        self.readFrame(0)

    def getTransport(self):
        return self.__trans

    def isOpen(self):
        return self.getTransport().isOpen()

    def open(self):
        return self.getTransport().open()

    def close(self):
        return self.getTransport().close()

    def read(self, sz):
        ret = self.__rbuf.read(sz)
        if len(ret) == sz:
            return ret

        if self.__client_type in (
            CLIENT_TYPE.UNFRAMED_DEPRECATED,
            CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED,
        ):
            return ret + self.getTransport().readAll(sz - len(ret))

        self.readFrame(sz - len(ret))
        return ret + self.__rbuf.read(sz - len(ret))

    readAll = read  # TTransportBase.readAll does a needless copy here.

    def readFrame(self, req_sz):
        self.__rbuf_frame = True
        word1 = self.getTransport().readAll(4)
        sz = unpack("!I", word1)[0]
        proto_id = word1[0]
        if proto_id == TBinaryProtocol.PROTOCOL_ID:
            # unframed
            self.__client_type = CLIENT_TYPE.UNFRAMED_DEPRECATED
            self.__proto_id = T_BINARY_PROTOCOL
            if req_sz <= 4:  # check for reads < 0.
                self.__rbuf = StringIO(word1)
            else:
                self.__rbuf = StringIO(word1 + self.getTransport().read(req_sz - 4))
        elif proto_id == TCompactProtocol.PROTOCOL_ID:
            self.__client_type = CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED
            self.__proto_id = T_COMPACT_PROTOCOL
            if req_sz <= 4:  # check for reads < 0.
                self.__rbuf = StringIO(word1)
            else:
                self.__rbuf = StringIO(word1 + self.getTransport().read(req_sz - 4))
        elif sz == HTTP_SERVER_MAGIC:
            self.__client_type = CLIENT_TYPE.HTTP_SERVER
            mf = self.getTransport().handle.makefile("rb", -1)

            self.handler = RequestHandler(mf, "client_address:port", "")
            self.header = self.handler.wfile
            self.__rbuf = StringIO(self.handler.data)
        else:
            if sz == BIG_FRAME_MAGIC:
                sz = unpack("!Q", self.getTransport().readAll(8))[0]
            # could be header format or framed.  Check next two bytes.
            magic = self.getTransport().readAll(2)
            proto_id = magic[0]
            if proto_id == TCompactProtocol.PROTOCOL_ID:
                self.__client_type = CLIENT_TYPE.FRAMED_COMPACT
                self.__proto_id = T_COMPACT_PROTOCOL
                _frame_size_check(sz, self.__max_frame_size, header=False)
                self.__rbuf = StringIO(magic + self.getTransport().readAll(sz - 2))
            elif proto_id == TBinaryProtocol.PROTOCOL_ID:
                self.__client_type = CLIENT_TYPE.FRAMED_DEPRECATED
                self.__proto_id = T_BINARY_PROTOCOL
                _frame_size_check(sz, self.__max_frame_size, header=False)
                self.__rbuf = StringIO(magic + self.getTransport().readAll(sz - 2))
            elif magic == PACKED_HEADER_MAGIC:
                self.__client_type = CLIENT_TYPE.HEADER
                _frame_size_check(sz, self.__max_frame_size)
                # flags(2), seq_id(4), header_size(2)
                n_header_meta = self.getTransport().readAll(8)
                self.__flags, self.seq_id, header_size = unpack("!HIH", n_header_meta)
                data = StringIO()
                data.write(magic)
                data.write(n_header_meta)
                data.write(self.getTransport().readAll(sz - 10))
                data.seek(10)
                self.read_header_format(sz - 10, header_size, data)
            else:
                self.__client_type = CLIENT_TYPE.UNKNOWN
                raise TTransportException(
                    TTransportException.INVALID_CLIENT_TYPE,
                    "Could not detect client transport type",
                )

        if self.__client_type not in self.__supported_client_types:
            raise TTransportException(
                TTransportException.INVALID_CLIENT_TYPE,
                "Client type {} not supported on server".format(self.__client_type),
            )

    def read_header_format(self, sz, header_size, data):
        # clear out any previous transforms
        self.__read_transforms = []

        header_size = header_size * 4
        if header_size > sz:
            raise TTransportException(
                TTransportException.INVALID_FRAME_SIZE,
                "Header size is larger than frame",
            )
        end_header = header_size + data.tell()

        self.__proto_id = readVarint(data)
        num_headers = readVarint(data)

        if self.__proto_id == 1 and self.__client_type != CLIENT_TYPE.HTTP_SERVER:
            raise TTransportException(
                TTransportException.INVALID_CLIENT_TYPE,
                "Trying to recv JSON encoding over binary",
            )

        # Read the headers.  Data for each header varies.
        for _ in range(0, num_headers):
            trans_id = readVarint(data)
            if trans_id in (TRANSFORM.ZLIB, TRANSFORM.SNAPPY, TRANSFORM.ZSTD):
                self.__read_transforms.insert(0, trans_id)
            elif trans_id == TRANSFORM.HMAC:
                raise TApplicationException(
                    TApplicationException.INVALID_TRANSFORM,
                    "Hmac transform is no longer supported: %i" % trans_id,
                )
            else:
                # TApplicationException will be sent back to client
                raise TApplicationException(
                    TApplicationException.INVALID_TRANSFORM,
                    "Unknown transform in client request: %i" % trans_id,
                )

        # Clear out previous info headers.
        self.__read_headers.clear()

        # Read the info headers.
        while data.tell() < end_header:
            info_id = readVarint(data)
            if info_id == INFO.NORMAL:
                _read_info_headers(data, end_header, self.__read_headers)
            elif info_id == INFO.PERSISTENT:
                _read_info_headers(data, end_header, self.__read_persistent_headers)
            else:
                break  # Unknown header.  Stop info processing.

        if self.__read_persistent_headers:
            self.__read_headers.update(self.__read_persistent_headers)

        # Skip the rest of the header
        data.seek(end_header)

        payload = data.read(sz - header_size)

        # Read the data section.
        self.__rbuf = StringIO(self.untransform(payload))

    def write(self, buf):
        self.__wbuf.write(buf)

    def transform(self, buf):
        for trans_id in self.__write_transforms:
            if trans_id == TRANSFORM.ZLIB:
                buf = zlib.compress(buf)
            elif trans_id == TRANSFORM.SNAPPY:
                buf = snappy.compress(buf)
            elif trans_id == TRANSFORM.ZSTD:
                buf = zstd.ZstdCompressor(write_content_size=True).compress(buf)
            else:
                raise TTransportException(
                    TTransportException.INVALID_TRANSFORM,
                    "Unknown transform during send",
                )
        return buf

    def untransform(self, buf):
        for trans_id in self.__read_transforms:
            if trans_id == TRANSFORM.ZLIB:
                buf = zlib.decompress(buf)
            elif trans_id == TRANSFORM.SNAPPY:
                buf = snappy.decompress(buf)
            elif trans_id == TRANSFORM.ZSTD:
                buf = zstd.ZstdDecompressor().decompress(buf)
            if trans_id not in self.__write_transforms:
                self.__write_transforms.append(trans_id)
        return buf

    def disable_client_metadata(self):
        self.__first_request = False

    def flush(self):
        self.flushImpl(False)

    def onewayFlush(self):
        self.flushImpl(True)

    def _flushHeaderMessage(self, buf, wout, wsz):
        """Write a message for CLIENT_TYPE.HEADER

        @param buf(StringIO): Buffer to write message to
        @param wout(str): Payload
        @param wsz(int): Payload length
        """
        transform_data = StringIO()
        # For now, all transforms don't require data.
        num_transforms = len(self.__write_transforms)
        for trans_id in self.__write_transforms:
            transform_data.write(getVarint(trans_id))

        # Add in special flags.
        if self.__identity:
            self.__write_headers[self.ID_VERSION_HEADER] = self.ID_VERSION
            self.__write_headers[self.IDENTITY_HEADER] = self.__identity

        if self.__first_request:
            self.__first_request = False
            self.__write_headers[
                self.CLIENT_METADATA_HEADER
            ] = '{"agent":"THeaderTransport.py"}'

        info_data = StringIO()

        # Write persistent kv-headers
        _flush_info_headers(
            info_data, self.get_write_persistent_headers(), INFO.PERSISTENT
        )

        # Write non-persistent kv-headers
        _flush_info_headers(info_data, self.__write_headers, INFO.NORMAL)

        header_data = StringIO()
        header_data.write(getVarint(self.__proto_id))
        header_data.write(getVarint(num_transforms))

        header_size = transform_data.tell() + header_data.tell() + info_data.tell()

        padding_size = 4 - (header_size % 4)
        header_size = header_size + padding_size

        # MAGIC(2) | FLAGS(2) + SEQ_ID(4) + HEADER_SIZE(2)
        wsz += header_size + 10
        if wsz > MAX_FRAME_SIZE:
            buf.write(pack("!I", BIG_FRAME_MAGIC))
            buf.write(pack("!Q", wsz))
        else:
            buf.write(pack("!I", wsz))
        buf.write(pack("!HH", HEADER_MAGIC >> 16, self.__flags))
        buf.write(pack("!I", self.seq_id))
        buf.write(pack("!H", header_size // 4))

        buf.write(header_data.getvalue())
        buf.write(transform_data.getvalue())
        buf.write(info_data.getvalue())

        # Pad out the header with 0x00
        for _ in range(0, padding_size, 1):
            buf.write(pack("!c", b"\0"))

        # Send data section
        buf.write(wout)

    def flushImpl(self, oneway):
        wout = self.__wbuf.getvalue()
        wout = self.transform(wout)
        wsz = len(wout)

        # reset wbuf before write/flush to preserve state on underlying failure
        self.__wbuf.seek(0)
        self.__wbuf.truncate()

        if self.__proto_id == 1 and self.__client_type != CLIENT_TYPE.HTTP_SERVER:
            raise TTransportException(
                TTransportException.INVALID_CLIENT_TYPE,
                "Trying to send JSON encoding over binary",
            )

        buf = StringIO()
        if self.__client_type == CLIENT_TYPE.HEADER:
            self._flushHeaderMessage(buf, wout, wsz)
        elif self.__client_type in (
            CLIENT_TYPE.FRAMED_DEPRECATED,
            CLIENT_TYPE.FRAMED_COMPACT,
        ):
            buf.write(pack("!i", wsz))
            buf.write(wout)
        elif self.__client_type in (
            CLIENT_TYPE.UNFRAMED_DEPRECATED,
            CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED,
        ):
            buf.write(wout)
        elif self.__client_type == CLIENT_TYPE.HTTP_SERVER:
            # Reset the client type if we sent something -
            # oneway calls via HTTP expect a status response otherwise
            buf.write(self.header.getvalue())
            buf.write(wout)
            self.__client_type = CLIENT_TYPE.HEADER
        elif self.__client_type == CLIENT_TYPE.UNKNOWN:
            raise TTransportException(
                TTransportException.INVALID_CLIENT_TYPE, "Unknown client type"
            )

        # We don't include the framing bytes as part of the frame size check
        frame_size = buf.tell() - (4 if wsz < MAX_FRAME_SIZE else 12)
        _frame_size_check(
            frame_size,
            self.__max_frame_size,
            header=self.__client_type == CLIENT_TYPE.HEADER,
        )
        self.getTransport().write(buf.getvalue())
        if oneway:
            self.getTransport().onewayFlush()
        else:
            self.getTransport().flush()

    # Implement the CReadableTransport interface.
    @property
    def cstringio_buf(self):
        if not self.__rbuf_frame:
            self.readFrame(0)
        return self.__rbuf

    def cstringio_refill(self, prefix, reqlen):
        # self.__rbuf will already be empty here because fastproto doesn't
        # ask for a refill until the previous buffer is empty.  Therefore,
        # we can start reading new frames immediately.

        # On unframed clients, there is a chance there is something left
        # in rbuf, and the read pointer is not advanced by fastproto
        # so seek to the end to be safe
        self.__rbuf.seek(0, 2)
        while len(prefix) < reqlen:
            prefix += self.read(reqlen)
        self.__rbuf = StringIO(prefix)
        return self.__rbuf
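For orientation, here is a minimal standalone sketch (not the fbthrift implementation) of the framing arithmetic that _flushHeaderMessage performs: the frame length covers a fixed 10-byte prefix plus the header padded out to a multiple of 4 and the payload. The HEADER_MAGIC value below is assumed for illustration only.

import struct

HEADER_MAGIC = 0x0FFF0000  # assumed value, for illustration only

def frame_prefix(payload, header, seq_id, flags=0):
    # Pad the variable-length header out to the next multiple of 4 bytes
    # (matching the padding loop above, which always adds 1-4 bytes).
    padding = 4 - (len(header) % 4)
    header = header + b"\0" * padding
    # The frame size counts everything after the leading 4-byte length word:
    # MAGIC(2) + FLAGS(2) + SEQ_ID(4) + HEADER_SIZE(2) = 10 bytes, plus the
    # padded header and the payload.
    wsz = len(payload) + len(header) + 10
    return (
        struct.pack("!I", wsz)
        + struct.pack("!HH", HEADER_MAGIC >> 16, flags)
        + struct.pack("!I", seq_id)
        + struct.pack("!H", len(header) // 4)
        + header
        + payload
    )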
Example #56
0
async def draw_matches_table(matches, game_strings):
    region_data = read_json(settings.resource("json/region_data.json"))

    border_size = 10
    grey_color = "#BBBBBB"
    table = Table(background=discord_color2)
    # Header
    headers = [
        TextCell("Hero", padding=0),
        TextCell(""),
        TextCell("Result"),
        TextCell("K", horizontal_align="center"),
        TextCell("D", horizontal_align="center"),
        TextCell("A", horizontal_align="center"),
        TextCell("Duration"),
        TextCell("Type"),
        TextCell("Date")
    ]
    table.add_row(headers)
    for cell in table.rows[0]:
        cell.background = discord_color1

    table.add_row([
        ColorCell(color=discord_color1, height=6) for i in range(len(headers))
    ])
    first = True
    for match in matches:
        won_match = bool(match["radiant_win"]) == bool(match["player_slot"] < 128)
        game_mode = game_strings.get(f"game_mode_{match['game_mode']}",
                                     "Unknown")
        lobby_type = game_strings.get(f"lobby_type_{match['lobby_type']}",
                                      "Unknown")
        if first:
            first = False
        else:
            table.add_row([
                ColorCell(color=discord_color2, height=12)
                for i in range(len(headers))
            ])
        table.add_row([
            ImageCell(img=await get_hero_image(match["hero_id"]), height=48),
            DoubleCell(
                TextCell(get_hero_name(match["hero_id"]), font_size=24),
                TextCell(match.get("match_id"),
                         font_size=12,
                         horizontal_align="left",
                         color=grey_color)),
            TextCell("Win" if won_match else "Loss",
                     color=("green" if won_match else "red"),
                     horizontal_align="center"),
            TextCell(match.get("kills")),
            TextCell(match.get("deaths")),
            TextCell(match.get("assists")),
            TextCell(format_duration_simple(match.get("duration")),
                     horizontal_align="center"),
            DoubleCell(
                TextCell(game_mode,
                         font_size=18,
                         padding_right=15,
                         color=grey_color),
                TextCell(lobby_type,
                         font_size=18,
                         padding_right=15,
                         color=grey_color)),
            get_datetime_cell(match, region_data)
        ])
    image = table.render()

    border_image = Image.new('RGBA',
                             (image.size[0] +
                              (border_size * 2), image.size[1] + border_size),
                             color=discord_color1)
    image = paste_image(border_image, image, border_size, 0)

    fp = BytesIO()
    image.save(fp, format="PNG")
    fp.seek(0)

    return fp
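The border step above goes through the bot's own paste_image helper; a minimal sketch of the same padding-and-encode pattern using only Pillow (assuming a plain paste is all that is needed) could look like this:

from io import BytesIO
from PIL import Image

def add_border(image, border_size, color):
    # New canvas with extra width on both sides and extra height on top only,
    # mirroring the dimensions used in draw_matches_table.
    bordered = Image.new(
        "RGBA",
        (image.size[0] + border_size * 2, image.size[1] + border_size),
        color=color,
    )
    bordered.paste(image, (border_size, 0))
    fp = BytesIO()
    bordered.save(fp, format="PNG")
    fp.seek(0)
    return fp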
Example #57
0
def detect_sync(*args, **kwargs):
    # check if the request_processing_function had an error while parsing user specified parameters
    if kwargs.get('error_code', None) is not None:
        return _make_error_response(kwargs.get('error_code'),
                                    kwargs.get('error_message'))

    render_boxes = kwargs.get('render_boxes')
    classification = kwargs.get('classification')
    detection_confidence = kwargs.get('detection_confidence')
    images = kwargs.get('images')
    image_names = kwargs.get('image_names')

    detection_results = []
    inference_time_detector = []

    try:
        print('runserver, post_detect_sync, scoring images...')

        for image_name, image in zip(image_names, images):
            start_time = time.time()

            result = detector.generate_detections_one_image(image, image_name)
            detection_results.append(result)

            elapsed = time.time() - start_time
            inference_time_detector.append(elapsed)

    except Exception as e:
        print('Error performing detection on the images: ' + str(e))
        log.log_exception('Error performing detection on the images: ' +
                          str(e))
        return _make_error_response(
            500, 'Error performing detection on the images: ' + str(e))

    # filter the detections by the confidence threshold
    # json to return to the user, along with the rendered images if they opted for them
    filtered_results = {}
    # each result is [ymin, xmin, ymax, xmax, confidence, category]
    try:
        for result in detection_results:
            image_name = result['file']
            detections = result.get('detections', None)
            filtered_results[image_name] = []

            if detections is None:
                continue

            for d in detections:
                if d['conf'] > detection_confidence:
                    res = TFDetector.convert_to_tf_coords(d['bbox'])
                    res.append(d['conf'])
                    res.append(
                        int(d['category'])
                    )  # category is an int here, not string as in the async API
                    filtered_results[image_name].append(res)

    except Exception as e:
        print('Error consolidating the detection boxes: ' + str(e))
        log.log_exception('Error consolidating the detection boxes: ' + str(e))
        return _make_error_response(
            500, 'Error consolidating the detection boxes: ' + str(e))

    # classification
    classification_result = {}
    # TODO
    # try:
    #     if classification:
    #         print('runserver, classification...')
    #         tic = datetime.now()
    #         classification_result = classifier.classify_boxes(images, image_names, result, classification)
    #         toc = datetime.now()
    #         classification_inference_duration = toc - tic
    #         print('runserver, classification, classification inference duration: {}' \
    #               .format({classification_inference_duration}))
    #
    #     else:
    #         classification_result = {}
    #
    # except Exception as e:
    #     print('Error performing classification on the images: ' + str(e))
    #     log.log_exception('Error performing classification on the images: ' + str(e))
    #     abort(500, 'Error performing classification on the images: ' + str(e))

    # return results; optionally render the detections on the images and send the annotated images back
    try:
        print(
            'runserver, post_detect_sync, rendering and sending images back...'
        )
        fields = {
            'detection_result':
            ('detection_result', json.dumps(filtered_results),
             'application/json'),
            'classification_result':
            ('classification_result', json.dumps(classification_result),
             'application/json')
        }

        if render_boxes:
            for image_name, image, result in zip(image_names, images,
                                                 detection_results):
                detections = result.get('detections', None)
                if detections is None:
                    continue
                viz_utils.render_detection_bounding_boxes(
                    detections,
                    image,
                    confidence_threshold=detection_confidence)

                output_img_stream = BytesIO()
                image.save(output_img_stream, format='jpeg')
                output_img_stream.seek(0)
                fields[image_name] = (image_name, output_img_stream,
                                      'image/jpeg')

        m = MultipartEncoder(fields=fields)

        if len(inference_time_detector) > 0:
            mean_inference_time_detector = sum(inference_time_detector) / len(
                inference_time_detector)
        else:
            mean_inference_time_detector = -1

        log.log_info(
            'detector mean inference time',
            mean_inference_time_detector,
            additionalProperties={
                'detector mean inference time':
                str(mean_inference_time_detector),
                # TODO 'classification mean inference time': str(''),
                'num_images': len(image_names),
                'render_boxes': render_boxes,
                'detection_confidence': detection_confidence
            })
        return Response(m.to_string(), mimetype=m.content_type)
    except Exception as e:
        print('Error returning result or rendering the detection boxes: ' +
              str(e))
        log.log_exception(
            'Error returning result or rendering the detection boxes: ' +
            str(e))
        return _make_error_response(
            500, 'Error returning result or rendering the detection boxes: ' +
            str(e))
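A minimal sketch of the multipart response assembly used in the success path, assuming requests_toolbelt is available; each field maps to a (name, payload, content_type) tuple:

import json
from io import BytesIO
from requests_toolbelt.multipart.encoder import MultipartEncoder

def build_multipart(detections, jpeg_images):
    # detections: dict of image name -> filtered boxes
    # jpeg_images: dict of image name -> raw JPEG bytes
    fields = {
        'detection_result': ('detection_result',
                             json.dumps(detections), 'application/json'),
    }
    for name, raw in jpeg_images.items():
        fields[name] = (name, BytesIO(raw), 'image/jpeg')
    return MultipartEncoder(fields=fields)

# m = build_multipart(...); then Response(m.to_string(), mimetype=m.content_type)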
Example #58
0
def do_test_fn_return_output_socket(function, *args):
    output_socket = BytesIO()
    function(output_socket, *args)
    output_socket.seek(0)
    return output_socket
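Hypothetical usage of this helper in a test; the wrapped function only needs to take a writable file-like object as its first argument:

def write_greeting(out, name):
    out.write(b"hello " + name)

result = do_test_fn_return_output_socket(write_greeting, b"world")
assert result.read() == b"hello world"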
Example #59
0
def rebuild(ds):
    # pickle and unpickle (pickle.dump returns None, so don't bind its result)
    f = BytesIO()
    pickle.dump(ds, f)
    f.seek(0)
    return pickle.load(f)
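The same round trip can be written without an explicit buffer, since pickle.dumps and pickle.loads work on bytes directly:

def rebuild_simple(ds):
    # equivalent sketch: serialize to bytes and deserialize in one step
    return pickle.loads(pickle.dumps(ds))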
def img2bytes(img: Image.Image):
    buf = BytesIO()
    img.save(buf, format='PNG')
    buf.seek(0)
    return buf
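Hypothetical usage of img2bytes: generate a small image, encode it, and re-open the PNG buffer with Pillow.

from PIL import Image

img = Image.new("RGB", (32, 32), color="red")
buf = img2bytes(img)
restored = Image.open(buf)
assert restored.size == (32, 32)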