Example #1
 def handle_write(self):
     '''
     @see: dispatcher.handle_write
     '''
     assert self._writeq, 'Nothing to write'
     
     data, close = BytesIO(), False
     while self._writeq and data.tell() < self.bufferSize:
         content = self._writeq.popleft()
         if content is None:
             if self.close_connection: close = True
             break
         if isinstance(content, (bytes, memoryview)): data.write(content)
         elif isinstance(content, IInputStream):
             assert isinstance(content, IInputStream)
             byts = content.read(self.bufferSize - data.tell())
             if byts == b'':
                 if isinstance(content, IClosable): content.close()
                 continue
             data.write(byts)
             self._writeq.appendleft(content)
         else:
             while data.tell() < self.bufferSize:
                 try: byts = next(content)
                 except StopIteration: break
                 data.write(byts)
     
     sent = self.send(data.getbuffer())
     
     if close: self.close()
     elif sent < data.tell():
         self._writeq.appendleft(data.getbuffer()[sent:])
Example #2
 def _simulate_save(self, quality):
     """
     Saves original_image to an in-memory stream at the given quality and returns the stream and its size in bytes
     """
     output = BytesIO()
     save_kwargs = {
         'format': self.original_format,
         self.quality_range[self.original_format]['meta']: quality
     }
     self.original_image.save(
         output,
         **save_kwargs
     )
     print('Simulated save level %s size %s bytes' % (quality, output.getbuffer().nbytes))
     return output, output.getbuffer().nbytes
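A note on the size calculation used above: getbuffer().nbytes reports the buffer size without copying the data, and it equals the length of the bytes that getvalue() would return. A minimal, self-contained check (not tied to the project above):

from io import BytesIO

buf = BytesIO()
buf.write(b"example payload")
size = buf.getbuffer().nbytes       # size of the buffer, no copy made
assert size == len(buf.getvalue())  # getvalue() returns a copy of the same bytes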
Example #3
def saveData(_id, zoom=15):
	"""
	read from CSV and parse data
	we use the database to store the data-key before it will be intensely read
	"""
	csv = open("./%s/data.csv"%_id, "r")
	# data = mg.parseData(csv)
	data, info, header = mg.parseData(csv)
	csv.close()
	batch = leveldb.WriteBatch()
	for z in range(zoom+1):
		# splited_data = mg.splitData(data, z)
		splitted_data, splitted_info = mg.splitData(data, info, z)
		size = 2**z

		for x in range(size):
			for y in range(size):
				datum = np.array(splitted_data[x][y])
				# key = dataKey(_id, x, y, z)
				datakey = dataKey(_id, x, y, z)
				value = BytesIO()
				np.save(value, datum)
				# batch.Put(key, value.getbuffer())
				batch.Put(datakey, value.getbuffer())
				infokey= infoKey(_id, x, y, z)
				batch.Put(infokey, json.dumps(splitted_info[x][y]).encode())
		meta = {'header': header, 'color': '', 'infos': ''}
		writeMeta(_id, meta)
	db.Write(batch, sync=True)
Example #4
def response_for_download_zip(folder, maxFileSize = MAX_ZIP_FILE_SIZE):
    """
    Prepares a response for downloading a zip file
    consisting of all files in the folder (without recursion!)
    Content-Disposition header field from http://tools.ietf.org/html/rfc2183
    :param:     folder - instance of Folder model
    :return:    HttpResponse with zip file and some parameters
                Message in case the zip file exceeds the maximum size
    """
    zipSubdir = folder.name
    zipFilename = "%s.zip" % folder.name
    sio = BytesIO()  # Open BytesIO to grab in-memory ZIP contents
    zipFile = zipfile.ZipFile(sio, "w")    # The zip compressor
    zipFileSize = 0
    msg = ""
    for report in Report.objects.filter(parent_id=folder.id):
        zipFileSize += report.file.size
        if zipFileSize > maxFileSize:
            msg = 'Завеликий zip. Решту файлів відкинуто'  # "Zip too large. The remaining files were dropped"
            break
        filename = report.filename              # "human-readable" file name
        filepath = report.file.name             # path to the file on disk
        abs_path = os.path.join(MEDIA_ROOT, filepath)
        zipPath = os.path.join(zipSubdir, filename) # path inside the archive
        zipFile.write(abs_path, zipPath)            # add file to zip
    zipFile.close() # Must close zip for all contents to be written
    fileExt  = ".zip"
    ct = get_mimeType().get(fileExt.lower(), "application/octet-stream")
    fn = ' filename="%s";' % transliterate(zipFilename)
    fns = " filename*=utf-8''%s;" % urlquote(zipFilename)
    # Grab ZIP file from in-memory, make response with correct MIME-type
    response = HttpResponse(sio.getvalue(), content_type=ct)
    response['Content-Disposition'] = 'attachment' + fn + fns
    response['Content-Length'] = len(sio.getbuffer())
    return response, zipFilename, msg
Example #5
    def assertBlock(self, python, java):
        self.maxDiff = None
        dump = False

        py_block = PyBlock(parent=PyModule('test', 'test.py'))
        if python:
            python = adjust(python)
            code = compile(python, '<test>', 'exec')
            py_block.extract(code, debug=dump)

        java_code = py_block.transpile()

        out = BytesIO()
        constant_pool = ConstantPool()
        java_code.resolve(constant_pool)

        constant_pool.add(Utf8('test'))
        constant_pool.add(Utf8('Code'))
        constant_pool.add(Utf8('LineNumberTable'))

        writer = ClassFileWriter(out, constant_pool)
        java_code.write(writer)

        debug = StringIO()
        reader = ClassFileReader(BytesIO(out.getbuffer()), constant_pool, debug=debug)
        JavaCode.read(reader, dump=0)

        if dump:
            print(debug.getvalue())

        java = adjust(java)
        self.assertEqual(debug.getvalue(), java[1:])
Example #6
 def __create_small_picture__(request):
     """
     Generates a smaller, standard-size (128x128) picture of the original image with filename <o_o_image_filename>.
     The filename of the smaller file is <o_o_image_filename>_sm.
     If this fails, the smaller file will be removed!
     :param request:  the original request object
     :return: True on success, False otherwise
     """
     profile = request.user.profile
     outfile = profile.profile_picture_full.path + "_sm"
     try:
         im = Image.open(request.user.profile.profile_picture_full.path)
         im.thumbnail((128, 128))
         thumb_io = BytesIO()
         im.save(thumb_io, format='JPEG')
         thumb_file = InMemoryUploadedFile(thumb_io, None, 'pp.jpg', 'image/jpeg',
                                           thumb_io.getbuffer().nbytes, None)
         profile.profile_picture_small = thumb_file
         profile.save()
         return True
     except IOError:
         logging.error("Fehler beim speichern des thumbnails")
         try:
             os.remove(outfile)
         except IOError:
             pass
         return False
Example #7
 def save(self, *args, **kwargs):
     if self.pk is None:
         saved_picture = self.picture
         self.picture = None
         super().save(*args, **kwargs)
         self.picture = saved_picture
     # TODO: saving files after resizing already works.
     # To change:
     # - save the 200x200 file under the same name 1.jpg
     # - remove the 200x200 filter in the template and allow showing
     #   the whole image (since it is already cropped to 200x200
     #   at the saving stage)
     if self.picture:
         # If no new file comes from the form, self.picture.name
         # will contain the path to the old file, which matches the pattern
         # "profile_images/[0-9]*.jpg".
         # Otherwise self.picture.name is a new file
         # that needs to be shrunk before saving.
         if not CheckPathMatchMediaPattern(self.picture.name):
             image = Image.open(BytesIO(self.picture.read()))
             image.thumbnail((200,200), Image.ANTIALIAS)
             output = BytesIO()
             image.save(output, format='JPEG', quality=90, optimize=1)
             output.seek(0)
             self.picture = InMemoryUploadedFile(output, 'ImageField',
                              self.picture.name, 'image/jpeg',
                              output.getbuffer().nbytes, None)
     super().save(*args, **kwargs)
Example #8
def crop_img(image, crop):
    try:
        temp = Image.open(image)
        format = temp.format
        # Rotate image based off of exif data (used by mobile devices that automatically orients the picture)
        # http://www.lifl.fr/~damien.riquet/auto-rotating-pictures-using-pil.html
        try:
            exif = temp._getexif()
            orientation_key = 274
            if exif:
                if orientation_key in exif:
                    orientation = exif[orientation_key]
                    rotate_values = {3: 180, 6: 270, 8: 90}
                    if orientation in rotate_values:
                        temp = temp.rotate(rotate_values[orientation], expand=1)
        except Exception:
            pass
        extra = json.loads(crop)
        x = int(extra.get('x'))
        y = int(extra.get('y'))
        height = int(extra.get('height'))
        width = int(extra.get('width'))
        temp = temp.crop((x, y, x + width, y + height))
        # Django imagefield is a InMemoryUploadedFile, need to convert PIL image in order to save
        temp_io = BytesIO()
        temp.save(temp_io, format=format)
        return InMemoryUploadedFile(temp_io, None, image.name, 'image/' + format, temp_io.getbuffer().nbytes, None)
    except Exception:
        print(traceback.format_exc())
        return 'Unable to read image, try a different image'
Example #9
def main():
    content = BytesIO(b'Hello again')

    key = b'32byteslongsecretkeymustprovided'
    encryption_key = base64.b64encode(key).decode()
    encryption_key_md5 = base64.b64encode(hashlib.md5(key).digest()).decode()

    minio = Minio(STORAGE_ENDPOINT, access_key=AWSAccessKeyId, secret_key=AWSSecretKey)

    # Put object with special headers which encrypt object in S3 with provided key
    minio.put_object(STORAGE_BUCKET, 'test_crypt.txt', content, content.getbuffer().nbytes,
                     metadata={
                         'x-amz-server-side-encryption-customer-algorithm': 'AES256',
                         'x-amz-server-side-encryption-customer-key': encryption_key,
                         'x-amz-server-side-encryption-customer-key-MD5': encryption_key_md5
                     })

    # Get decrypted object with same headers
    obj = minio.get_object(STORAGE_BUCKET, 'test_crypt.txt', request_headers={
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': encryption_key,
        'x-amz-server-side-encryption-customer-key-MD5': encryption_key_md5
    })

    print(obj.read())
Example #10
def decompress(fd):
    window = array('B', (b' ' * 4078) + (b'\x00' * 18))

    pos = 4078
    _out = BytesIO()

    def out(byte):
        nonlocal window, pos
        window[pos] = byte
        pos = (pos + 1) & 0xFFF
        _out.write(bytes([byte]))

    try:
        while True:
            for encoded in bits(fd.read(1)[0]):
                if encoded:
                    out(fd.read(1)[0])
                else:  # not a literal: copy length bytes from the window
                    code = fd.read(2)
                    offset = code[0] | (code[1] & 0xF0) << 4
                    length = (code[1] & 0xF) + 3

                    for x in range(offset, offset+length):
                        out(window[x & 0xFFF])
    except IndexError:
        pass

    return _out.getbuffer()
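The decoder above relies on a bits() helper that is not shown. A minimal sketch, assuming the flag byte is consumed least-significant bit first (a common LZSS convention; the real helper may use the opposite order):

def bits(byte):
    # Yield the 8 flag bits of one byte, LSB first (assumed order)
    for i in range(8):
        yield (byte >> i) & 1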
Example #11
class ContainerWriter(object):
    def __init__(self, fp, schema, sync_marker=None):
        self.writer = Writer(schema)
        self.fp = fp
        self.sync_marker = sync_marker or os.urandom(16)
        self.header_written = sync_marker is not None

        self.records = 0
        self.buffer = BytesIO()

    def write_header(self):
        assert not self.header_written, "Header is already written once"

        Writer(HEADER_SCHEMA).write(self.fp, {
            "magic": b"Obj\x01",
            "meta": {
                "avro.schema": json.dumps(self.schema.json).encode("utf8"),
                "avro.codec": b"null"
            },
            "sync": self.sync_marker
        })

        self.header_written = True

    def write(self, message):
        self.writer.write(self.buffer, message)
        self.records += 1

        if self.buffer.tell() > 1024 ** 2:
            self.flush()

    def flush(self):
        if not self.header_written:
            self.write_header()
            self.header_written = True

        if not self.records:
            return

        write_long(self.fp, self.records)
        write_long(self.fp, self.buffer.tell())
        self.fp.write(self.buffer.getbuffer())
        self.fp.write(self.sync_marker)
        self.fp.flush()

        self.records = 0
        self.buffer = BytesIO()

    @property
    def schema(self):
        """Returns the :class:`avrolight.schema.Schema` instance that this writer uses."""
        return self.writer.schema

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.flush()

    close = flush
Example #12
 def generate_preview(self, new_ver=False):
     if new_ver:
         b = BytesIO()
         self._get_workspace().save(b, "png")
         return b.getbuffer().tobytes()
     else:
         return self._magic.dump(mode="preview")
Example #13
def post_meshes_to_dvid(config, instance_name, partition_items):
    """
    Send the given meshes (either .obj or .drc) as key/value pairs to DVID.
    
    Args:
        config: The CreateMeshes workflow config data
        
        instance_name: key-value instance to post to
            
        partition_items: tuple (group_id, [(segment_id, mesh_data), (segment_id, mesh_data)])
    """
    # Re-use session for connection pooling.
    session = default_dvid_session()

    # Re-use resource manager client connections, too.
    # (If resource-server is empty, this will return a "dummy client")    
    resource_client = ResourceManagerClient( config["options"]["resource-server"],
                                             config["options"]["resource-port"] )

    dvid_server = config["dvid-info"]["dvid"]["server"]
    uuid = config["dvid-info"]["dvid"]["uuid"]
    
    grouping_scheme = config["mesh-config"]["storage"]["grouping-scheme"]
    mesh_format = config["mesh-config"]["storage"]["format"]

    if grouping_scheme == "no-groups":
        for group_id, segment_ids_and_meshes in partition_items:
            for (segment_id, mesh_data) in segment_ids_and_meshes:

                @auto_retry(3, pause_between_tries=60.0, logging_name=__name__)
                def write_mesh():
                    with resource_client.access_context(dvid_server, False, 2, len(mesh_data)):
                        session.post(f'{dvid_server}/api/node/{uuid}/{instance_name}/key/{segment_id}', mesh_data)
                        session.post(f'{dvid_server}/api/node/{uuid}/{instance_name}/key/{segment_id}_info', json={ 'format': mesh_format })
                
                write_mesh()
    else:
        # All other grouping schemes, including 'singletons' write tarballs.
        # (In the 'singletons' case, there is just one tarball per body.)
        for group_id, segment_ids_and_meshes in partition_items:
            tar_name = _get_group_name(config, group_id)
            tar_stream = BytesIO()
            with closing(tarfile.open(tar_name, 'w', tar_stream)) as tf:
                for (segment_id, mesh_data) in segment_ids_and_meshes:
                    mesh_name = _get_mesh_name(config, segment_id)
                    f_info = tarfile.TarInfo(mesh_name)
                    f_info.size = len(mesh_data)
                    tf.addfile(f_info, BytesIO(mesh_data))
    
            tar_bytes = tar_stream.getbuffer()

            @auto_retry(3, pause_between_tries=60.0, logging_name=__name__)
            def write_tar():
                with resource_client.access_context(dvid_server, False, 1, len(tar_bytes)):
                    session.post(f'{dvid_server}/api/node/{uuid}/{instance_name}/key/{tar_name}', tar_bytes)
            
            write_tar()
Example #14
def genJiaozhunCS(c,fn):  
    tree = ET.parse(fn)
    root = tree.getroot()
    Worksheets=root.findall("{urn:schemas-microsoft-com:office:spreadsheet}Worksheet")  
    Worksheet = Worksheets[0]
    table=Worksheet.find("{urn:schemas-microsoft-com:office:spreadsheet}Table")
    setCell(table,10,8,c.yiqixinghao)#setCell(table,10,8).Value =c.yiqixinghao
    setCell(table,16,8,c.yiqibh)# setCell(table,16,8).Value =c.yiqibh
    setCell(table,18,8,c.yonghu)# setCell(table,18,8).Value =c.yonghu
    setCell(table,20,8,c.addr)# setCell(table,18,8).Value =c.yonghu
    d=c.yujifahuo_date
    d1=d+datetime.timedelta(-1)
    setCell(table,32,8,str(d1.year))# setCell(table,32,8).FormulaR1C1 =d1.year  # year
    setCell(table,32,12,str(d1.month))# setCell(table,32,12).Value =d1.month  # month
    setCellWithFont(table,32,16,[str(d1.day)])# setCell(table,32,16).Value =d1.day  # day
    d2=d+datetime.timedelta(364)
    setCell(table,35,8,str(d2.year))# setCell(table,35,8).Value =d2.year  # year
    setCell(table,35,12,str(d2.month))# setCell(table,35,12).Value =d2.month  # month
    setCellWithFont(table,35,16,[str(d2.day)])# setCell(table,35,16).Value =d2.day  # day
    # #page 2
    Worksheet = Worksheets[1]
    table=Worksheet.find("{urn:schemas-microsoft-com:office:spreadsheet}Table")
    #dd="  地点(LOCATION): "+c.yonghu
    setCellWithFont(table,26,3,["地点","(","LOCATION"+")"+":"+"    "+c.yonghu])# setCell(table,26,3,dd)
    # #page 3
    Worksheet = Worksheets[2]
    table=Worksheet.find("{urn:schemas-microsoft-com:office:spreadsheet}Table")
    eles=[]
    for i in range(6):
        eles.append(getCell(table,13,8+i*2))#setCell(table,13,8+i*2).Value)
    stds=[]
    for i in range(6):
        stds.append(getCell(table,14,8+i*2))#setCell(table,14,8+i*2).Value)
    (tests,errs)=genTest(eles,stds)
    for i in range(6):
        setCell(table,15,8+i*2,tests[i])#setCell(table,15,8+i*2).Value=tests[i]
    for i in range(6):
        setCell(table,16,8+i*2,errs[i])#setCell(table,16,8+i*2).Value=errs[i]
    # #jmd
    cave=0.0725
    crsd=0.3/100
    (rs,ave_str,rsd_str)=genjmd(cave,crsd)
    cjmd_str=",".join(rs)
    setCell(table,20,3,"   测量值(C/%):" +cjmd_str)# setCell(table,20,3).Value="   测量值(C/%):" +cjmd_str
    setCell(table,21,3,"   平均值:%s,   相对标准偏差:%s" % (ave_str+"%",rsd_str+"%"))# setCell(table,21,3).Value="   平均值:%s,   相对标准偏差:%s" % (ave_str+"%",rsd_str+"%")
    sjmd_str=" 0.0721,0.0725,0.0723,0.0727,0.0730,0.0725,0.0725"
    save=0.0723
    srsd=1.04/100
    (rs,ave_str,rsd_str)=genjmd(save,srsd)
    sjmd_str=",".join(rs)
    setCell(table,22,3,"   测量值(S/%):" + sjmd_str)# setCell(table,22,3).Value="   测量值(S/%):" + sjmd_str
    setCell(table,23,3,"   平均值:%s,   相对标准偏差:%s" % (ave_str+"%",rsd_str+"%"))# setCell(table,23,3).Value="   平均值:%s,   相对标准偏差:%s" % (ave_str+"%",rsd_str+"%")
    s=BytesIO()
    tree.write(s, encoding="utf-8", xml_declaration=True, method="xml")
    s.seek(0)
    data=s.getbuffer()
    return data
Example #15
File: app.py Project: oplatek/tdb
def send_fig(fig, name):
    """
    sends figure to frontend
    """
    imgdata = BytesIO()
    fig.savefig(imgdata, format='png')
    imgdata.seek(0)  # rewind the data
    uri = 'data:image/png;base64,' + urllib.parse.quote(
        base64.encodebytes(imgdata.getbuffer()))
    send_action("update_plot", params={"src": uri, "name": name})
Example #16
class MessageProtocol(asyncio.Protocol):
    def __init__(self):
        self.len_buff = bytearray(4)
        self.len_offset = 0
        self.msg_len = None
        self.msg_data = BytesIO()
        self.msg_offset = 0
        self.transport = None

    def close(self):
        transport = self.transport
        if transport:
            # Calling transport.close() from an event handler seems to
            # cause errors, even if from a completely unrelated connection
            # (but in the same loop).
            transport._loop.call_soon(transport.close)

    def connection_made(self, transport):
        self.transport = transport

    def connection_lost(self, exc):
        self.transport = None

    def send_msg(self, msg):
        if self.transport:
            self.transport.writelines(msg_to_bytes(msg))

    def data_received(self, data):
        data_offset = 0
        while data_offset != len(data):
            if not self.msg_len:
                remaining = len(self.len_buff) - self.len_offset
                to_add = min(remaining, len(data) - data_offset)
                self.len_buff[self.len_offset:self.len_offset + to_add] = \
                    data[data_offset:data_offset + to_add]
                self.len_offset += to_add
                data_offset += to_add

                if to_add == remaining:
                    (self.msg_len,) = struct.unpack('!I', self.len_buff)
                    self.len_offset = 0
                    self.msg_data.seek(0)
            else:
                remaining = self.msg_len - self.msg_data.tell()
                to_add = min(remaining, len(data) - data_offset)
                self.msg_data.write(data[data_offset:data_offset + to_add])
                data_offset += to_add

                if to_add == remaining:
                    self.process_msg(tuple(msg_from_bytes(self.msg_data.getbuffer()[:self.msg_len])))
                    assert sys.getrefcount(self.msg_data) == 2, "never store message references!"
                    self.msg_len = None

    def process_msg(self, msg):
        raise NotImplementedError()
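msg_to_bytes() and msg_from_bytes() are not part of the snippet. A rough sketch of the framing they imply, using pickle as a stand-in serializer (an assumption; the real project may encode messages differently):

import pickle
import struct

def msg_to_bytes(msg):
    payload = pickle.dumps(msg)
    # 4-byte big-endian length header, matching struct.unpack('!I', ...) in data_received
    return [struct.pack('!I', len(payload)), payload]

def msg_from_bytes(buf):
    # buf may be a memoryview slice of msg_data's buffer
    return pickle.loads(bytes(buf))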
Example #17
def genJiaozhunN(c,fn):
    tree = ET.parse(fn)
    root = tree.getroot()
    Worksheets=root.findall("{urn:schemas-microsoft-com:office:spreadsheet}Worksheet")  
    Worksheet = Worksheets[0]
    table=Worksheet.find("{urn:schemas-microsoft-com:office:spreadsheet}Table")
    setCell(table,10,8,c.yiqixinghao)
    setCell(table,16,8,c.yiqibh)
    setCell(table,18,8,c.yonghu)
    setCell(table,20,8,c.addr)# setCell(table,18,8).Value =c.yonghu
    d=c.yujifahuo_date
    d1=d+datetime.timedelta(-1)
    setCell(table,32,8,str(d1.year))  # year
    setCell(table,32,12,str(d1.month))  # month
    setCell(table,32,16,str(d1.day))  # day
    d2=d+datetime.timedelta(364)
    setCell(table,35,8,str(d2.year))  # year
    setCell(table,35,12,str(d2.month))  # month
    setCell(table,35,16,str(d2.day))  # day
    #page 2
    Worksheet = Worksheets[1]
    table=Worksheet.find("{urn:schemas-microsoft-com:office:spreadsheet}Table")
    dd="  地点(LOCATION): "+c.yonghu
    setCellWithFont(table,22,3,["地点","(","LOCATION",")",":","    ",c.yonghu])# setCell(table,22,3).Value=dd#setCell(table,22,3,dd)
    #page 3
    Worksheet = Worksheets[2]
    table=Worksheet.find("{urn:schemas-microsoft-com:office:spreadsheet}Table")
    eles=[]
    for i in [11,18]:
        eles.append(getCell(table,13,i))
    stds=[]
    for i in [11,18]:
        stds.append(getCell(table,14,i))
    print(eles,stds)
    (tests,errs)=genTestR(eles,stds)
    tmp=0
    for i in [11,18]:
        setCell(table,15,i,tests[tmp])
        tmp +=1
    tmp=0
    for i in [11,18]:
        setCell(table,16,i,errs[tmp])
        tmp +=1
    #jmd
    save=0.0084
    srsd=1.5/100
    (rs,ave_str,rsd_str)=genjmd(save,srsd)
    sjmd_str=",".join(rs)
    setCell(table,22,3,"   测量值(N/%):" + sjmd_str)
    setCell(table,23,3,"   平均值:%s,   相对标准偏差:%s" % (ave_str+"%",rsd_str+"%"))
    s=BytesIO()
    tree.write(s, encoding="utf-8", xml_declaration=True, method="xml")
    s.seek(0)
    data=s.getbuffer()
    return data
Example #18
 def get_package(self, name):
     # if isinstance(source, types.ModuleType):
     #     source = os.path.dirname(source.__file__)
     if name:
         target = BytesIO()
         try:
             zipapp.create_archive('wheels/' + name, target=target)
         except zipapp.ZipAppError:
             pass
         else:
             # TODO: hash and cache?
             return target.getbuffer()
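A self-contained check (not from the original project) that zipapp.create_archive() can write its archive straight into a BytesIO target, which is what get_package() relies on:

import pathlib
import tempfile
import zipapp
import zipfile
from io import BytesIO

with tempfile.TemporaryDirectory() as tmp:
    # create_archive() requires a __main__.py in the source directory
    pathlib.Path(tmp, "__main__.py").write_text("print('hello')\n")
    target = BytesIO()
    zipapp.create_archive(tmp, target=target)
    target.seek(0)
    print(zipfile.ZipFile(target).namelist())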
Example #19
def write_image(file_path, image):
    file, extension = path.splitext(file_path)
    if extension.lower() == ".webp":
        logging.error("writing .webp %s", os.path.abspath(file_path))
        fp = BytesIO()
        im_pil = Image.fromarray(image)
        im_pil.save(fp, Image.registered_extensions()['.webp'])

        storage.save(file_path, ContentFile(fp.getbuffer()))
    else:
        logging.info("writing jpeg %s %s", file_path, extension)
        encoded, image = cv2.imencode(extension, image)
        storage.save(file_path, ContentFile(image))
Example #20
async def get(url):
    loop = asyncio.get_event_loop()
    buffer = BytesIO()
    id = url[35:].replace('/', " ")
    start = time.time()
    Logger.info("Fetching {0}".format(id))
    for i in range(ATTEMPTS):
        try:
            res = await loop.run_in_executor(None, lambda: requests.get(url, stream=True))
            if res.status_code == 200:
                for chunk in res.iter_content(DEFAULT_BUFFER_SIZE):
                    buffer.write(chunk)
                Logger.info("Fetched {0} completed in {1}s".format(id, time.time() - start))
                if len(buffer.getbuffer()) <= 0:
                    Logger.info("Buffer for {0} is empty ".format(id))
                return buffer.getbuffer()
            else:
                Logger.warn("Request to {0} failed with error code : {1} ".format(url, str(res.status_code)))
        except Exception as e:
            Logger.warn("Request {0} failed with exception : {1}".format(id, str(e)))
            time.sleep(0.5 * i)

    raise Exception("Request failed for {0} after ATTEMPTS attempts".format(url))
Example #21
def genJiaozhunO(c,fn):
    tree = ET.parse(fn)
    root = tree.getroot()
    Worksheets=root.findall("{urn:schemas-microsoft-com:office:spreadsheet}Worksheet")  
    Worksheet = Worksheets[0]
    table=Worksheet.find("{urn:schemas-microsoft-com:office:spreadsheet}Table")
    setCell(table,16,8,c.yiqibh)#setCell(table,10,8).Value =c.yiqixinghao    setCell(table,11,8).Value =c.yiqixinghao
    setCell(table,18,8,c.yonghu)#setCell(table,18,8).Value =c.yonghu
    setCell(table,20,8,c.addr)# setCell(table,18,8).Value =c.yonghu
    d=c.yujifahuo_date
    d1=d+datetime.timedelta(-1)
    setCell(table,32,8,str(d1.year))#setCell(table,32,8).FormulaR1C1 =d1.year  # year
    setCell(table,32,12,str(d1.month))#setCell(table,32,12).Value =d1.month  # month
    setCell(table,32,16,str(d1.day))#setCell(table,32,16).Value =d1.day  # day
    d2=d+datetime.timedelta(364)
    setCell(table,35,8,str(d2.year))#setCell(table,35,8).Value =d2.year  # year
    setCell(table,35,12,str(d2.month))#setCell(table,35,12).Value =d2.month  # month
    setCell(table,35,16,str(d2.day))#setCell(table,35,16).Value =d2.day  # day
    # #page 2
    Worksheet = Worksheets[1]
    table=Worksheet.find("{urn:schemas-microsoft-com:office:spreadsheet}Table")
    #dd="  地点(LOCATION): "+c.yonghu
    setCellWithFont(table,22,3,["地点","(","LOCATION",")",":","    ",c.yonghu])# setCell(table,22,3).Value=dd
    # #page 3
    Worksheet = Worksheets[2]# w = a.Worksheets[2]
    table=Worksheet.find("{urn:schemas-microsoft-com:office:spreadsheet}Table")
    eles=[]
    for i in [8,15]:
        print("getCell",13,i)
        eles.append(getCell(table,13,i))#.Value)
    stds=[]
    for i in [8,15]:
        stds.append(getCell(table,14,i))#setCell(table,14,i).Value)
    (tests,errs)=genTestR(eles,stds)
    setCell(table,15,8,tests[0]) # setCell(table,15,8).Value=tests[0]
    setCell(table,15,15,tests[1]) # setCell(table,15,15).Value=tests[1]
    setCell(table,16,8,errs[0]) # setCell(table,16,8).Value=errs[0]
    setCell(table,16,15,errs[1]) # setCell(table,16,15).Value=errs[1]
    # #jmd
    cave=0.0134
    crsd=0.74/100
    (rs,ave_str,rsd_str)=genjmd(cave,crsd)
    cjmd_str=",".join(rs)
    setCell(table,20,3,"   测量值(O/%):" +cjmd_str) # setCell(table,20,3).Value="   测量值(O/%):" +cjmd_str
    setCell(table,21,3,"   平均值:%s,   相对标准偏差:%s" % (ave_str+"%",rsd_str+"%")) # setCell(table,21,3).Value="   平均值:%s,   相对标准偏差:%s" % (ave_str+"%",rsd_str+"%")
    s=BytesIO()
    tree.write(s, encoding="utf-8", xml_declaration=True, method="xml")
    s.seek(0)
    data=s.getbuffer()
    return data
Example #22
def png_buffer(array):
    """Convert an array to PNG, handling transparency in an
    at-least-partially-sane manner."""
    assert array.ndim == 2

    im = toimage(array)
    alpha = toimage(array != 0)
    im.putalpha(alpha)

    # Return format is a buffer of PNG-encoded data
    fp = BytesIO()
    im.save(fp, format='png')

    return fp.getbuffer()
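toimage() above comes from scipy.misc, which has since been removed from SciPy. A rough Pillow-only sketch of the same idea, assuming a 2-D uint8 array (the value scaling that toimage() performed is not reproduced):

import numpy as np
from io import BytesIO
from PIL import Image

def png_buffer_pil(array):
    assert array.ndim == 2
    im = Image.fromarray(array.astype(np.uint8))                     # greyscale image
    alpha = Image.fromarray(((array != 0) * 255).astype(np.uint8))   # zero pixels -> transparent
    im.putalpha(alpha)
    fp = BytesIO()
    im.save(fp, format='png')
    return fp.getbuffer()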
Example #23
def redll(buf, mapping):
    # mapping is a dict of bytes->bytes for dlls to rename
    # Make the section to store the new strings.
    section_offset_mapping = {}
    data_writer = BytesIO()
    for old_dll, new_dll in mapping.items():
        # section_offset_mapping contains lowercased DLL names
        old_dll = old_dll.decode("ascii").lower().encode("ascii")
        data_offset = data_writer.tell()
        data_writer.write(new_dll)
        data_writer.write(b"\x00")
        section_offset_mapping[old_dll] = data_offset
    data = data_writer.getbuffer()

    # I checked python27.dll, and its DLL name RVAs point into section .rdata
    # which has characteristics:
    #   0x40000040
    # = 0x40000000  IMAGE_SCN_MEM_READ
    # +         40  IMAGE_SCN_CNT_INITIALIZED_DATA
    # which sounds about right.
    characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_CNT_INITIALIZED_DATA
    new_buf, new_section_rva = add_section(buf, data,
                                           characteristics)
    rva_mapping = {old_dll: new_section_rva + offset
                   for (old_dll, offset) in section_offset_mapping.items()}

    pe_headers = view_pe_headers(new_buf)

    unused_names = set(rva_mapping)
    def rewrite_names(viewer, name_field):
        for s in viewer(pe_headers):
            name_offset = rva_to_file_offset(pe_headers.sections, s[name_field])
            name, _ = read_asciiz(new_buf, name_offset)
            # lowercase name for case-insensitive matching
            name = name.decode("ascii").lower().encode("ascii")
            if name in rva_mapping:
                # print("RVA: %s -> %s"
                #       % (hex(s[name_field]), hex(rva_mapping[name])))
                s[name_field] = rva_mapping[name]
                unused_names.discard(name)

    rewrite_names(view_import_directory_tables, "Name RVA")
    rewrite_names(view_delay_load_directory_tables, "Name")

    if unused_names:
        warnings.warn("Did not find any imports from following DLLs: "
                      + ", ".join(str(name) for name in unused_names),
                      UserWarning)

    return new_buf
Example #24
def test_write_jpeg():
    """See if Pillow can write JPEG (tests linkage against mozjpeg)"""
    im = Image.new('RGB', (10, 10))
    buffer = BytesIO()
    im.save(buffer, format='JPEG')

    if sys.version_info[0] == 2:
        buffer.seek(0)
        size = len(buffer.read())
    else:
        size = buffer.getbuffer().nbytes

    if size != 375:
        logger.error("JPEG optimization is not working as expected! size=%s", size)
Example #25
def main():
   
    minio = Minio(STORAGE_ENDPOINT, access_key=AWSAccessKeyId, secret_key=AWSSecretKey)

    content = BytesIO(b'Hello again')
    
    #Create an SSE_S3 object
    sse_s3_obj = SSE_S3()

    # Put object with from SSE_S3 object which encrypt object in S3 with provided key
    minio.put_object(STORAGE_BUCKET, 'test_crypt.txt', content, content.getbuffer().nbytes, sse=sse_s3_obj)

    # Get decrypted object with same headers
    obj = minio.get_object(STORAGE_BUCKET, 'test_crypt.txt')
   
    print(obj.read())
Example #26
class ColorPreview():
	def __init__(self, color="#000000"):
		self.img = Image.new('RGB', (100, 100), color)
		self.stream = BytesIO()

		self.img.save(self.stream, format="PNG")

		self.stream.seek(0, 0)

	def getBuffer(self):
		return self.stream.getbuffer()

	def getStream(self):
		return self.stream

	def getValue(self):
		return self.stream.getvalue()
Example #27
def fetch_sram(ctx, sram_file):
    """Fetch a SRAM dump from the Super Wild Card."""
    with serial.Serial(ctx['com_port'], timeout=3) as ser:
        start_time = time()
        ser.write(pack('>10sx', b'READ SRAM'))
        sram = BytesIO()
        with click.progressbar(range(0, BLOCK_SIZE * 4), label='Receiving') as all_bytes:
            for _ in all_bytes:
                byte = ser.read(size=1)
                if byte == b'':
                    raise click.ClickException('Transfer timeout.')
                sram.write(byte)
        trail = ser.read(size=9)
        if trail != b'*#*#*#*OK':
            raise click.ClickException('Transfer failed! ({})'.format(trail))
        sram_file.write(sram.getbuffer())
        click.echo(click.style('Transfer complete in {0:.2f} seconds.'.format(time() - start_time), fg='green'))
Example #28
def genShujubiao(c,fn):
    tree = ET.parse(fn)
    root = tree.getroot()
    Worksheet=root.find("{urn:schemas-microsoft-com:office:spreadsheet}Worksheet")
    table=Worksheet.find("{urn:schemas-microsoft-com:office:spreadsheet}Table")
    if c.yiqixinghao=="CS-2800":
        setCell(table,4,3,getCell(table,4,3)+"√") #setCell(table,4,3).Value =setCell(table,4,3).Value     +"√"
        setCell(table,8,3,"√")
        setCell(table,8,5,"√")
        setCell(table,8,6,"√")
    elif c.yiqixinghao=="CS-3000":
        setCell(table,4,4,getCell(table,4,4)+"√") #setCell(table,4,3).Value =setCell(table,4,3).Value     +"√"
        setCell(table,8,3,"√")
        setCell(table,8,5,"√")
        setCell(table,8,6,"√")
    elif c.yiqixinghao=="O-3000":
        setCell(table,4,5,getCell(table,4,5)+"√")#setCell(table,4,5).Value =setCell(table,4,5).Value     +"√"
        setCell(table,11,3,"√")#setCell(table,11,3).Value ="√"
    elif c.yiqixinghao=="N-3000":
        setCell(table,4,6,getCell(table,4,6)+"√")#setCell(table,4,6).Value =setCell(table,4,6).Value     +"√"
    elif c.yiqixinghao=="ON-3000":
        setCell(table,5,3,getCell(table,5,3)+"√")#setCell(table,5,3).Value =setCell(table,5,3).Value     +"√"
        setCell(table,11,3,"√")#setCell(table,11,3).Value ="√"
        setCell(table,11,4,"√")#setCell(table,11,4).Value ="√"
    elif c.yiqixinghao=="ONH-3000":
        setCell(table,5,4,getCell(table,5,4)+"√")#setCell(table,5,4).Value =setCell(table,5,4).Value     +"√"
        setCell(table,11,3,"√")#setCell(table,11,3).Value ="√"
        setCell(table,11,4,"√")#setCell(table,11,4).Value ="√"
        setCell(table,11,5,"√")#setCell(table,11,3).Value ="√"
    elif c.yiqixinghao=="OH-3000":
        setCell(table,5,6,getCell(table,5,6)+"√")#setCell(table,5,6).Value =setCell(table,5,6).Value     +"√"
        setCell(table,11,3,"√")#setCell(table,11,3).Value ="√"
        setCell(table,11,5,"√")#setCell(table,11,5).Value ="√"
    setCell(table,2,6,"合同号:"+c.hetongbh)#setCell(table,2,6).Value ="合同号:"+c.hetongbh    
    setCell(table,6,3,c.yiqibh) #setCell(table,6,3).Value =c.yiqibh
    setCell(table,3,3,c.yonghu) #setCell(table,3,3).Value =c.yonghu
    d=datetime.datetime.now()
    setCell(table,13,4,str(d.year)+"年") #setCell(table,13,4).FormulaR1C1 =str(d.year)+"年"
    setCell(table,13,5,str(d.month)+"月")#setCell(table,13,5).Value =str(d.month)+"月"
    setCell(table,13,6,str(d.day)+"日")#setCell(table,13,6).Value =str(d.day)+"日"    
    setCell(table,14,3,c.baoxiang)#setCell(table,13,6).Value =str(d.day)+"日"    
    s=BytesIO()
    tree.write(s, encoding="utf-8", xml_declaration=True, method="xml")
    s.seek(0)
    data=s.getbuffer()
    return data
Example #29
        def scan(self, step=None):
            if step:
                self.current_step = step

            logger.debug('Do scan %d/%d' % (self.current_step, self.steps))

            phi = self.current_step * 2 * pi / self.steps
            sample_n = 100
            r_l = 40
            r_r = 50

            # x = sin(theta) * cos(phi) * r
            # y = sin(theta) * sin(phi) * r
            # z = r * cos(theta)
            point_L = []
            point_R = []
            for t in range(sample_n):
                theta = t * 2 * pi / sample_n
                x = sin(theta) * cos(phi)
                y = sin(theta) * sin(phi)
                z = cos(theta)
                point_L.append([x * r_l, y * r_l, z * r_l + r_l / 2, 0, 0, 0])
                point_R.append([x * r_r, y * r_r, z * r_r + r_r / 2, 255, 0, 0])

            # point_L
            # point_R
            buf = BytesIO()

            def packer(s, points):
                for p in points:
                    s.write(
                        struct.pack('<ffffff', p[0], p[1], p[2],
                                    p[3] / 255., p[4] / 255., p[5] / 255.))

            packer(buf, point_L)
            packer(buf, point_R)

            self.send_text('{"status": "chunk", "left": %d, "right": %d}' %
                           (len(point_L), len(point_R)))
            self.send_binary(buf.getbuffer())
            # self.task.forward()
            # from time import sleep
            # sleep(1)
            self.send_ok()
            self.current_step += 1
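For reference, a receiving-side sketch (an assumption, not part of the original handler): each point is packed as six little-endian floats, so the binary payload unpacks back like this:

import struct

def unpack_points(buf):
    # 6 floats x 4 bytes = 24 bytes per point
    return [struct.unpack_from('<ffffff', buf, i * 24) for i in range(len(buf) // 24)]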
Example #30
    def serialize(data):
        """

        :param data:
        :return:
        """
        try:
            bio = BytesIO()
            pickle.dump(data, bio, protocol=pickle.HIGHEST_PROTOCOL)
            try:
                # noinspection PyUnresolvedReferences
                pickled_data = bio.getbuffer()
            except AttributeError:
                pickled_data = bio.getvalue()
        except ImportError:
            pickled_data = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)

        return pickled_data
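A matching deserialization sketch (not in the original snippet): pickle.loads() accepts bytes-like objects, so the memoryview returned by getbuffer() can be passed back directly:

import pickle

def deserialize(pickled_data):
    # Accepts bytes, bytearray or the memoryview produced by serialize()
    return pickle.loads(pickled_data)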
Example #31
def render_and_send_app(pk):
    app = import_module('venueapp.models').Application.objects.get(pk=pk)
    cover = import_module('venueapp.views').make_cover_page(app)
    max_bytes = config.get_int("max_inline_attachment_bytes", 0)
    for venue in app.venues.all():
        html = render_to_string(
            "venueapp/pdf_app.html", {
                "object": app,
                "cover": cover,
                "venue": venue,
                "logo": finders.find("logo.png"),
                "pdf": True,
                "max_attachment_size": max_bytes,
            })
        doc = HTML(string=html, base_url=settings.SITE_URL).render()
        bookmark_tree = doc.make_bookmark_tree()
        bookmarks = list([_Bookmark(i) for i in bookmark_tree])
        app_pdf = BytesIO()
        doc.write_pdf(app_pdf)
        merger = PdfFileMerger()
        merger.append(app_pdf, import_bookmarks=False)
        for staff in app.staffmember_set.signed_on().filter(
                role__accepts_attachment=True).exclude(
                    Q(attachment=None) | Q(attachment="")):
            name = "{} {}'s ".format(staff.role_name, staff.person)
            try:
                if staff.attachment.size < max_bytes:
                    reader = PdfFileReader(staff.attachment.open(), False)
                    attachment_pages = reader.getNumPages()
                    page = None
                    for i, bookmark in enumerate(bookmarks):
                        if bookmark.label == name + "Supplement":
                            page = bookmarks[i + 1].location
                    if page:
                        merger.merge(page,
                                     staff.attachment.open(),
                                     import_bookmarks=False)
                        for i in bookmarks:
                            if i.location >= page:
                                i.location += attachment_pages
                    else:
                        merger.append(staff.attachment.open(),
                                      bookmark=name + "Attachment",
                                      import_bookmarks=False)
            except Exception as e:
                tb.print_exc()
        for i in bookmarks:
            merger.addBookmark(i.label, i.location)
        pdf = BytesIO()  # open("/tmp/{}.pdf".format(venue.venue), "wb")
        merger.write(pdf)
        msg = render_msg("venueapp/email/submission.html",
                         locals(),
                         to=[
                             "{} <{}>".format(i.get_full_name(False), i.email)
                             for i in venue.managers.all()
                         ],
                         cc=[
                             "{} <{}>".format(i.get_full_name(False), i.email)
                             for i in app.show.staff.all()
                         ],
                         subject="Application for {} in {} Submitted".format(
                             app, venue.venue),
                         tags=["venueapp", "venueapp-submission"])
        msg.attach("{} - {}.pdf".format(app, venue), BytesIO(pdf.getbuffer()),
                   "application/pdf")
        try:
            msg.send()
        except Exception as err:
            LOGGER.error("Application submission sending failed: {}".format(
                repr(err)))
            tb.print_exc()
        finally:
            merger.close()
Example #32
def t_time(thDate):

    data_dic = thDate

    try:
        # Load the log file
        df = pd.read_csv("tempHumid-" + data_dic + ".log",
                         names=['Datetime', 'etc'],
                         header=None,
                         index_col='Datetime')

        # Convert the index to a DatetimeIndex
        df.index = pd.to_datetime(df.index)

        # Split the etc column
        df[['etc1', 'etc2', 'etc3', 'Temp', 'Humidity']] = \
            df['etc'].str.split(' ', n=5, expand=True)

        # Extract just the number from the Temp column, str -> numeric
        df['Temp'] = df['Temp'].str.slice(start=5, stop=-1)
        df['Temp'] = df['Temp'].apply(pd.to_numeric)

        # Generate the figure **without using pyplot**.
        fig, ax = plt.subplots()

        # Plot the temperature, set the line color
        ax.plot(df['Temp'], color='#ff0303')

        # Graph title
        title = '{}.{}.{}'.format(df.index.year[0], df.index.month[0], df.index.day[0])\
            +'\'s temperature\n'\
            +'──────────────────────\n'\
            +'Highest: {}℃ / '.format(df['Temp'].max())\
            +'Lowest: {}℃'.format(df['Temp'].min())

        font = {'size': 20}
        plt.title(title, fontdict=font)

        # x-axis and y-axis labels
        ax.set_xlabel('Time', size=16)
        ax.set_ylabel('Temperature(℃)', size=16)

        # Prevent annotations from spilling outside the graph
        ax.spines['top'].set_color('none')
        ax.spines['right'].set_color('none')

        # Set the x-axis tick interval (hourly)
        hours = mdates.HourLocator(interval=1)
        h_fmt = mdates.DateFormatter('%H:%M:%S')
        ax.xaxis.set_major_locator(hours)
        ax.xaxis.set_major_formatter(h_fmt)

        # Show the major grid lines
        plt.grid(b=True, which='major', color='#b3b3b3', linestyle='-')

        # # Show the minor grid lines
        # plt.minorticks_on()
        # plt.grid(b=True, which='minor', color='#a6a6a6', linestyle='--', alpha=0.1)

        # Annotation at the highest temperature
        # df_cond_max = df[df['Temp'] == df['Temp'].max()]
        # df_max = df_cond_max.loc[:, ['Temp']]
        # max_idx = mpl.dates.date2num(df_max.index.to_pydatetime())
        # Handle two or more highest-temperature points
        # arrowprops = dict(arrowstyle="->")
        # for i in range(0, len(max_idx)):
        #     plt.annotate('{}'.format(df['Temp'].max()),\
        #         xy=(max_idx[i], df['Temp'].max()),\
        #         xytext=(max_idx[i]+0.0005, df['Temp'].max()),\
        #         horizontalalignment='left', verticalalignment='top', color='#154a31'
        #         )

        # Annotation at the lowest temperature
        # df_cond_min = df[df['Temp'] == df['Temp'].min()]
        # df_min = df_cond_min.loc[:, ['Temp']]
        # min_idx = mpl.dates.date2num(df_min.index.to_pydatetime())
        # Handle two or more lowest-temperature points
        # for i in range(0, len(min_idx)):
        #     plt.annotate('{}'.format(df['Temp'].min()),\
        #         xy=(min_idx[i], df['Temp'].min()),\
        #         xytext=(min_idx[i]+0.0005, df['Temp'].min()),\
        #         horizontalalignment='left', verticalalignment='top', color='#154a31',
        #         )

        # Save it to a temporary buffer.
        buf = BytesIO()
        fig.set_size_inches(12, 9)
        fig.savefig(buf, format="png", dpi=70)

        # Embed the result in the html output.
        data = base64.b64encode(buf.getbuffer()).decode("ascii")

        return f"<img src='data:image/png;base64,{data}'/>"

    except:
        return "Error"
Example #33
 def _download_results(self, result_dict):
     results = ResultFiles()
     if 'metadata' in result_dict:
         if 'reference' in result_dict['metadata']:
             try:
                 logger.debug("Downloading CTS/VTS log from: %s" %
                              result_dict['metadata']['reference'])
                 self.tradefed_results_url = result_dict['metadata'][
                     'reference']
                 result_tarball_request = requests.get(
                     self.tradefed_results_url)
                 if result_tarball_request.status_code == 200:
                     result_tarball_request.raw.decode_content = True
                     r = BytesIO(result_tarball_request.content)
                     results.tradefed_zipfile = ExtractedResult()
                     results.tradefed_zipfile.contents = r
                     results.tradefed_zipfile.length = len(
                         result_tarball_request.content)
                     results.tradefed_zipfile.name = result_tarball_request.url.rsplit(
                         "/", 1)[1]
                     results.tradefed_zipfile.mimetype = result_tarball_request.headers.get(
                         "Content-Type")
                     logger.debug("Retrieved %s bytes" %
                                  r.getbuffer().nbytes)
                     t = tarfile.open(fileobj=r, mode='r:xz')
                     for member in t.getmembers():
                         logger.debug("Available member: %s" % member.name)
                         if "test_result.xml" in member.name:
                             results.test_results = self._extract_member(
                                 t, member)
                             logger.debug(
                                 "test_results object is empty: %s" %
                                 (results.test_results is None))
                         if "compatibility_result.xsl" in member.name:
                             results.test_result_xslt = self._extract_member(
                                 t, member)
                             logger.debug(
                                 "test_result_xslt object is empty: %s" %
                                 (results.test_result_xslt is None))
                         if "compatibility_result.css" in member.name:
                             results.test_result_css = self._extract_member(
                                 t, member)
                             logger.debug(
                                 "test_result_css object is empty: %s" %
                                 (results.test_result_css is None))
                         if "logo.png" in member.name:
                             results.test_result_image = self._extract_member(
                                 t, member)
                             logger.debug(
                                 "test_result_image object is empty: %s" %
                                 (results.test_result_image is None))
                         if "tradefed-stdout.txt" in member.name:
                             results.tradefed_stdout = self._extract_member(
                                 t, member)
                             logger.debug(
                                 "tradefed_stdout object is empty: %s" %
                                 (results.tradefed_stdout is None))
                         if "tradefed-logcat.txt" in member.name:
                             results.tradefed_logcat = self._extract_member(
                                 t, member)
                             logger.debug(
                                 "tradefed_logcat object is empty: %s" %
                                 (results.tradefed_logcat is None))
             except tarfile.TarError as e:
                 logger.warning(e)
             except EOFError as e:
                 # this can happen when tarfile is corrupted
                 logger.warning(e)
             except requests.exceptions.Timeout as e:
                 logger.warning(e)
     return results
Example #34
    def setUp(self):
        self.maxDiff = None
        conn = engine.connect()
        text = (f"INSERT INTO projects (uuid, name, created_at, updated_at) "
                f"VALUES (%s, %s, %s, %s)")
        conn.execute(text, (
            PROJECT_ID,
            NAME,
            CREATED_AT,
            UPDATED_AT,
        ))

        text = (
            f"INSERT INTO experiments (uuid, name, project_id, position, is_active, created_at, updated_at) "
            f"VALUES (%s, %s, %s, %s, %s, %s, %s)")
        conn.execute(text, (
            EXPERIMENT_ID,
            NAME,
            PROJECT_ID,
            POSITION,
            1,
            CREATED_AT,
            UPDATED_AT,
        ))

        text = (
            f"INSERT INTO experiments (uuid, name, project_id, position, is_active, created_at, updated_at) "
            f"VALUES (%s, %s, %s, %s, %s, %s, %s)")
        conn.execute(text, (
            EXPERIMENT_ID_2,
            NAME,
            PROJECT_ID,
            POSITION,
            1,
            CREATED_AT,
            UPDATED_AT,
        ))

        text = (
            f"INSERT INTO experiments (uuid, name, project_id, position, is_active, created_at, updated_at) "
            f"VALUES (%s, %s, %s, %s, %s, %s, %s)")
        conn.execute(text, (
            EXPERIMENT_ID_3,
            NAME,
            PROJECT_ID,
            POSITION,
            1,
            CREATED_AT,
            UPDATED_AT,
        ))

        text = (
            f"INSERT INTO tasks (uuid, name, description, image, commands, arguments, tags, parameters, experiment_notebook_path, deployment_notebook_path, is_default, created_at, updated_at) "
            f"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
        conn.execute(text, (
            TASK_ID,
            NAME,
            DESCRIPTION,
            IMAGE,
            COMMANDS_JSON,
            ARGUMENTS_JSON,
            TAGS_JSON,
            dumps([]),
            EXPERIMENT_NOTEBOOK_PATH,
            DEPLOYMENT_NOTEBOOK_PATH,
            0,
            CREATED_AT,
            UPDATED_AT,
        ))

        text = (
            f"INSERT INTO operators (uuid, experiment_id, task_id, parameters, created_at, updated_at) "
            f"VALUES (%s, %s, %s, %s, %s, %s)")
        conn.execute(text, (
            OPERATOR_ID,
            EXPERIMENT_ID,
            TASK_ID,
            PARAMETERS_JSON,
            CREATED_AT,
            UPDATED_AT,
        ))

        text = (
            f"INSERT INTO operators (uuid, experiment_id, task_id, parameters, created_at, updated_at) "
            f"VALUES (%s, %s, %s, %s, %s, %s)")
        conn.execute(text, (
            OPERATOR_ID_2,
            EXPERIMENT_ID_2,
            TASK_ID,
            PARAMETERS_JSON_2,
            CREATED_AT,
            UPDATED_AT,
        ))

        text = (
            f"INSERT INTO operators (uuid, experiment_id, task_id, parameters, created_at, updated_at) "
            f"VALUES (%s, %s, %s, %s, %s, %s)")
        conn.execute(text, (
            OPERATOR_ID_3,
            EXPERIMENT_ID_3,
            TASK_ID,
            PARAMETERS_JSON_3,
            CREATED_AT,
            UPDATED_AT,
        ))
        conn.close()

        # uploads mock dataset
        try:
            MINIO_CLIENT.make_bucket(BUCKET_NAME)
        except BucketAlreadyOwnedByYou:
            pass

        file = BytesIO((b'col0,col1,col2,col3,col4,col5\n'
                        b'01/01/2000,5.1,3.5,1.4,0.2,Iris-setosa\n'
                        b'01/01/2000,5.1,3.5,1.4,0.2,Iris-setosa\n'
                        b'01/01/2000,5.1,3.5,1.4,0.2,Iris-setosa\n'))
        MINIO_CLIENT.put_object(
            bucket_name=BUCKET_NAME,
            object_name=f"datasets/{DATASET}/{DATASET}",
            data=file,
            length=file.getbuffer().nbytes,
        )
        metadata = {
            "columns": ["col0", "col1", "col2", "col3", "col4", "col5"],
            "featuretypes": [
                DATETIME, NUMERICAL, NUMERICAL, NUMERICAL, NUMERICAL,
                CATEGORICAL
            ],
            "filename":
            DATASET,
            "run_id":
            RUN_ID,
        }
        buffer = BytesIO(dumps(metadata).encode())
        MINIO_CLIENT.put_object(
            bucket_name=BUCKET_NAME,
            object_name=f"datasets/{DATASET}/{DATASET}.metadata",
            data=buffer,
            length=buffer.getbuffer().nbytes,
        )

        file = BytesIO((b'foo,bar,baz,qux\n'
                        b'01/01/2000,foo,1.2,2.3\n'
                        b'01/01/2000,bar,2.3,3.4\n'
                        b'01/01/2000,baz,4.5,4.5\n'))
        MINIO_CLIENT.put_object(
            bucket_name=BUCKET_NAME,
            object_name=f"datasets/{DATASET_2}/{DATASET_2}",
            data=file,
            length=file.getbuffer().nbytes,
        )
        metadata = {
            "columns": ["foo", "bar", "baz", "qux"],
            "featuretypes": [DATETIME, CATEGORICAL, NUMERICAL, NUMERICAL],
            "filename": DATASET_2,
            "runId": None
        }
        buffer = BytesIO(dumps(metadata).encode())

        MINIO_CLIENT.put_object(
            bucket_name=BUCKET_NAME,
            object_name=f"datasets/{DATASET_2}/{DATASET_2}.metadata",
            data=buffer,
            length=buffer.getbuffer().nbytes,
        )

        MINIO_CLIENT.copy_object(
            bucket_name=BUCKET_NAME,
            object_name=
            f"datasets/{DATASET}/runs/{RUN_ID}/operators/{OPERATOR_ID}/{DATASET}/{DATASET}",
            object_source=f"/{BUCKET_NAME}/datasets/{DATASET}/{DATASET}",
        )
        MINIO_CLIENT.copy_object(
            bucket_name=BUCKET_NAME,
            object_name=
            f"datasets/{DATASET}/runs/{RUN_ID}/operators/{OPERATOR_ID}/{DATASET}/{DATASET}.metadata",
            object_source=
            f"/{BUCKET_NAME}/datasets/{DATASET}/{DATASET}.metadata",
        )
Example #35
def handler(event, context):
    # Load settings
    SETTINGS = BossSettings.load()

    # Used as a guard against trying to delete the SQS message when lambda is
    # triggered by SQS.
    sqs_triggered = 'Records' in event and len(event['Records']) > 0

    if sqs_triggered:
        # Lambda invoked by an SQS trigger.
        msg_data = json.loads(event['Records'][0]['body'])
        # Load the project info from the chunk key you are processing
        chunk_key = msg_data['chunk_key']
        proj_info = BossIngestProj.fromSupercuboidKey(chunk_key)
        proj_info.job_id = msg_data['ingest_job']
    else:
        # Standard async invoke of this lambda.

        # Load the project info from the chunk key you are processing
        proj_info = BossIngestProj.fromSupercuboidKey(event["chunk_key"])
        proj_info.job_id = event["ingest_job"]

        # Get message from SQS ingest queue, try for ~2 seconds
        rx_cnt = 0
        msg_data = None
        msg_id = None
        msg_rx_handle = None
        while rx_cnt < 6:
            ingest_queue = IngestQueue(proj_info)
            try:
                msg = [x for x in ingest_queue.receiveMessage()]
            # StopIteration may be converted to a RuntimeError.
            except (StopIteration, RuntimeError):
                msg = None

            if msg:
                msg = msg[0]
                print("MESSAGE: {}".format(msg))
                print(len(msg))
                msg_id = msg[0]
                msg_rx_handle = msg[1]
                msg_data = json.loads(msg[2])
                print("MESSAGE DATA: {}".format(msg_data))
                break
            else:
                rx_cnt += 1
                print("No message found. Try {} of 6".format(rx_cnt))
                time.sleep(1)

        if not msg_id:
            # No tiles ready to ingest.
            print("No ingest message available")
            return

        # Get the chunk key of the tiles to ingest.
        chunk_key = msg_data['chunk_key']


    tile_error_queue = TileErrorQueue(proj_info)

    print("Ingesting Chunk {}".format(chunk_key))
    tiles_in_chunk = int(chunk_key.split('&')[1])

    # Setup SPDB instance
    sp = SpatialDB(msg_data['parameters']["KVIO_SETTINGS"],
                   msg_data['parameters']["STATEIO_CONFIG"],
                   msg_data['parameters']["OBJECTIO_CONFIG"])

    # Get tile list from Tile Index Table
    tile_index_db = BossTileIndexDB(proj_info.project_name)
    # tile_index_result (dict): keys are S3 object keys of the tiles comprising the chunk.
    tile_index_result = tile_index_db.getCuboid(msg_data["chunk_key"], int(msg_data["ingest_job"]))
    if tile_index_result is None:
        # If chunk_key is gone, another lambda uploaded the cuboids and deleted the chunk_key afterwards.
        if not sqs_triggered:
            # Remove message so it's not redelivered.
            ingest_queue.deleteMessage(msg_id, msg_rx_handle)

        print("Aborting due to chunk key missing from tile index table")
        return

    # Sort the tile keys
    print("Tile Keys: {}".format(tile_index_result["tile_uploaded_map"]))
    tile_key_list = [x.rsplit("&", 2) for x in tile_index_result["tile_uploaded_map"].keys()]
    if len(tile_key_list) < tiles_in_chunk:
        print("Not a full set of 16 tiles. Assuming it has handled already, tiles: {}".format(len(tile_key_list)))
        if not sqs_triggered:
            ingest_queue.deleteMessage(msg_id, msg_rx_handle)
        return
    tile_key_list = sorted(tile_key_list, key=lambda x: int(x[1]))
    tile_key_list = ["&".join(x) for x in tile_key_list]
    print("Sorted Tile Keys: {}".format(tile_key_list))

    # Augment the Resource JSON data that was pruned due to S3 metadata size limits so it will instantiate properly
    resource_dict = msg_data['parameters']['resource']
    _, exp_name, ch_name = resource_dict["boss_key"].split("&")

    resource_dict["channel"]["name"] = ch_name
    resource_dict["channel"]["description"] = ""
    resource_dict["channel"]["sources"] = []
    resource_dict["channel"]["related"] = []
    resource_dict["channel"]["default_time_sample"] = 0
    resource_dict["channel"]["downsample_status"] = "NOT_DOWNSAMPLED"

    resource_dict["experiment"]["name"] = exp_name
    resource_dict["experiment"]["description"] = ""
    resource_dict["experiment"]["num_time_samples"] = 1
    resource_dict["experiment"]["time_step"] = None
    resource_dict["experiment"]["time_step_unit"] = None

    resource_dict["coord_frame"]["name"] = "cf"
    resource_dict["coord_frame"]["name"] = ""
    resource_dict["coord_frame"]["x_start"] = 0
    resource_dict["coord_frame"]["x_stop"] = 100000
    resource_dict["coord_frame"]["y_start"] = 0
    resource_dict["coord_frame"]["y_stop"] = 100000
    resource_dict["coord_frame"]["z_start"] = 0
    resource_dict["coord_frame"]["z_stop"] = 100000
    resource_dict["coord_frame"]["voxel_unit"] = "nanometers"

    # Setup the resource
    resource = BossResourceBasic()
    resource.from_dict(resource_dict)
    dtype = resource.get_numpy_data_type()

    # read all tiles from bucket into a slab
    tile_bucket = TileBucket(proj_info.project_name)
    data = []
    num_z_slices = 0
    for tile_key in tile_key_list:
        try:
            image_data, message_id, receipt_handle, metadata = tile_bucket.getObjectByKey(tile_key)
        except KeyError:
            print('Key: {} not found in tile bucket, assuming redelivered SQS message and aborting.'.format(
                tile_key))
            if not sqs_triggered:
                # Remove message so it's not redelivered.
                ingest_queue.deleteMessage(msg_id, msg_rx_handle)
            print("Aborting due to missing tile in bucket")
            return

        image_bytes = BytesIO(image_data)
        image_size = image_bytes.getbuffer().nbytes

        # Get tile size from metadata; needed to shape a black tile if the actual tile is corrupt.
        if 'x_size' in metadata:
            tile_size_x = metadata['x_size']
        else:
            print('MetadataMissing: x_size not in tile metadata:  using 1024.')
            tile_size_x = 1024

        if 'y_size' in metadata:
            tile_size_y = metadata['y_size']
        else:
            print('MetadataMissing: y_size not in tile metadata:  using 1024.')
            tile_size_y = 1024

        if image_size == 0:
            print('TileError: Zero length tile, using black instead: {}'.format(tile_key))
            error_msg = 'Zero length tile'
            enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
            tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)
        else:
            try:
                # DP NOTE: Issues when specifying dtype in the asarray function with Pillow ver 8.3.1. 
                # Fixed by separating array instantiation and dtype assignment. 
                tile_img = np.asarray(Image.open(image_bytes))
                tile_img = tile_img.astype(dtype)
            except TypeError as te:
                print('TileError: Incomplete tile, using black instead (tile_size_in_bytes, tile_key): {}, {}'
                      .format(image_size, tile_key))
                error_msg = 'Incomplete tile'
                enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
                tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)
            except OSError as oe:
                print('TileError: OSError, using black instead (tile_size_in_bytes, tile_key): {}, {} ErrorMessage: {}'
                      .format(image_size, tile_key, oe))
                error_msg = 'OSError: {}'.format(oe)
                enqueue_tile_error(tile_error_queue, tile_key, chunk_key, error_msg)
                tile_img = np.zeros((tile_size_x, tile_size_y), dtype=dtype)

        data.append(tile_img)
        num_z_slices += 1


    # Make 3D array of image data. It should be in XYZ at this point
    chunk_data = np.array(data)
    del data
    tile_dims = chunk_data.shape

    # Break into Cube instances
    print("Tile Dims: {}".format(tile_dims))
    print("Num Z Slices: {}".format(num_z_slices))
    num_x_cuboids = int(math.ceil(tile_dims[2] / CUBOIDSIZE[proj_info.resolution][0]))
    num_y_cuboids = int(math.ceil(tile_dims[1] / CUBOIDSIZE[proj_info.resolution][1]))

    print("Num X Cuboids: {}".format(num_x_cuboids))
    print("Num Y Cuboids: {}".format(num_y_cuboids))

    chunk_key_parts = BossUtil.decode_chunk_key(chunk_key)
    t_index = chunk_key_parts['t_index']
    for x_idx in range(0, num_x_cuboids):
        for y_idx in range(0, num_y_cuboids):
            # TODO: check time series support
            cube = Cube.create_cube(resource, CUBOIDSIZE[proj_info.resolution])
            cube.zeros()

            # Compute Morton ID
            # TODO: verify Morton indices correct!
            print(chunk_key_parts)
            morton_x_ind = x_idx + (chunk_key_parts["x_index"] * num_x_cuboids)
            morton_y_ind = y_idx + (chunk_key_parts["y_index"] * num_y_cuboids)
            print("Morton X: {}".format(morton_x_ind))
            print("Morton Y: {}".format(morton_y_ind))
            morton_index = XYZMorton([morton_x_ind, morton_y_ind, int(chunk_key_parts['z_index'])])

            # Insert sub-region from chunk_data into cuboid
            x_start = x_idx * CUBOIDSIZE[proj_info.resolution][0]
            x_end = x_start + CUBOIDSIZE[proj_info.resolution][0]
            x_end = min(x_end, tile_dims[2])
            y_start = y_idx * CUBOIDSIZE[proj_info.resolution][1]
            y_end = y_start + CUBOIDSIZE[proj_info.resolution][1]
            y_end = min(y_end, tile_dims[1])
            z_end = CUBOIDSIZE[proj_info.resolution][2]
            # TODO: get sub-array w/o making a copy.
            print("Yrange: {}".format(y_end - y_start))
            print("Xrange: {}".format(x_end - x_start))
            print("X start: {}".format(x_start))
            print("X stop: {}".format(x_end))
            cube.data[0, 0:num_z_slices, 0:(y_end - y_start), 0:(x_end - x_start)] = \
                chunk_data[0:num_z_slices, y_start:y_end, x_start:x_end]

            # Create object key
            object_key = sp.objectio.generate_object_key(resource, proj_info.resolution, t_index, morton_index)
            print("Object Key: {}".format(object_key))

            # Put object in S3
            sp.objectio.put_objects([object_key], [cube.to_blosc()])

            # Add object to index
            sp.objectio.add_cuboid_to_index(object_key, ingest_job=int(msg_data["ingest_job"]))

            # Update id indices if this is an annotation channel
            # We no longer index during ingest.
            #if resource.data['channel']['type'] == 'annotation':
            #   try:
            #       sp.objectio.update_id_indices(
            #           resource, proj_info.resolution, [object_key], [cube.data])
            #   except SpdbError as ex:
            #       sns_client = boto3.client('sns')
            #       topic_arn = msg_data['parameters']["OBJECTIO_CONFIG"]["prod_mailing_list"]
            #       msg = 'During ingest:\n{}\nCollection: {}\nExperiment: {}\n Channel: {}\n'.format(
            #           ex.message,
            #           resource.data['collection']['name'],
            #           resource.data['experiment']['name'],
            #           resource.data['channel']['name'])
            #       sns_client.publish(
            #           TopicArn=topic_arn,
            #           Subject='Object services misuse',
            #           Message=msg)

    lambda_client = boto3.client('lambda', region_name=SETTINGS.REGION_NAME)

    names = AWSNames.from_lambda(context.function_name)

    delete_tiles_data = {
        'tile_key_list': tile_key_list,
        'region': SETTINGS.REGION_NAME,
        'bucket': tile_bucket.bucket.name
    }

    # Delete tiles from tile bucket.
    lambda_client.invoke(
        FunctionName=names.delete_tile_objs.lambda_,
        InvocationType='Event',
        Payload=json.dumps(delete_tiles_data).encode()
    )       

    delete_tile_entry_data = {
        'tile_index': tile_index_db.table.name,
        'region': SETTINGS.REGION_NAME,
        'chunk_key': chunk_key,
        'task_id': msg_data['ingest_job']
    }

    # Delete entry from tile index.
    lambda_client.invoke(
        FunctionName=names.delete_tile_index_entry.lambda_,
        InvocationType='Event',
        Payload=json.dumps(delete_tile_entry_data).encode()
    )       

    if not sqs_triggered:
        # Delete message since it was processed successfully
        ingest_queue.deleteMessage(msg_id, msg_rx_handle)
Пример #36
0
def _binary_to_bytesio(binary) -> memoryview:
    # Accepts either raw bytes or an io.BytesIO and returns a memoryview of its contents.
    if isinstance(binary, io.BytesIO):
        return binary.getbuffer()
    return io.BytesIO(binary).getbuffer()
Пример #37
0
    def setUp(self):
        self.maxDiff = None
        conn = engine.connect()
        text = (
            f"INSERT INTO tasks (uuid, name, description, image, commands, arguments, tags, experiment_notebook_path, deployment_notebook_path, is_default, created_at, updated_at) "
            f"VALUES ('{TASK_ID}', '{NAME}', '{DESCRIPTION}', '{IMAGE}', '{COMMANDS_JSON}', '{ARGUMENTS_JSON}', '{TAGS_JSON}', '{EXPERIMENT_NOTEBOOK_PATH}', '{DEPLOYMENT_NOTEBOOK_PATH}', 0, '{CREATED_AT}', '{UPDATED_AT}')"
        )
        conn.execute(text)
        text = (
            f"INSERT INTO tasks (uuid, name, description, image, commands, arguments, tags, experiment_notebook_path, deployment_notebook_path, is_default, created_at, updated_at) "
            f"VALUES ('{TASK_ID_2}', 'foo 2', '{DESCRIPTION}', '{IMAGE}', '{COMMANDS_JSON}', '{ARGUMENTS_JSON}', '{TAGS_JSON}', '{EXPERIMENT_NOTEBOOK_PATH_2}', '{DEPLOYMENT_NOTEBOOK_PATH_2}', 0, '{CREATED_AT}', '{UPDATED_AT}')"
        )
        conn.execute(text)
        conn.close()

        try:
            MINIO_CLIENT.make_bucket(BUCKET_NAME)
        except BucketAlreadyOwnedByYou:
            pass

        file = BytesIO(SAMPLE_NOTEBOOK.encode("utf-8"))
        MINIO_CLIENT.put_object(
            bucket_name=BUCKET_NAME,
            object_name=EXPERIMENT_NOTEBOOK_PATH[len(f"minio://{BUCKET_NAME}/"
                                                     ):],
            data=file,
            length=file.getbuffer().nbytes,
        )

        file = BytesIO(
            b'{"cells":[{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":[]}],"metadata":{"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.9"}},"nbformat":4,"nbformat_minor":4}'
        )
        MINIO_CLIENT.put_object(
            bucket_name=BUCKET_NAME,
            object_name=DEPLOYMENT_NOTEBOOK_PATH[len(f"minio://{BUCKET_NAME}/"
                                                     ):],
            data=file,
            length=file.getbuffer().nbytes,
        )

        session = requests.Session()
        session.cookies.update(COOKIES)
        session.headers.update(HEADERS)
        session.hooks = {
            "response": lambda r, *args, **kwargs: r.raise_for_status(),
        }

        session.put(
            url=f"{JUPYTER_ENDPOINT}/api/contents/tasks",
            data=dumps({
                "type": "directory",
                "content": None
            }),
        )

        session.put(
            url=f"{JUPYTER_ENDPOINT}/api/contents/tasks/{NAME}",
            data=dumps({
                "type": "directory",
                "content": None
            }),
        )

        session.put(
            url=
            f"{JUPYTER_ENDPOINT}/api/contents/tasks/{NAME}/Deployment.ipynb",
            data=dumps({
                "type": "notebook",
                "content": loads(SAMPLE_NOTEBOOK)
            }),
        )

        session.put(
            url=
            f"{JUPYTER_ENDPOINT}/api/contents/tasks/{NAME}/Experiment.ipynb",
            data=dumps({
                "type": "notebook",
                "content": loads(SAMPLE_NOTEBOOK)
            }),
        )
Пример #38
0
            write_uint32(f, texsection_size)

            f.seek(texdata_end)

            f.write(b"DNOS")
            sound_size_offset = f.tell()
            f.write(b"FOOO")
            write_uint32(f, len(resinfo["Level name"]))
            f.write(bytes(resinfo["Level name"], encoding="ascii"))

            f.write(b"HFSB")
            write_uint32(f, 4)
            write_uint32(f, len(sounds) // 2)

            for entry in sounds:
                #f.write(entry.data)
                entry.write(f)

            end = f.tell()
            sound_section_size = end - sound_size_offset - 4
            f.seek(sound_size_offset)
            write_uint32(f, sound_section_size)
            f.seek(end)

            for entry in itertools.chain(models, animations, effects, scripts):
                #f.write(entry.data)
                entry.write(f)

        with bwopen(output, "wb") as final:
            final.write(f.getbuffer())
Пример #39
0
def test_file_flow(file_service, location, example_file_record,
                   identity_simple):
    """Test the lifecycle of a file.

    - Initialize file saving
    - Save a file
    - Commit the files
    - List files of the record
    - Read file metadata
    - Retrieve a file
    - Delete a file
    - Delete all remaining files
    - List should be empty
    """
    recid = example_file_record['id']
    file_to_initialise = [{
        'key': 'article.txt',
        'checksum': 'md5:c785060c866796cc2a1708c997154c8e',
        'size': 17,  # bytes
        'metadata': {
            'description': 'Published article PDF.',
        }
    }]
    # Initialize file saving
    result = file_service.init_files(recid, identity_simple,
                                     file_to_initialise)
    assert result.to_dict()['entries'][0]['key'] == \
        file_to_initialise[0]['key']
    # # Save 3 files
    # to_files = ['one', 'two', 'three']

    # for to_file in to_files:
    content = BytesIO(b'test file content')
    result = file_service.set_file_content(recid, file_to_initialise[0]['key'],
                                           identity_simple, content,
                                           content.getbuffer().nbytes)
    # TODO figure out the response for a successfully saved file
    assert result.to_dict()['key'] == file_to_initialise[0]['key']

    result = file_service.commit_file(recid, 'article.txt', identity_simple)
    # TODO currently there is no status in the json between the initialisation
    # and the committing.
    assert result.to_dict()['key'] == \
        file_to_initialise[0]['key']

    # List files
    result = file_service.list_files(recid, identity_simple)
    assert result.to_dict()['entries'][0]['key'] == \
        file_to_initialise[0]['key']

    # Read file metadata
    result = file_service.read_file_metadata(recid, 'article.txt',
                                             identity_simple)
    assert result.to_dict()['key'] == \
        file_to_initialise[0]['key']

    # Retrieve file
    result = file_service.get_file_content(recid, 'article.txt',
                                           identity_simple)
    assert result.file_id == 'article.txt'

    # Delete file
    result = file_service.delete_file(recid, 'article.txt', identity_simple, 0)
    assert result.file_id == 'article.txt'

    # Assert deleted
    result = file_service.list_files(recid, identity_simple)
    assert result.files
    assert not result.files.get('article.txt')

    # Delete all remaining files
    result = file_service.delete_all_files(recid, identity_simple)
    assert result.files == {}

    # Assert deleted
    result = file_service.list_files(recid, identity_simple)
    assert result.files == {}
Пример #40
0
def write_file(buffer: io.BytesIO,
               uri: str,
               buffer_size: int = io.DEFAULT_BUFFER_SIZE) -> None:
    with open_buffered_stream_writer(uri,
                                     buffer_size=buffer_size) as output_stream:
        output_stream.write(buffer.getbuffer())
Пример #41
0
class Packer(object):
    """
    MessagePack Packer

    Usage::

        packer = Packer()
        astream.write(packer.pack(a))
        astream.write(packer.pack(b))

    Packer's constructor has some keyword arguments:

    :param callable default:
        Convert user type to builtin type that Packer supports.
        See also simplejson's documentation.

    :param bool use_single_float:
        Use single precision float type for float. (default: False)

    :param bool autoreset:
        Reset buffer after each pack and return its content as `bytes`. (default: True).
        If set to false, use `bytes()` to get the content and `.reset()` to clear the buffer.

    :param bool use_bin_type:
        Use bin type introduced in msgpack spec 2.0 for bytes.
        It also enables str8 type for unicode. (default: True)

    :param bool strict_types:
        If set to true, types will be checked to be exact. Derived classes
        from serializable types will not be serialized and will be
        treated as an unsupported type and forwarded to default.
        Additionally tuples will not be serialized as lists.
        This is useful when trying to implement accurate serialization
        for python types.

    :param bool datetime:
        If set to true, datetime with tzinfo is packed into Timestamp type.
        Note that the tzinfo is stripped in the timestamp.
        You can get UTC datetime with `timestamp=3` option of the Unpacker.
        (Python 2 is not supported).

    :param str unicode_errors:
        The error handler for encoding unicode. (default: 'strict')
        DO NOT USE THIS!!  This option is kept for very specific usage.

    Example of streaming deserialize from file-like object::

        unpacker = Unpacker(file_like)
        for o in unpacker:
            process(o)

    Example of streaming deserialize from socket::

        unpacker = Unpacker()
        while True:
            buf = sock.recv(1024**2)
            if not buf:
                break
            unpacker.feed(buf)
            for o in unpacker:
                process(o)

    Raises ``ExtraData`` when *packed* contains extra bytes.
    Raises ``OutOfData`` when *packed* is incomplete.
    Raises ``FormatError`` when *packed* is not valid msgpack.
    Raises ``StackError`` when *packed* contains too deeply nested data.
    Other exceptions can be raised during unpacking.
    """
    def __init__(
        self,
        default=None,
        use_single_float=False,
        autoreset=True,
        use_bin_type=True,
        strict_types=False,
        datetime=False,
        unicode_errors=None,
    ):
        self._strict_types = strict_types
        self._use_float = use_single_float
        self._autoreset = autoreset
        self._use_bin_type = use_bin_type
        self._buffer = StringIO()
        if PY2 and datetime:
            raise ValueError("datetime is not supported in Python 2")
        self._datetime = bool(datetime)
        self._unicode_errors = unicode_errors or "strict"
        if default is not None:
            if not callable(default):
                raise TypeError("default must be callable")
        self._default = default

    def _pack(
        self,
        obj,
        nest_limit=DEFAULT_RECURSE_LIMIT,
        check=isinstance,
        check_type_strict=_check_type_strict,
    ):
        default_used = False
        if self._strict_types:
            check = check_type_strict
            list_types = list
        else:
            list_types = (list, tuple)
        while True:
            if nest_limit < 0:
                raise ValueError("recursion limit exceeded")
            if obj is None:
                return self._buffer.write(b"\xc0")
            if check(obj, bool):
                if obj:
                    return self._buffer.write(b"\xc3")
                return self._buffer.write(b"\xc2")
            if check(obj, int_types):
                if 0 <= obj < 0x80:
                    return self._buffer.write(struct.pack("B", obj))
                if -0x20 <= obj < 0:
                    return self._buffer.write(struct.pack("b", obj))
                if 0x80 <= obj <= 0xFF:
                    return self._buffer.write(struct.pack("BB", 0xCC, obj))
                if -0x80 <= obj < 0:
                    return self._buffer.write(struct.pack(">Bb", 0xD0, obj))
                if 0xFF < obj <= 0xFFFF:
                    return self._buffer.write(struct.pack(">BH", 0xCD, obj))
                if -0x8000 <= obj < -0x80:
                    return self._buffer.write(struct.pack(">Bh", 0xD1, obj))
                if 0xFFFF < obj <= 0xFFFFFFFF:
                    return self._buffer.write(struct.pack(">BI", 0xCE, obj))
                if -0x80000000 <= obj < -0x8000:
                    return self._buffer.write(struct.pack(">Bi", 0xD2, obj))
                if 0xFFFFFFFF < obj <= 0xFFFFFFFFFFFFFFFF:
                    return self._buffer.write(struct.pack(">BQ", 0xCF, obj))
                if -0x8000000000000000 <= obj < -0x80000000:
                    return self._buffer.write(struct.pack(">Bq", 0xD3, obj))
                if not default_used and self._default is not None:
                    obj = self._default(obj)
                    default_used = True
                    continue
                raise OverflowError("Integer value out of range")
            if check(obj, (bytes, bytearray)):
                n = len(obj)
                if n >= 2**32:
                    raise ValueError("%s is too large" % type(obj).__name__)
                self._pack_bin_header(n)
                return self._buffer.write(obj)
            if check(obj, unicode):
                obj = obj.encode("utf-8", self._unicode_errors)
                n = len(obj)
                if n >= 2**32:
                    raise ValueError("String is too large")
                self._pack_raw_header(n)
                return self._buffer.write(obj)
            if check(obj, memoryview):
                n = len(obj) * obj.itemsize
                if n >= 2**32:
                    raise ValueError("Memoryview is too large")
                self._pack_bin_header(n)
                return self._buffer.write(obj)
            if check(obj, float):
                if self._use_float:
                    return self._buffer.write(struct.pack(">Bf", 0xCA, obj))
                return self._buffer.write(struct.pack(">Bd", 0xCB, obj))
            if check(obj, (ExtType, Timestamp)):
                if check(obj, Timestamp):
                    code = -1
                    data = obj.to_bytes()
                else:
                    code = obj.code
                    data = obj.data
                assert isinstance(code, int)
                assert isinstance(data, bytes)
                L = len(data)
                if L == 1:
                    self._buffer.write(b"\xd4")
                elif L == 2:
                    self._buffer.write(b"\xd5")
                elif L == 4:
                    self._buffer.write(b"\xd6")
                elif L == 8:
                    self._buffer.write(b"\xd7")
                elif L == 16:
                    self._buffer.write(b"\xd8")
                elif L <= 0xFF:
                    self._buffer.write(struct.pack(">BB", 0xC7, L))
                elif L <= 0xFFFF:
                    self._buffer.write(struct.pack(">BH", 0xC8, L))
                else:
                    self._buffer.write(struct.pack(">BI", 0xC9, L))
                self._buffer.write(struct.pack("b", code))
                self._buffer.write(data)
                return
            if check(obj, list_types):
                n = len(obj)
                self._pack_array_header(n)
                for i in xrange(n):
                    self._pack(obj[i], nest_limit - 1)
                return
            if check(obj, dict):
                return self._pack_map_pairs(len(obj), dict_iteritems(obj),
                                            nest_limit - 1)

            if self._datetime and check(obj,
                                        _DateTime) and obj.tzinfo is not None:
                obj = Timestamp.from_datetime(obj)
                default_used = 1
                continue

            if not default_used and self._default is not None:
                obj = self._default(obj)
                default_used = 1
                continue
            raise TypeError("Cannot serialize %r" % (obj, ))

    def pack(self, obj):
        try:
            self._pack(obj)
        except:
            self._buffer = StringIO()  # force reset
            raise
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_map_pairs(self, pairs):
        self._pack_map_pairs(len(pairs), pairs)
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_array_header(self, n):
        if n >= 2**32:
            raise ValueError
        self._pack_array_header(n)
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_map_header(self, n):
        if n >= 2**32:
            raise ValueError
        self._pack_map_header(n)
        if self._autoreset:
            ret = self._buffer.getvalue()
            self._buffer = StringIO()
            return ret

    def pack_ext_type(self, typecode, data):
        if not isinstance(typecode, int):
            raise TypeError("typecode must have int type.")
        if not 0 <= typecode <= 127:
            raise ValueError("typecode should be 0-127")
        if not isinstance(data, bytes):
            raise TypeError("data must have bytes type")
        L = len(data)
        if L > 0xFFFFFFFF:
            raise ValueError("Too large data")
        if L == 1:
            self._buffer.write(b"\xd4")
        elif L == 2:
            self._buffer.write(b"\xd5")
        elif L == 4:
            self._buffer.write(b"\xd6")
        elif L == 8:
            self._buffer.write(b"\xd7")
        elif L == 16:
            self._buffer.write(b"\xd8")
        elif L <= 0xFF:
            self._buffer.write(b"\xc7" + struct.pack("B", L))
        elif L <= 0xFFFF:
            self._buffer.write(b"\xc8" + struct.pack(">H", L))
        else:
            self._buffer.write(b"\xc9" + struct.pack(">I", L))
        self._buffer.write(struct.pack("B", typecode))
        self._buffer.write(data)

    def _pack_array_header(self, n):
        if n <= 0x0F:
            return self._buffer.write(struct.pack("B", 0x90 + n))
        if n <= 0xFFFF:
            return self._buffer.write(struct.pack(">BH", 0xDC, n))
        if n <= 0xFFFFFFFF:
            return self._buffer.write(struct.pack(">BI", 0xDD, n))
        raise ValueError("Array is too large")

    def _pack_map_header(self, n):
        if n <= 0x0F:
            return self._buffer.write(struct.pack("B", 0x80 + n))
        if n <= 0xFFFF:
            return self._buffer.write(struct.pack(">BH", 0xDE, n))
        if n <= 0xFFFFFFFF:
            return self._buffer.write(struct.pack(">BI", 0xDF, n))
        raise ValueError("Dict is too large")

    def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
        self._pack_map_header(n)
        for (k, v) in pairs:
            self._pack(k, nest_limit - 1)
            self._pack(v, nest_limit - 1)

    def _pack_raw_header(self, n):
        if n <= 0x1F:
            self._buffer.write(struct.pack("B", 0xA0 + n))
        elif self._use_bin_type and n <= 0xFF:
            self._buffer.write(struct.pack(">BB", 0xD9, n))
        elif n <= 0xFFFF:
            self._buffer.write(struct.pack(">BH", 0xDA, n))
        elif n <= 0xFFFFFFFF:
            self._buffer.write(struct.pack(">BI", 0xDB, n))
        else:
            raise ValueError("Raw is too large")

    def _pack_bin_header(self, n):
        if not self._use_bin_type:
            return self._pack_raw_header(n)
        elif n <= 0xFF:
            return self._buffer.write(struct.pack(">BB", 0xC4, n))
        elif n <= 0xFFFF:
            return self._buffer.write(struct.pack(">BH", 0xC5, n))
        elif n <= 0xFFFFFFFF:
            return self._buffer.write(struct.pack(">BI", 0xC6, n))
        else:
            raise ValueError("Bin is too large")

    def bytes(self):
        """Return internal buffer contents as bytes object"""
        return self._buffer.getvalue()

    def reset(self):
        """Reset internal buffer.

        This method is useful only when autoreset=False.
        """
        self._buffer = StringIO()

    def getbuffer(self):
        """Return view of internal buffer."""
        if USING_STRINGBUILDER or PY2:
            return memoryview(self.bytes())
        else:
            return self._buffer.getbuffer()
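A minimal usage sketch for the Packer defined above, assuming the module-level helpers it references (StringIO, int_types, unicode, dict_iteritems, ExtType, Timestamp and friends from msgpack's pure-Python fallback) are importable alongside it. It shows the default autoreset behaviour as well as manual buffer management with bytes()/reset()/getbuffer().

# Hypothetical usage sketch for the Packer class above.
packer = Packer()                                     # autoreset=True by default
frame_a = packer.pack({"id": 1, "tags": ["x", "y"]})  # returns bytes; the buffer is reset afterwards
frame_b = packer.pack(b"\x00\x01")                    # packed as a bin type because use_bin_type=True

stream = Packer(autoreset=False)                      # accumulate several objects in one buffer
stream.pack("header")
stream.pack([1, 2, 3])
payload = stream.bytes()                              # or stream.getbuffer() for a zero-copy view
stream.reset()                                        # clear the internal buffer for reuse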
Пример #42
0
def mock_imgur5k(tmpdir_factory, mock_image_stream):
    file = BytesIO(mock_image_stream)
    image_folder = tmpdir_factory.mktemp("images")
    label_folder = tmpdir_factory.mktemp("dataset_info")
    labels = {
        "index_id": {
            "YsaVkzl": {
                "image_url": "https://i.imgur.com/YsaVkzl.jpg",
                "image_path":
                "/path/to/IMGUR5K-Handwriting-Dataset/images/YsaVkzl.jpg",
                "image_hash": "993a7cbb04a7c854d1d841b065948369"
            },
            "wz3wHhN": {
                "image_url": "https://i.imgur.com/wz3wHhN.jpg",
                "image_path":
                "/path/to/IMGUR5K-Handwriting-Dataset/images/wz3wHhN.jpg",
                "image_hash": "9157426a98ee52f3e1e8d41fa3a99175"
            },
            "BRHSP23": {
                "image_url": "https://i.imgur.com/BRHSP23.jpg",
                "image_path":
                "/path/to/IMGUR5K-Handwriting-Dataset/images/BRHSP23.jpg",
                "image_hash": "aab01f7ac82ae53845b01674e9e34167"
            }
        },
        "index_to_ann_map": {
            "YsaVkzl": ["YsaVkzl_0", "YsaVkzl_1", "YsaVkzl_2"],
            "wz3wHhN": ["wz3wHhN_0", "wz3wHhN_1"],
            "BRHSP23": ["BRHSP23_0"]
        },
        "ann_id": {
            "YsaVkzl_0": {
                "word": "I",
                "bounding_box": "[605.33, 1150.67, 614.33, 226.33, 81.0]"
            },
            "YsaVkzl_1": {
                "word": "am",
                "bounding_box": "[783.67, 654.67, 521.0, 222.33, 56.67]"
            },
            "YsaVkzl_2": {
                "word": "a",
                "bounding_box": "[959.0, 437.0, 76.67, 201.0, 38.33]"
            },
            "wz3wHhN_0": {
                "word": "jedi",
                "bounding_box": "[783.67, 654.67, 521.0, 222.33, 56.67]"
            },
            "wz3wHhN_1": {
                "word": "!",
                "bounding_box": "[959.0, 437.0, 76.67, 201.0, 38.33]"
            },
            "BRHSP23_0": {
                "word": "jedi",
                "bounding_box": "[783.67, 654.67, 521.0, 222.33, 56.67]"
            }
        }
    }
    label_file = label_folder.join("imgur5k_annotations.json")
    with open(label_file, 'w') as f:
        json.dump(labels, f)
    for index_id in ['YsaVkzl', 'wz3wHhN', 'BRHSP23']:
        fn_i = image_folder.join(f"{index_id}.jpg")
        with open(fn_i, 'wb') as f:
            f.write(file.getbuffer())
    return str(image_folder), str(label_file)
Пример #43
0
def decode(data: bytes, max_size: int = 4096) -> bytearray:
    """
    Performs LZSS decoding

    Parameters
    ----------
    data: bytes
        A string of bytes to decompress

    max_size: int
        Maximum size of uncompressed data, in bytes

    Returns
    -------
    bytearray
        A bytearray containing the uncompressed data
    """
    reader = BytesIO(data)
    length = len(reader.getbuffer())

    flags = 0  # current flag byte
    flags_used = 7  # bits of the flag byte already consumed; forces a read on the first pass

    out_data = bytearray()

    while len(out_data) < max_size:

        flags = flags >> 1
        flags_used = flags_used + 1

        # If all flag bits have been shifted out, read a new flag
        if flags_used == 8:

            if reader.tell() == length:
                break

            flags = reader.read(1)[0]
            flags_used = 0

        # Found an unencoded byte
        if (flags & 1) != 0:

            if reader.tell() == length:
                break

            out_data.append(reader.read(1)[0])

        # Found encoded data
        else:

            if reader.tell() == length:
                break

            code_offset = reader.read(1)[0]

            if reader.tell() == length:
                break

            code_length = reader.read(1)[0] + MAX_UNENCODED + 1

            for i in range(0, code_length):
                out_data.append(out_data[len(out_data) - (code_offset + 1)])

    return out_data
Пример #44
0
def encode(data: any) -> bytes:
    """
    Perform LZSS Algorithm Encoding
    """
    global hash_table

    writer = BytesIO()
    input_buffer = bytearray(data)

    length = len(input_buffer)
    if length == 0:
        return bytes(0)

    # Start with an empty list
    hash_table = list()
    for i in range(0, HASH_SIZE):
        hash_table.append([])

    flag_data = FlagData()

    # 8 code flags and 8 encoded strings
    flag_data.flags = 0
    flag_data.flag_position = 1
    encoded_data = bytearray(256 * 8)
    flag_data.next_encoded = 0  # Next index of encoded data

    input_buffer_position = 0  # Head of encoded lookahead

    for i in range(0, length - MAX_UNENCODED):
        hash_key = get_hash_key(input_buffer, i)
        hash_table[hash_key].append(i)

    match_data = find_match(input_buffer, input_buffer_position)

    while input_buffer_position < length:

        # Clamp the match length so it does not run past the end of the input
        if input_buffer_position + match_data.length > length:
            match_data.length = length - input_buffer_position

        # Write unencoded byte if match is not long enough
        if match_data.length <= MAX_UNENCODED:

            match_data.length = 1  # 1 unencoded byte

            flag_data.flags = flag_data.flags | flag_data.flag_position  # Flag unencoded byte
            encoded_data[
                flag_data.next_encoded] = input_buffer[input_buffer_position]
            flag_data.next_encoded = flag_data.next_encoded + 1
            update_flags(flag_data, encoded_data, writer)

        # Encode as offset and length if match length >= max unencoded
        else:
            match_data.offset = (input_buffer_position - 1) - match_data.offset
            if match_data.offset > 255 or match_data.offset < 0:
                print("Match Data Offset out of range!")
                return bytes(0)
            if match_data.length - (MAX_UNENCODED + 1) > 255:
                print("Match Data Length out of range!")
                return bytes(0)

            encoded_data[flag_data.next_encoded] = match_data.offset
            flag_data.next_encoded = flag_data.next_encoded + 1

            encoded_data[flag_data.next_encoded] = match_data.length - (
                MAX_UNENCODED + 1)
            flag_data.next_encoded = flag_data.next_encoded + 1
            update_flags(flag_data, encoded_data, writer)

        input_buffer_position = input_buffer_position + match_data.length

        # Find next match
        match_data = find_match(input_buffer, input_buffer_position)

    # Write any remaining encoded data
    if flag_data.next_encoded != 0:
        writer.write(flag_data.flags.to_bytes(1, "little"))
        for i in range(0, flag_data.next_encoded):
            writer.write(encoded_data[i].to_bytes(1, "little"))

    return writer.getbuffer().tobytes(order='A')
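A small round-trip sketch for the LZSS pair above (decode in the previous example, encode here), assuming both functions and their helpers (FlagData, find_match, get_hash_key, update_flags, MAX_UNENCODED, HASH_SIZE) live in the same module. It only exercises the two entry points as defined above; the comparison is printed rather than asserted, since the helpers are not shown here.

# Hypothetical round trip through encode()/decode() as defined above.
original = b"abracadabra abracadabra abracadabra"  # repetitive input that LZSS compresses well
compressed = encode(original)
restored = decode(compressed, max_size=len(original))
print("round trip ok:", bytes(restored) == original,
      "| {} -> {} bytes".format(len(original), len(compressed)))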
Пример #45
0
    def generate_plot(self, plot_size=6):
        plot_size = int(self.plot_size) if int(self.plot_size) else plot_size
        if len(self.point) == 2:
            My_X, My_Y = np.meshgrid(range(10 * plot_size),
                                     range(10 * plot_size))
            My_X = (My_X - 5 * plot_size) / 5
            My_Y = (My_Y - 5 * plot_size) / 5
            My_Z = np.ones((10 * plot_size, 10 * plot_size))
            for i in range(10 * plot_size):
                for j in range(10 * plot_size):
                    My_Z[i, j] = self.calculate_function_value_in_point(
                        self.function, [My_X[i, j], My_Y[i][0]])

            fig = Figure()
            ax = Axes3D(fig)

            # 1st plot

            X, Y, Z = My_X, My_Y, My_Z
            ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3)
            ax.plot3D(np.array(self.path_x),
                      np.array(self.path_y),
                      np.array(self.path_z),
                      c='r',
                      marker='o')
            cset = ax.contour(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
            cset = ax.contour(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
            cset = ax.contour(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)

            buf = BytesIO()
            fig.savefig(buf, format="png")
            img1 = base64.b64encode(buf.getbuffer()).decode("ascii")

            # 2nd plot

            ax.view_init(azim=-90, elev=90)

            buf = BytesIO()
            fig.savefig(buf, format="png")
            img2 = base64.b64encode(buf.getbuffer()).decode("ascii")

            # 3rd plot
            surf = ax.plot_surface(My_X,
                                   My_Y,
                                   My_Z,
                                   rstride=1,
                                   cstride=1,
                                   cmap=cm.coolwarm,
                                   linewidth=0,
                                   antialiased=False)
            ax.plot(np.array(self.path_x),
                    np.array(self.path_y),
                    np.array(self.path_z),
                    'ro',
                    alpha=0.5)
            ax.zaxis.set_major_locator(LinearLocator(10))
            ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
            fig.colorbar(surf, shrink=0.5, aspect=5)
            ax.view_init(azim=None, elev=None)

            buf = BytesIO()
            fig.savefig(buf, format="png")
            img3 = base64.b64encode(buf.getbuffer()).decode("ascii")

            img = [img1, img2, img3]

            return img
            #return plt
            #plt.show()
        else:
            return [
                "It's not 3D object", "It's not 3D object",
                "It's not 3D object"
            ]
Пример #46
0
def pie_city():
    #cities = ['ha hoi', 'thanh pho ho chi minh', 'da nang']
    db = get_db()
    jobs_count = db.jobs_info.count()
    cities = db.jobs_info.distinct('city')
    city_job_data = []
    c = []
    num = []
    salary = []
    for city in cities:
        query = {"city": city}
        # print(city + '-' + str(db.jobs_info.find(query).count()/jobs_count))
        # if(db.jobs_info.find(query).count()/jobs_count > 0.05):
        city_job_data.append({
            'city': city,
            'num': db.jobs_info.find(query).count(),
        })
    for job in db.jobs_info.find():
        if (job["maxSalary"] != 0):
            salary.append(job["maxSalary"] / 1000000)
            # print(job["maxSalary"])
    sorted_data = sorted(city_job_data, key=lambda k: k['num'], reverse=True)
    for i in range(5):
        c.append(sorted_data[i]['city'])
        num.append(sorted_data[i]['num'])
    # Draw Chart
    # print(city_jobs_count)
    fig = Figure()
    gs = fig.add_gridspec(1, 1)
    ax1 = fig.add_subplot(gs[0, 0])
    ax1.set_title("5 tỉnh thành có số lương công việc nhiều nhất")
    # ax2 = fig.add_subplot(gs[1, 0])
    # ax3 = fig.add_subplot(gs[0, 1])
    # ax4 = fig.add_subplot(gs[1, 1])
    pie = ax1.pie(num, autopct='%1.3f%%')
    ax1.legend(pie[0], c, loc="lower right", bbox_to_anchor=(0.25, 0))
    # ax2.pie(city_jobs_count, labels = cities,autopct='%1.2f%%')
    # ax3.pie(city_jobs_count, labels = cities,autopct='%1.2f%%')
    # ax4.pie(city_jobs_count, labels = cities,autopct='%1.2f%%')
    # Save it to a temporary buffer.
    buf = BytesIO()
    fig.savefig(buf, format="png")
    # Embed the result in the html output.
    data = base64.b64encode(buf.getbuffer()).decode("ascii")
    # return f"<img src='data:image/png;base64,{data}'/>"
    fig2 = Figure()
    gs2 = fig2.add_gridspec(1, 1)
    ax2 = fig2.add_subplot(gs2[0, 0])
    ax2.set_xlabel('Mức lương (triệu đồng)')
    ax2.set_ylabel('Số lượng công việc')
    ax2.set_title('Phân bố mức lương')
    # ax2 =fig2.add_subplot(gs[1, 0])
    # ax3 = fig2.add_subplot(gs[0, 1])
    # ax4 = fig2.add_subplot(gs[1, 1])
    hist = ax2.hist(salary, 80)
    # ax2.pie(city_jobs_count, labels = cities,autopct='%1.2f%%')
    # ax3.pie(city_jobs_count, labels = cities,autopct='%1.2f%%')
    # ax4.pie(city_jobs_count, labels = cities,autopct='%1.2f%%')
    # Save it to a temporary buffer.
    buf = BytesIO()
    fig2.savefig(buf, format="png")
    # Embed the result in the html output.
    data2 = base64.b64encode(buf.getbuffer()).decode("ascii")
    return render_template("charts/pie.html", pie=data, hist=data2)
Пример #47
0
async def on_member_join(mem):
    im = Image.open("./banner2.png")
    
    draw = ImageDraw.Draw(im, mode='RGBA')
    
    text = mem.display_name
    fsiz = 48
    font = ImageFont.truetype("./Starfish.ttf", fsiz)
    while draw.textsize(text, font=font)[0] > 430:
        fsiz -= 1
        font = ImageFont.truetype("./Starfish.ttf", fsiz)

    tx, ty = draw.textsize(text, font=font)

    x = 250 - tx//2
    y = round(158 * 1.8) - ty//2
    #shadowcolor = (100, 100, 100)
    #shadowcolor = (255,255,255)
    fillcolor = (165, 214, 254)
    shadowcolor = (105, 154, 194)
    a = "center"
    draw.text((x-1, y-1), text, font=font, fill=shadowcolor, align=a)
    draw.text((x+1, y+1), text, font=font, fill=shadowcolor, align=a)
    draw.text((x+1, y-1), text, font=font, fill=shadowcolor, align=a)
    draw.text((x-1, y+1), text, font=font, fill=shadowcolor, align=a)
    draw.text((x, y), text, font=font, fill=fillcolor, align=a)
    


    avatar_im = None
    url = mem.avatar_url
    if not url:
        url = mem.default_avatar_url

    retries = 3  # not set anywhere in the original snippet; an assumed retry budget for empty avatar downloads
    while True:
        async with aiohttp.ClientSession(loop=bot.loop) as aiosession:
            with aiohttp.Timeout(10):
                async with aiosession.get(url) as resp:
                    avatar_im = BytesIO(await resp.read())
                    if avatar_im.getbuffer().nbytes > 0 or retries == 0:
                        await aiosession.close()
                        break
                    retries -= 1
                    print('0 nbytes image found. Retries left: {}'.format(retries+1))

    ava_sqdim = 78
    resize = (ava_sqdim, ava_sqdim)
    avatar_im = Image.open(avatar_im).convert("RGBA")
    avatar_im = avatar_im.resize(resize, Image.ANTIALIAS)
    avatar_im.putalpha(avatar_im.split()[3])

    is_square = False
    if not is_square:
        mask = Image.new('L', resize, 0)
        maskDraw = ImageDraw.Draw(mask)
        maskDraw.ellipse((0, 0) + resize, fill=255)
        mask = mask.resize(avatar_im.size, Image.ANTIALIAS)
        avatar_im.putalpha(mask)
        
    img_center_x = (im.width // 2)
    img_center_y = (im.height // 2)

    offset_x = 109
    offset_y = 36

    img_offset_x = img_center_x + offset_x
    img_offset_y = img_center_y + offset_y
    ava_right = img_offset_x + avatar_im.width//2
    ava_bottom = img_offset_y + avatar_im.height//2
    ava_left = img_offset_x - avatar_im.width//2
    ava_top = img_offset_y - avatar_im.height//2
    avatar_im = tint_image(avatar_im, (255, 255, 255, 80))
    im.paste(avatar_im, box=(ava_left, ava_top, ava_right, ava_bottom), mask=avatar_im)
    temp = BytesIO()
    im.save(temp, format="png")
    temp.seek(0)
    await bot.send_file(mem.server.default_channel ,temp, content="Give a popping welcome to " + mem.display_name + " :candy:", filename="welcome.png")
Пример #48
0
def plotting():
    if request.method == 'GET':
        form = PlottingForm()
        return render_template('plotting.html', title='Kuvaaja', form=form)
    elif request.method == 'POST':
        form = PlottingForm()
        print('POST')

        if form.validate_on_submit():
            print('form validated')

            # get the coefficients
            if form.const.data:
                const = form.const.data
            else:
                const = 0
            if form.x1.data:
                x1 = form.x1.data
            else:
                x1 = 0
            if form.x2.data:
                x2 = form.x2.data
            else:
                x2 = 0
            if form.x3.data:
                x3 = form.x3.data
            else:
                x3 = 0
            if form.x4.data:
                x4 = form.x4.data
            else:
                x4 = 0
            if form.x_start.data:
                x_start = form.x_start.data
            else:
                x_start = -5
            if form.x_end.data:
                x_end = form.x_end.data
            else:
                x_end = 5

            # form the figure title
            otsikko = ''
            for coeff, nome in zip([x4, x3, x2, x1, const],
                                   ['x^4', 'x^3', 'x^2', 'x', '']):
                if coeff != 0:
                    if coeff > 0:
                        otsikko = otsikko + str(coeff) + nome + " + "
                    else:
                        otsikko = otsikko[:-2] + "- " + str(coeff).lstrip(
                            '-') + nome + " + "
            otsikko = f"y = {otsikko.rstrip(' +*')} x:n arvoilla {x_start} ≤ x ≤ {x_end}".replace(
                '.', ',')
            print('otsikko: ', otsikko)

            # create the data to plot
            x = np.linspace(x_start, x_end, 100)
            y = const + x1 * x + x2 * x**2 + x3 * x**3 + x4 * x**4

            # generate the figure
            fig = Figure()

            ax = fig.subplots()
            ax.plot(x, y)
            ax.set_title(otsikko)
            ax.axhline(y=0, color='k')
            ax.axvline(x=0, color='k')
            ax.set_xlabel('x')
            ax.set_ylabel('y')
            ax.grid()

            # save it to a temporary buffer
            buf = BytesIO()
            fig.savefig(buf, format='png')

            # prepare the data for html output
            data = base64.b64encode(buf.getbuffer()).decode("ascii")

            return render_template('plotting.html',
                                   title='Kuvaaja',
                                   form=form,
                                   data=data)

        flash('Tarkista syöttämäsi arvot.')
        return render_template('plotting.html', title='Kuvaaja', form=form)
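The Flask/Matplotlib snippets in examples #45, #46 and #48 all repeat the same "render a Figure into a BytesIO, then base64-encode getbuffer() for HTML embedding" pattern. A small hypothetical helper such as fig_to_base64_png() below captures that pattern using only the calls those snippets already make.

# Hypothetical helper distilling the Figure-to-base64 pattern used above.
import base64
from io import BytesIO

from matplotlib.figure import Figure


def fig_to_base64_png(fig: Figure) -> str:
    """Render a Matplotlib Figure to a base64-encoded PNG string for HTML embedding."""
    buf = BytesIO()
    fig.savefig(buf, format="png")
    # getbuffer() exposes the PNG bytes written so far without copying them.
    return base64.b64encode(buf.getbuffer()).decode("ascii")


# Usage inside a view, mirroring the snippets above:
#   data = fig_to_base64_png(fig)
#   return f"<img src='data:image/png;base64,{data}'/>"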
Пример #49
0
class Headers:

    header_size = 112
    chunk_size = 10**16

    max_target = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
    genesis_hash = b'9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
    target_timespan = 150
    checkpoints = HASHES
    first_block_timestamp = 1466646588  # block 1, as 0 is off by a lot
    timestamp_average_offset = 160.6855883050695  # calculated at 733447

    validate_difficulty: bool = True

    def __init__(self, path) -> None:
        self.io = None
        self.path = path
        self._size: Optional[int] = None
        self.chunk_getter: Optional[Callable] = None
        self.known_missing_checkpointed_chunks = set()
        self.check_chunk_lock = asyncio.Lock()

    async def open(self):
        self.io = BytesIO()
        if self.path != ':memory:':
            def _readit():
                if os.path.exists(self.path):
                    with open(self.path, 'r+b') as header_file:
                        self.io.seek(0)
                        self.io.write(header_file.read())
            await asyncio.get_event_loop().run_in_executor(None, _readit)
        bytes_size = self.io.seek(0, os.SEEK_END)
        self._size = bytes_size // self.header_size
        max_checkpointed_height = max(self.checkpoints.keys() or [-1]) + 1000
        if bytes_size % self.header_size:
            log.warning("Reader file size doesnt match header size. Repairing, might take a while.")
            await self.repair()
        else:
            # try repairing any incomplete write on tip from previous runs (outside of checkpoints, that are ok)
            await self.repair(start_height=max_checkpointed_height)
        await self.ensure_checkpointed_size()
        await self.get_all_missing_headers()

    async def close(self):
        if self.io is not None:
            def _close():
                flags = 'r+b' if os.path.exists(self.path) else 'w+b'
                with open(self.path, flags) as header_file:
                    header_file.write(self.io.getbuffer())
            await asyncio.get_event_loop().run_in_executor(None, _close)
            self.io.close()
            self.io = None

    @staticmethod
    def serialize(header):
        return b''.join([
            struct.pack('<I', header['version']),
            unhexlify(header['prev_block_hash'])[::-1],
            unhexlify(header['merkle_root'])[::-1],
            unhexlify(header['claim_trie_root'])[::-1],
            struct.pack('<III', header['timestamp'], header['bits'], header['nonce'])
        ])

    @staticmethod
    def deserialize(height, header):
        version, = struct.unpack('<I', header[:4])
        timestamp, bits, nonce = struct.unpack('<III', header[100:112])
        return {
            'version': version,
            'prev_block_hash': hexlify(header[4:36][::-1]),
            'merkle_root': hexlify(header[36:68][::-1]),
            'claim_trie_root': hexlify(header[68:100][::-1]),
            'timestamp': timestamp,
            'bits': bits,
            'nonce': nonce,
            'block_height': height,
        }

    def get_next_chunk_target(self, chunk: int) -> ArithUint256:
        return ArithUint256(self.max_target)

    def get_next_block_target(self, max_target: ArithUint256, previous: Optional[dict],
                              current: Optional[dict]) -> ArithUint256:
        # https://github.com/lbryio/lbrycrd/blob/master/src/lbry.cpp
        if previous is None and current is None:
            return max_target
        if previous is None:
            previous = current
        actual_timespan = current['timestamp'] - previous['timestamp']
        modulated_timespan = self.target_timespan + int((actual_timespan - self.target_timespan) / 8)
        minimum_timespan = self.target_timespan - int(self.target_timespan / 8)  # 150 - 18 = 132
        maximum_timespan = self.target_timespan + int(self.target_timespan / 2)  # 150 + 75 = 225
        clamped_timespan = max(minimum_timespan, min(modulated_timespan, maximum_timespan))
        target = ArithUint256.from_compact(current['bits'])
        new_target = min(max_target, (target * clamped_timespan) / self.target_timespan)
        return new_target

    def __len__(self) -> int:
        return self._size

    def __bool__(self):
        return True

    async def get(self, height) -> dict:
        if isinstance(height, slice):
            raise NotImplementedError("Slicing of header chain has not been implemented yet.")
        try:
            return self.deserialize(height, await self.get_raw_header(height))
        except struct.error:
            raise IndexError(f"failed to get {height}, at {len(self)}")

    def estimated_timestamp(self, height, try_real_headers=True):
        if height <= 0:
            return
        if try_real_headers and self.has_header(height):
            offset = height * self.header_size
            return struct.unpack('<I', self.io.getbuffer()[offset + 100: offset + 104])[0]
        return int(self.first_block_timestamp + (height * self.timestamp_average_offset))

    def estimated_julian_day(self, height):
        return date_to_julian_day(date.fromtimestamp(self.estimated_timestamp(height, False)))

    async def get_raw_header(self, height) -> bytes:
        if self.chunk_getter:
            await self.ensure_chunk_at(height)
        if not 0 <= height <= self.height:
            raise IndexError(f"{height} is out of bounds, current height: {self.height}")
        return self._read(height)

    def _read(self, height, count=1):
        offset = height * self.header_size
        return bytes(self.io.getbuffer()[offset: offset + self.header_size * count])

    def chunk_hash(self, start, count):
        return self.hash_header(self._read(start, count)).decode()

    async def ensure_checkpointed_size(self):
        max_checkpointed_height = max(self.checkpoints.keys() or [-1])
        if self.height < max_checkpointed_height:
            self._write(max_checkpointed_height, bytes([0] * self.header_size * 1000))

    async def ensure_chunk_at(self, height):
        async with self.check_chunk_lock:
            if self.has_header(height):
                log.debug("has header %s", height)
                return
            return await self.fetch_chunk(height)

    async def fetch_chunk(self, height):
        log.info("on-demand fetching height %s", height)
        start = (height // 1000) * 1000
        headers = await self.chunk_getter(start)  # pylint: disable=not-callable
        chunk = (
            zlib.decompress(base64.b64decode(headers['base64']), wbits=-15, bufsize=600_000)
        )
        chunk_hash = self.hash_header(chunk).decode()
        if self.checkpoints.get(start) == chunk_hash:
            self._write(start, chunk)
            if start in self.known_missing_checkpointed_chunks:
                self.known_missing_checkpointed_chunks.remove(start)
            return
        elif start not in self.checkpoints:
            return  # todo: fixme
        raise Exception(
            f"Checkpoint mismatch at height {start}. Expected {self.checkpoints[start]}, but got {chunk_hash} instead."
        )

    def has_header(self, height):
        normalized_height = (height // 1000) * 1000
        if normalized_height in self.checkpoints:
            return normalized_height not in self.known_missing_checkpointed_chunks

        empty = '56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d'
        all_zeroes = '789d737d4f448e554b318c94063bbfa63e9ccda6e208f5648ca76ee68896557b'
        return self.chunk_hash(height, 1) not in (empty, all_zeroes)

    async def get_all_missing_headers(self):
        # Heavy operation done in one optimized shot
        for chunk_height, expected_hash in reversed(list(self.checkpoints.items())):
            if chunk_height in self.known_missing_checkpointed_chunks:
                continue
            if self.chunk_hash(chunk_height, 1000) != expected_hash:
                self.known_missing_checkpointed_chunks.add(chunk_height)
        return self.known_missing_checkpointed_chunks

    @property
    def height(self) -> int:
        return len(self)-1

    @property
    def bytes_size(self):
        return len(self) * self.header_size

    async def hash(self, height=None) -> bytes:
        return self.hash_header(
            await self.get_raw_header(height if height is not None else self.height)
        )

    @staticmethod
    def hash_header(header: bytes) -> bytes:
        if header is None:
            return b'0' * 64
        return hexlify(double_sha256(header)[::-1])

    async def connect(self, start: int, headers: bytes) -> int:
        added = 0
        bail = False
        for height, chunk in self._iterate_chunks(start, headers):
            try:
                # validate_chunk() is CPU bound and reads previous chunks from file system
                await self.validate_chunk(height, chunk)
            except InvalidHeader as e:
                bail = True
                chunk = chunk[:(e.height-height)*self.header_size]
            if chunk:
                added += self._write(height, chunk)
            if bail:
                break
        return added

    def _write(self, height, verified_chunk):
        self.io.seek(height * self.header_size, os.SEEK_SET)
        written = self.io.write(verified_chunk) // self.header_size
        # self.io.truncate()
        # .seek()/.write()/.truncate() might also .flush() when needed
        # the goal here is mainly to ensure we're definitely flush()'ing
        self.io.flush()
        self._size = max(self._size or 0, self.io.tell() // self.header_size)
        return written

    async def validate_chunk(self, height, chunk):
        previous_hash, previous_header, previous_previous_header = None, None, None
        if height > 0:
            raw = await self.get_raw_header(height-1)
            previous_header = self.deserialize(height-1, raw)
            previous_hash = self.hash_header(raw)
        if height > 1:
            previous_previous_header = await self.get(height-2)
        chunk_target = self.get_next_chunk_target(height // 2016 - 1)
        for current_hash, current_header in self._iterate_headers(height, chunk):
            block_target = self.get_next_block_target(chunk_target, previous_previous_header, previous_header)
            self.validate_header(height, current_hash, current_header, previous_hash, block_target)
            previous_previous_header = previous_header
            previous_header = current_header
            previous_hash = current_hash

    def validate_header(self, height: int, current_hash: bytes,
                        header: dict, previous_hash: bytes, target: ArithUint256):

        if previous_hash is None:
            if self.genesis_hash is not None and self.genesis_hash != current_hash:
                raise InvalidHeader(
                    height, f"genesis header doesn't match: {current_hash.decode()} "
                            f"vs expected {self.genesis_hash.decode()}")
            return

        if header['prev_block_hash'] != previous_hash:
            raise InvalidHeader(
                height, "previous hash mismatch: {} vs expected {}".format(
                    header['prev_block_hash'].decode(), previous_hash.decode())
            )

        if self.validate_difficulty:

            if header['bits'] != target.compact:
                raise InvalidHeader(
                    height, "bits mismatch: {} vs expected {}".format(
                        header['bits'], target.compact)
                )

            proof_of_work = self.get_proof_of_work(current_hash)
            if proof_of_work > target:
                raise InvalidHeader(
                    height, f"insufficient proof of work: {proof_of_work.value} vs target {target.value}"
                )

    async def repair(self, start_height=0):
        previous_header_hash = fail = None
        batch_size = 36
        for height in range(start_height, self.height, batch_size):
            headers = self._read(height, batch_size)
            if len(headers) % self.header_size != 0:
                headers = headers[:(len(headers) // self.header_size) * self.header_size]
            for header_hash, header in self._iterate_headers(height, headers):
                height = header['block_height']
                if previous_header_hash:
                    if header['prev_block_hash'] != previous_header_hash:
                        fail = True
                elif height == 0:
                    if header_hash != self.genesis_hash:
                        fail = True
                else:
                    # for sanity and clarity, since it is the only way we can end up here
                    assert start_height > 0 and height == start_height
                if fail:
                    log.warning("Header file corrupted at height %s, truncating it.", height - 1)
                    self.io.seek(max(0, (height - 1)) * self.header_size, os.SEEK_SET)
                    self.io.truncate()
                    self.io.flush()
                    self._size = self.io.seek(0, os.SEEK_END) // self.header_size
                    return
                previous_header_hash = header_hash

    @classmethod
    def get_proof_of_work(cls, header_hash: bytes):
        return ArithUint256(int(b'0x' + cls.header_hash_to_pow_hash(header_hash), 16))

    def _iterate_chunks(self, height: int, headers: bytes) -> Iterator[Tuple[int, bytes]]:
        assert len(headers) % self.header_size == 0, f"{len(headers)} {len(headers)%self.header_size}"
        start = 0
        end = (self.chunk_size - height % self.chunk_size) * self.header_size
        while start < end:
            yield height + (start // self.header_size), headers[start:end]
            start = end
            end = min(len(headers), end + self.chunk_size * self.header_size)

    def _iterate_headers(self, height: int, headers: bytes) -> Iterator[Tuple[bytes, dict]]:
        assert len(headers) % self.header_size == 0, len(headers)
        for idx in range(len(headers) // self.header_size):
            start, end = idx * self.header_size, (idx + 1) * self.header_size
            header = headers[start:end]
            yield self.hash_header(header), self.deserialize(height+idx, header)

    @staticmethod
    def header_hash_to_pow_hash(header_hash: bytes):
        header_hash_bytes = unhexlify(header_hash)[::-1]
        h = sha512(header_hash_bytes)
        pow_hash = double_sha256(
            ripemd160(h[:len(h) // 2]) +
            ripemd160(h[len(h) // 2:])
        )
        return hexlify(pow_hash[::-1])
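The `_read`/`_write` pair above treats the backing `BytesIO` as a flat array of fixed-size records and slices it through `getbuffer()`, copying only the slice that is actually returned. Below is a minimal, self-contained sketch of that pattern; the 16-byte record layout is invented for illustration and is not the class's real `header_size`.

from io import BytesIO
import struct

RECORD_SIZE = 16  # hypothetical fixed record size, not the real header_size

stream = BytesIO()
for n in range(4):
    # each record: two little-endian uint64 fields
    stream.write(struct.pack('<QQ', n, n * 10))

def read_records(io, index, count=1):
    # getbuffer() exposes the underlying buffer without copying;
    # only the requested slice is materialized as bytes
    offset = index * RECORD_SIZE
    return bytes(io.getbuffer()[offset: offset + RECORD_SIZE * count])

print(struct.unpack('<QQ', read_records(stream, 2)))  # -> (2, 20)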
Пример #50
0
    def binary_search(self,
                      picture,
                      size_target,
                      dimension,
                      dimension_factor,
                      rotation,
                      i,
                      max_i,
                      quality,
                      L,
                      R,
                      im_buffer=None):

        # It's possible that the picture file size is already less than the target
        # file size, but we can still rotate the image here.
        if picture.size < size_target:
            print("{} is already less than {} bytes".format(
                picture, size_target))
            im = Image.open(picture)
            if rotation == 90:
                im = im.transpose(Image.ROTATE_90)
            elif rotation == 180:
                im = im.transpose(Image.ROTATE_180)
            elif rotation == 270:
                im = im.transpose(Image.ROTATE_270)
            im_buffer = BytesIO()
            im.save(im_buffer, "JPEG", quality=quality)
            return im_buffer

        # If the maximum number of iterations have been reached, return
        if i > max_i:
            print("Max iterations have been reached for {}".format(picture))
            return im_buffer

        # Open the image file, alter its dimensions, and save it as a new BytesIO file
        # named 'im_buffer'.
        if quality <= 95:
            im = Image.open(picture)
            if rotation == 90:
                im = im.transpose(Image.ROTATE_90)
            elif rotation == 180:
                im = im.transpose(Image.ROTATE_180)
            elif rotation == 270:
                im = im.transpose(Image.ROTATE_270)
            new_dimension = (dimension[0] * dimension_factor,
                             dimension[1] * dimension_factor)
            im.thumbnail(new_dimension, Image.ANTIALIAS)
            # new_prefix = '{}x-'.format(dimension_factor)
            # new_name = new_prefix + name + '-' + str(dimension[0]) + '.jpg'
            im_buffer = BytesIO()
            im.save(im_buffer, "JPEG", quality=quality)

            # Use L and R pointers to move closer to a value for the 'quality' parameter
            # that produces an image with a file size, in bytes, as close to size_target
            # as possible using a binary search-type of algorithm.
            if im_buffer.getbuffer().nbytes < size_target:
                print(
                    'Resulting image size is LESS    than {} bytes:'.format(
                        size_target),
                    im_buffer.getbuffer().nbytes, 'bytes, quality =', quality)
                L = quality
                quality = int((R + L) / 2)
                return self.binary_search(picture, size_target, dimension,
                                          dimension_factor, rotation, i + 1,
                                          max_i, quality, L, R, im_buffer)
            elif im_buffer.getbuffer().nbytes > size_target:
                print(
                    'Resulting image size is GREATER than {} bytes:'.format(
                        size_target),
                    im_buffer.getbuffer().nbytes, 'bytes, quality =', quality)
                R = quality
                quality = int((R + L) / 2)
                return self.binary_search(picture, size_target, dimension,
                                          dimension_factor, rotation, i + 1,
                                          max_i, quality, L, R, im_buffer)
            else:
                print(
                    'Resulting image size EQUALS {} bytes:'.format(
                        size_target),
                    im_buffer.getbuffer().nbytes, 'bytes, quality =', quality)
                return im_buffer
        else:
            return im_buffer
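An iterative sketch of the same search, reduced to the essentials: re-encode at the midpoint quality, measure the in-memory JPEG with `getbuffer().nbytes`, and move the bounds. The function name, defaults, and loop shape are assumptions, not the method above; unlike the recursive version it never rescales or rotates the image, so it only converges as far as quality alone allows.

from io import BytesIO
from PIL import Image

def jpeg_quality_for_size(img, size_target, lo=1, hi=95, max_iter=10):
    """Binary-search the JPEG quality until the encoded size approaches size_target bytes.
    Assumes img is an RGB PIL image."""
    best = BytesIO()
    for _ in range(max_iter):
        quality = (lo + hi) // 2
        best = BytesIO()
        img.save(best, "JPEG", quality=quality)
        nbytes = best.getbuffer().nbytes  # encoded size, without copying the payload
        if nbytes == size_target or lo >= hi:
            break
        if nbytes < size_target:
            lo = quality + 1  # too small: allow higher quality
        else:
            hi = quality - 1  # too large: force lower quality
    return best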
Пример #51
0
def _write_to_s3(time: datetime, key: OutputGroupingKey,
                 events: List[EngineResult]) -> None:
    # 'version', 'title', 'dedup_period' of a rule might differ if the rule was modified
    # while the rules engine was running. We pick the first encountered set of values.

    group_info = MatchingGroupInfo(
        rule_id=key.rule_id,
        rule_version=events[0].rule_version,
        log_type=key.log_type,
        dedup=key.dedup,
        dedup_period_mins=events[0].dedup_period_mins,
        num_matches=len(events),
        title=events[0].title,
        processing_time=time,
        is_rule_error=key.is_rule_error)
    alert_info = update_get_alert_info(group_info)
    data_stream = BytesIO()
    writer = gzip.GzipFile(fileobj=data_stream, mode='wb')
    for event in events:
        serialized_data = _serialize_event(event, alert_info)
        writer.write(serialized_data)

    writer.close()
    data_stream.seek(0)
    output_uuid = uuid.uuid4()
    if key.is_rule_error:
        key_format = _RULE_ERRORS_KEY_FORMAT
    else:
        key_format = _RULE_MATCHES_KEY_FORMAT

    object_key = key_format.format(key.table_name(), time.year, time.month,
                                   time.day, time.hour, key.rule_id,
                                   time.strftime(_S3_KEY_DATE_FORMAT),
                                   output_uuid)

    byte_size = data_stream.getbuffer().nbytes
    # Write data to S3
    _S3_CLIENT.put_object(Bucket=_S3_BUCKET,
                          ContentType='gzip',
                          Body=data_stream,
                          Key=object_key)

    # If the object written contains events that caused a rule engine error
    # don't send an SNS notification. TODO: Remove this condition and send a notification once
    # the backend supports rule errors end to end.
    if key.is_rule_error:
        return
    # Send notification to SNS topic
    notification = _s3_put_object_notification(_S3_BUCKET, object_key,
                                               byte_size)

    # MessageAttributes are required so that subscribers to SNS topic can filter events in the subscription
    _SNS_CLIENT.publish(TopicArn=_SNS_TOPIC_ARN,
                        Message=json.dumps(notification),
                        MessageAttributes={
                            'type': {
                                'DataType': 'String',
                                'StringValue': 'RuleMatches'
                            },
                            'id': {
                                'DataType': 'String',
                                'StringValue': key.rule_id
                            }
                        })
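The compression step above follows a small but easy-to-get-wrong sequence: close the GzipFile so its trailer is flushed into the stream, seek(0) so the upload reads from the start, and take the size from getbuffer().nbytes without copying the body. A self-contained sketch with made-up records:

import gzip
import json
from io import BytesIO

records = [{"rule_id": "demo.rule", "n": i} for i in range(3)]  # hypothetical events

data_stream = BytesIO()
with gzip.GzipFile(fileobj=data_stream, mode='wb') as writer:
    for record in records:
        writer.write((json.dumps(record) + '\n').encode())
# leaving the with-block closes the gzip member and flushes its trailer into data_stream

data_stream.seek(0)                         # rewind before handing the stream to a client
byte_size = data_stream.getbuffer().nbytes  # compressed size, computed without a copy
print(byte_size, "compressed bytes ready to upload")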
Пример #52
0
def order_report(request):
    base = request.POST.get("base")
    quote = request.POST.get("quote")
    exchange = "KuCoin"  # only KuCoin provides dataset of orders

    value_lower_bound = float(request.POST.get("value_lower_bound"))
    value_upper_bound = float(request.POST.get("value_upper_bound"))
    value_nbins = int(request.POST.get("value_nbins"))

    quantity_lower_bound = float(request.POST.get("quantity_lower_bound"))
    quantity_upper_bound = float(request.POST.get("quantity_upper_bound"))
    quantity_nbins = int(request.POST.get("quantity_nbins"))

    symbol = Symbol.objects.get(base__name__iexact=base,
                                quote__name__iexact=quote)
    exchange = Exchange.objects.get(name__iexact=exchange)

    df = pd.DataFrame(
        list(
            Order.objects.filter(exchange=exchange,
                                 symbol=symbol).values("price", "amount",
                                                       "time")))

    df["price"] = pd.to_numeric(df["price"])
    df["quantity"] = pd.to_numeric(df["amount"])

    del df["amount"]

    df["value"] = df["price"] * df["quantity"]
    df.set_index("time", inplace=True)

    value_size = df.loc[(df["value"] >= value_lower_bound)
                        & (df["value"] <= value_upper_bound)]["value"]
    value_bins = pd.Series(
        np.linspace(value_lower_bound, value_upper_bound, value_nbins + 1))
    value_out = pd.cut(value_size, value_bins)
    value_out_norm = value_out.value_counts(sort=False)
    value_x = value_out_norm.index
    value_y = value_out_norm

    plt.xticks(rotation=90, fontsize=6)
    sns.barplot(x=value_x, y=value_y)
    plt.xlabel("Value Range of Orders")
    plt.ylabel("Count")
    plt.tight_layout()
    buf = BytesIO()
    plt.savefig(buf, format="png", dpi=settings.PLOT_DPI)
    value_data = base64.b64encode(buf.getbuffer()).decode("ascii")
    plt.show(block=False)

    quantity_size = df.loc[(df["quantity"] >= quantity_lower_bound) & (
        df["quantity"] <= quantity_upper_bound)]["quantity"]
    quantity_bins = pd.Series(
        np.linspace(quantity_lower_bound, quantity_upper_bound,
                    quantity_nbins + 1))
    quantity_out = pd.cut(quantity_size, quantity_bins)
    quantity_out_norm = quantity_out.value_counts(sort=False)
    quantity_x = quantity_out_norm.index
    quantity_y = quantity_out_norm

    plt.xticks(rotation=90, fontsize=6)
    sns.barplot(x=quantity_x, y=quantity_y)
    plt.xlabel("Quantity Range of Orders")
    plt.ylabel("Count")
    plt.tight_layout()
    buf = BytesIO()
    plt.savefig(buf, format="png", dpi=settings.PLOT_DPI)
    quantity_data = base64.b64encode(buf.getbuffer()).decode("ascii")
    plt.show(block=False)

    return JsonResponse({
        "html":
        render_to_string(
            "market/order_report.html", {
                "order_num":
                len(df),
                "value_src":
                f"data:image/png;base64,{value_data}",
                "quantity_src":
                f"data:image/png;base64,{quantity_data}",
                "symbol":
                symbol,
                "exchange":
                exchange,
                "describe":
                df.describe().to_html(
                    classes=
                    "table is-bordered is-striped is-hoverable is-fullwidth".
                    split()),
            })
    })
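Both charts above end with the same move: render the current Matplotlib figure into a BytesIO and base64-encode getbuffer() into a data URI the template can drop into an <img> tag. A minimal sketch of just that step; the plotted data is arbitrary, and closing the figure (instead of plt.show(block=False)) is a choice made here, not what the view does.

import base64
from io import BytesIO

import matplotlib
matplotlib.use("Agg")  # headless backend for server-side rendering
import matplotlib.pyplot as plt

plt.plot([1, 2, 3], [2, 1, 3])
buf = BytesIO()
plt.savefig(buf, format="png")
plt.close()

data = base64.b64encode(buf.getbuffer()).decode("ascii")  # b64encode accepts the memoryview
img_src = f"data:image/png;base64,{data}"                 # ready for an <img src=...> attribute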
Пример #53
0
class DolFile(object):
    def __init__(self, f):
        self._rawdata = BytesIO(f.read())
        f.seek(0)
        fileoffset = 0
        addressoffset = 0x48
        sizeoffset = 0x90

        self._text = []
        self._data = []

        nomoretext = False
        nomoredata = False

        self._current_end = None

        # Read text and data section addresses and sizes
        for i in range(18):
            f.seek(fileoffset + i * 4)
            offset = read_uint32(f)
            f.seek(addressoffset + i * 4)
            address = read_uint32(f)
            f.seek(sizeoffset + i * 4)
            size = read_uint32(f)

            if i <= 6:
                if offset != 0:
                    self._text.append((offset, address, size))
                    # print("text{0}".format(i), hex(offset), hex(address), hex(size))
            else:
                datanum = i - 7
                if offset != 0:
                    self._data.append((offset, address, size))
                    # print("data{0}".format(datanum), hex(offset), hex(address), hex(size))

        f.seek(0xD8)
        self.bssaddr = read_uint32(f)
        self.bsssize = read_uint32(f)

        #self.bss = BytesIO(self._rawdata.getbuffer()[self._bssaddr:self._bssaddr+self.bsssize])

        self._curraddr = self._text[0][1]
        self.seek(self._curraddr)

    @property
    def sections(self):
        for i in self._text:
            yield i
        for i in self._data:
            yield i

        return

    # Internal function for resolving a gc address
    def _resolve_address(self, gc_addr):
        for offset, address, size in self.sections:
            if address <= gc_addr < address + size:
                return offset, address, size
        """for offset, address, size in self._text:
            if address <= gc_addr < address+size:
                return offset, address, size 
        for offset, address, size in self._data:
            if address <= gc_addr < address+size:
                return offset, address, size """

        raise UnmappedAddress("Unmapped address: {0}".format(hex(gc_addr)))

    def _adjust_header(self):
        curr = self._rawdata.tell()
        fileoffset = 0
        addressoffset = 0x48
        sizeoffset = 0x90
        f = self._rawdata

        i = 0
        for offset, address, size in self._text:
            f.seek(fileoffset + i * 4)
            write_uint32(f, offset)
            f.seek(addressoffset + i * 4)
            write_uint32(f, address)
            f.seek(sizeoffset + i * 4)
            write_uint32(f, size)
            i += 1

        i = 7
        for offset, address, size in self._data:
            f.seek(fileoffset + i * 4)
            write_uint32(f, offset)
            f.seek(addressoffset + i * 4)
            write_uint32(f, address)
            f.seek(sizeoffset + i * 4)
            write_uint32(f, size)
            i += 1

        f.seek(0xD8)
        write_uint32(f, self.bssaddr)
        write_uint32(f, self.bsssize)

        f.seek(curr)

    # Unsupported: Reading an entire dol file
    # Assumption: A read should not go beyond the current section
    def read(self, size):
        if self._curraddr + size > self._current_end:
            raise RuntimeError("Read goes over current section")

        data = self._rawdata.read(size)
        self._curraddr += size
        return data

    # Assumption: A write should not go beyond the current section
    def write(self, data):
        if self._curraddr + len(data) > self._current_end:
            raise RuntimeError("Write goes over current section")

        self._rawdata.write(data)
        self._curraddr += len(data)

    def seek(self, addr):
        offset, gc_start, gc_size = self._resolve_address(addr)
        self._rawdata.seek(offset + (addr - gc_start))

        self._curraddr = addr
        self._current_end = gc_start + gc_size

    def _add_section(self, newsize, section, addr=None):
        if addr is not None:
            last_addr = addr
        else:
            last_addr = 0
        last_offset = 0

        for offset, address, size in self.sections:
            if last_addr < address + size:
                last_addr = address + size
            if last_offset < offset + size:
                last_offset = offset + size

        if last_addr < self.bssaddr + self.bsssize:
            last_addr = self.bssaddr + self.bsssize

        section.append((last_offset, last_addr, newsize))
        curr = self._rawdata.tell()
        self._rawdata.seek(last_offset)
        self._rawdata.write(b" " * newsize)
        self._rawdata.seek(curr)

        return (last_offset, last_addr, newsize)

    def allocate_text_section(self, size, addr=None):
        assert len(self._text) <= 7
        if len(self._text) >= 7:
            raise SectionCountFull("Maximum amount of text sections reached!")

        return self._add_section(size, self._text, addr)

    def allocate_data_section(self, size, addr=None):
        assert len(self._data) <= 11
        if len(self._data) >= 11:
            raise SectionCountFull("Maximum amount of data sections reached!")

        return self._add_section(size, self._data, addr)

    def tell(self):
        return self._curraddr

    def save(self, f):
        self._adjust_header()
        f.write(self._rawdata.getbuffer())

    def print_info(self):
        print("Dol Info:")
        i = 0
        for offset, addr, size in self._text:
            print("text{0}: fileoffset {1:x}, addr {2:x}, size {3:x}".format(
                i, offset, addr, size))
            i += 1
        i = 0

        for offset, addr, size in self._data:
            print("data{0}: fileoffset {1:x}, addr {2:x}, size {3:x}".format(
                i, offset, addr, size))
            i += 1

        print("bss addr: {0:x}, bss size: {1:x}, bss end: {2:x}".format(
            self.bssaddr, self.bsssize, self.bssaddr + self.bsssize))
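A hedged usage sketch for the class above: open a DOL image, overwrite one instruction word inside an existing section, and save the result. The file names, the patch address, and the opcode are placeholders, and a real .dol file is assumed; writes must land inside a mapped section or write() raises.

# Hypothetical round trip; "game.dol" and the patch are placeholders.
with open("game.dol", "rb") as f:
    dol = DolFile(f)

dol.print_info()

patch_addr = next(iter(dol.sections))[1]  # start address of the first text section
dol.seek(patch_addr)
dol.write(b"\x60\x00\x00\x00")            # e.g. a PowerPC nop, 4 bytes

with open("game.patched.dol", "wb") as f:
    dol.save(f)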
Пример #54
0
def create_graph(name, name1, stat, stat1, id):
    fig = Figure()
    ax = fig.subplots()
    bar1 = get_data(name, stat)
    session_time_name = get_data(name, 'datetime')
    session_time_name_1 = get_data(name1, 'datetime')
    session_time = session_time_name
    if len(session_time_name) != len(session_time_name_1):
        if len(session_time_name) >= len(session_time_name_1):
            session_time = session_time_name
        if len(session_time_name_1) >= len(session_time_name):
            session_time = session_time_name_1
    labels = []
    bar1_list = []
    bar2_list = []
    if id == 2:
        bar2 = get_data(name1, stat1)
        if len(bar1) >= len(bar2):
            difference = int(len(bar1) - len(bar2))
        if len(bar2) >= len(bar1):
            difference = int(len(bar2) - len(bar1))

        if len(bar1) != len(bar2):
            for x in range(difference):
                if len(bar1) <= len(bar2):
                    bar1_list.append(int(0))
                elif len(bar1) >= len(bar2):
                    bar2_list.append(int(0))
                elif len(bar2) <= len(bar1):
                    bar2_list.append(int(0))
                elif len(bar2) >= len(bar1):
                    bar1_list.append(0)
        for st2 in bar2:
            for s2 in st2:
                bar2_list.append(s2)
    for st1 in bar1:
        for s1 in st1:
            bar1_list.append(s1)
    for label in session_time:
        for l in label:
            labels.append(l)

    x = np.arange(len(bar1_list))
    width = 0.35
    if id == 1:
        rects1 = ax.bar(x - width / 800,
                        bar1_list,
                        width,
                        label=f"{name} {stat}")
    if id == 2:
        rects1 = ax.bar(x - width / 2,
                        bar1_list,
                        width,
                        label=f"{name} {stat}")
        rects2 = ax.bar(x + width / 2,
                        bar2_list,
                        width,
                        label=f"{name1} {stat1}")
    ax.set_ylabel('Stats')
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend(bbox_to_anchor=(0., 1.02, 1., .102),
              loc='lower left',
              ncol=2,
              mode="expand",
              borderaxespad=0.)
    ax.bar_label(rects1, padding=3)
    if id == 2:
        ax.bar_label(rects2, padding=3)
    buf = BytesIO()
    fig.savefig(buf, format="png")
    data = base64.b64encode(buf.getbuffer()).decode("ascii")
    labels.clear()
    bar1_list.clear()
    bar2_list.clear()
    return html + f"""
Пример #55
0
class AnimFile:
    def __init__(self, split=False, old=True):

        self.old = old
        self.split = split

        if old:
            self.raw_data = BytesIO()
        else:
            self.afm2 = AFM2()
            self.afsa, self.afsb = (AFSA(), AFSB()) if split else (None, None)

    def read(self, f):

        if self.old:
            self.raw_data.write(f.read())
            self.raw_data.seek(0)

        else:

            while True:

                try:

                    magic = f.read(4).decode('utf-8')

                except EOFError:
                    break

                except struct.error:
                    break

                except UnicodeDecodeError:
                    print('\nAttempted reading non-chunked data.')
                    break

                if not magic:
                    break

                magic_lower = magic.lower()

                local_chunk = getattr(self, magic_lower)

                if not local_chunk:
                    setattr(self, magic_lower, chunk().read(f))
                else:
                    local_chunk.read(f)

        return self

    def write(self, f):

        if self.old:
            f.write(bytes(self.raw_data.getbuffer()))

        else:

            self.afm2.write(f)

            if self.afsa:
                self.afsa.write(f)

            if self.afsb:
                self.afsb.write(f)

        return self
Пример #56
0
w = BytesIO()
w.write(magic)

# write the TLV
for file in files:
    filepath = f"{dirpath}{os.sep}{file}"
    with open(filepath, 'rb') as f:
        file = os.path.basename(file)
        b = f.read()
        z = zlib.compress(b)
        w.write(
            struct.pack(f'<{len(file) + 1}sII{len(z)}s',
                        file.encode('utf8') + b'\x00', len(z), len(b), z))

charset = (string.ascii_letters + string.digits).encode()

data_buf = w.getbuffer()[len(magic):]
# otp = [random.choice(charset) for i in range(len(data_buf))]
otp = []
with open('./baby-parser3/otp.key', 'rb') as f:
    otp = list(f.read())
buf = [b ^ otp[i] for i, b in enumerate(data_buf)]
w.getbuffer()[len(magic):] = bytes(buf)

# with open('./build/otp.key', 'wb') as f:
#     f.write(bytes(otp))

os.makedirs('build', exist_ok=True)
with open('./build/baby-parser3.wh', 'wb') as f:
    f.write(w.getvalue())
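The part worth isolating above is that getbuffer() hands back a writable memoryview, so the XOR pass can be applied in place with a slice assignment instead of rebuilding the stream. A self-contained sketch with an invented magic, payload, and key:

from io import BytesIO

magic = b"DEMO"                    # placeholder header, not the real format's magic
payload = b"hello tlv world"
key = bytes((i * 7 + 3) % 256 for i in range(len(payload)))  # toy one-time pad

w = BytesIO()
w.write(magic)
w.write(payload)

view = w.getbuffer()               # writable view over the stream's buffer
view[len(magic):] = bytes(b ^ k for b, k in zip(view[len(magic):], key))
view.release()                     # release the export so the BytesIO can be resized again

assert w.getvalue()[:len(magic)] == magic
assert w.getvalue()[len(magic):] != payload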
Пример #57
0
    def do_GET(self):
        lastlimit = "00"
        lastdetect = "00"
        downscale = argdownscale
        matches = 2
        possiblematch = "00"
        timeInterval = 1
        lastTime = 0
        if self.path.endswith('.mjpg'):
            self.send_response(200)
            self.send_header(
                'Content-type',
                'multipart/x-mixed-replace; boundary=--jpgboundary')
            self.end_headers()

            while True:
                try:
                    # code removed from here; kept in notes
                    rc, img = capture.read()
                    origframe = img
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    img = cv2.resize(img, (128, 128))
                    comp = "Unknown"
                    comp, amnt = run_flann(img)

                    if comp != "Unknown":
                        if comp != lastlimit:
                            if comp == lastdetect:
                                possiblematch = comp
                                matches = matches + 1
                                if matches >= argmatches:
                                    lastlimit = possiblematch
                                    matches = 0
                            else:
                                possiblematch = "00"
                                matches = 0
                    else:
                        #print("Unknow speed limit")
                        comp = lastdetect

                    lastdetect = comp
                    cv2.putText(
                        origframe,
                        "Current speed limit: " + str(lastlimit) + " km/h.",
                        (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
                        2)

                    imgRGB = origframe
                    imgRGB = cv2.resize(imgRGB, (320, 240))
                    jpg = Image.fromarray(imgRGB)
                    tmpFile = BytesIO()
                    jpg.save(tmpFile, 'JPEG')
                    self.wfile.write("--jpgboundary".encode())
                    self.send_header('Content-type', 'image/jpeg')
                    self.send_header('Content-length',
                                     str(tmpFile.getbuffer().nbytes))
                    self.end_headers()
                    jpg.save(self.wfile, 'JPEG')

                except KeyboardInterrupt:
                    break
            return
        if self.path.endswith('.html'):
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write('<html><head></head><body>'.encode())
            self.wfile.write(
                '<img src="http://172.20.10.9:8080/cam.mjpg"/>'.encode())
            self.wfile.write('</body></html>'.encode())
            return
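The temporary BytesIO above exists so the Content-length header can be sent before the JPEG body: the frame has to be fully encoded first, and getbuffer().nbytes reports its size without copying it. A minimal sketch of preparing one multipart frame; the synthetic image and the literal header bytes are assumptions (the handler above emits its headers through send_header).

from io import BytesIO
from PIL import Image

frame = Image.new("RGB", (320, 240))      # stand-in for a captured camera frame

tmp = BytesIO()
frame.save(tmp, "JPEG")
content_length = tmp.getbuffer().nbytes   # known only once encoding is finished

part = (b"--jpgboundary\r\n"
        b"Content-type: image/jpeg\r\n"
        b"Content-length: " + str(content_length).encode() + b"\r\n\r\n"
        + tmp.getvalue() + b"\r\n")       # one frame of the multipart stream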
Пример #58
0
def update_redis_stats():
    """Generate the system stats and save them in redis

    Returns
    -------
    list of str
        artifact filepaths that are not present in the file system
    """
    STUDY = qdb.study.Study

    number_studies = {'public': 0, 'private': 0, 'sandbox': 0}
    number_of_samples = {'public': 0, 'private': 0, 'sandbox': 0}
    num_studies_ebi = 0
    num_samples_ebi = 0
    number_samples_ebi_prep = 0
    stats = []
    missing_files = []
    per_data_type_stats = Counter()
    for study in STUDY.iter():
        st = study.sample_template
        if st is None:
            continue

        # counting samples submitted to EBI-ENA
        len_samples_ebi = sum(
            [esa is not None for esa in st.ebi_sample_accessions.values()])
        if len_samples_ebi != 0:
            num_studies_ebi += 1
            num_samples_ebi += len_samples_ebi

        samples_status = defaultdict(set)
        for pt in study.prep_templates():
            pt_samples = list(pt.keys())
            pt_status = pt.status
            if pt_status == 'public':
                per_data_type_stats[pt.data_type()] += len(pt_samples)
            samples_status[pt_status].update(pt_samples)
            # counting experiments (samples in preps) submitted to EBI-ENA
            number_samples_ebi_prep += sum([
                esa is not None
                for esa in pt.ebi_experiment_accessions.values()
            ])

        # counting studies
        if 'public' in samples_status:
            number_studies['public'] += 1
        elif 'private' in samples_status:
            number_studies['private'] += 1
        else:
            # note that this is a catch all for other status; at time of
            # writing there is status: awaiting_approval
            number_studies['sandbox'] += 1

        # counting samples; note that some of these lines could be merged with
        # the block above but I decided to split it in 2 for clarity
        if 'public' in samples_status:
            number_of_samples['public'] += len(samples_status['public'])
        if 'private' in samples_status:
            number_of_samples['private'] += len(samples_status['private'])
        if 'sandbox' in samples_status:
            number_of_samples['sandbox'] += len(samples_status['sandbox'])

        # processing filepaths
        for artifact in study.artifacts():
            for adata in artifact.filepaths:
                try:
                    s = stat(adata['fp'])
                except OSError:
                    missing_files.append(adata['fp'])
                else:
                    stats.append((adata['fp_type'], s.st_size,
                                  strftime('%Y-%m', localtime(s.st_mtime))))

    num_users = qdb.util.get_count('qiita.qiita_user')
    num_processing_jobs = qdb.util.get_count('qiita.processing_job')

    lat_longs = dumps(get_lat_longs())

    summary = {}
    all_dates = []
    # these are some filetypes that are too small to plot alone so we'll merge
    # in other
    group_other = {
        'html_summary', 'tgz', 'directory', 'raw_fasta', 'log', 'biom',
        'raw_sff', 'raw_qual', 'qza', 'html_summary_dir', 'qza', 'plain_text',
        'raw_barcodes'
    }
    for ft, size, ym in stats:
        if ft in group_other:
            ft = 'other'
        if ft not in summary:
            summary[ft] = {}
        if ym not in summary[ft]:
            summary[ft][ym] = 0
            all_dates.append(ym)
        summary[ft][ym] += size
    all_dates = sorted(set(all_dates))

    # sorting summaries
    ordered_summary = {}
    for dt in summary:
        new_list = []
        current_value = 0
        for ad in all_dates:
            if ad in summary[dt]:
                current_value += summary[dt][ad]
            new_list.append(current_value)
        ordered_summary[dt] = new_list

    plot_order = sorted([(k, ordered_summary[k][-1]) for k in ordered_summary],
                        key=lambda x: x[1])

    # helper function to generate y axis, modified from:
    # http://stackoverflow.com/a/1094933
    def sizeof_fmt(value, position):
        number = None
        for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
            if abs(value) < 1024.0:
                number = "%3.1f%s" % (value, unit)
                break
            value /= 1024.0
        if number is None:
            number = "%.1f%s" % (value, 'Yi')
        return number

    all_dates_axis = range(len(all_dates))
    plt.locator_params(axis='y', nbins=10)
    plt.figure(figsize=(20, 10))
    for k, v in plot_order:
        plt.plot(all_dates_axis, ordered_summary[k], linewidth=2, label=k)

    plt.xticks(all_dates_axis, all_dates)
    plt.legend()
    plt.grid()
    ax = plt.gca()
    ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(sizeof_fmt))
    plt.xticks(rotation=90)
    plt.xlabel('Date')
    plt.ylabel('Storage space per data type')

    plot = BytesIO()
    plt.savefig(plot, format='png')
    plot.seek(0)
    img = 'data:image/png;base64,' + quote(b64encode(plot.getbuffer()))

    time = datetime.now().strftime('%m-%d-%y %H:%M:%S')

    portal = qiita_config.portal
    # making sure per_data_type_stats has some data so hmset doesn't fail
    if per_data_type_stats == {}:
        per_data_type_stats['No data'] = 0

    vals = [('number_studies', number_studies, r_client.hmset),
            ('number_of_samples', number_of_samples, r_client.hmset),
            ('per_data_type_stats', dict(per_data_type_stats), r_client.hmset),
            ('num_users', num_users, r_client.set),
            ('lat_longs', (lat_longs), r_client.set),
            ('num_studies_ebi', num_studies_ebi, r_client.set),
            ('num_samples_ebi', num_samples_ebi, r_client.set),
            ('number_samples_ebi_prep', number_samples_ebi_prep, r_client.set),
            ('img', img, r_client.set), ('time', time, r_client.set),
            ('num_processing_jobs', num_processing_jobs, r_client.set)]
    for k, v, f in vals:
        redis_key = '%s:stats:%s' % (portal, k)
        # important to "flush" variables to avoid errors
        r_client.delete(redis_key)
        f(redis_key, v)

    # preparing vals to insert into DB
    vals = dumps(dict([x[:-1] for x in vals]))
    with qdb.sql_connection.TRN:
        sql = """INSERT INTO qiita.stats_daily (stats, stats_timestamp)
                 VALUES (%s, NOW())"""
        qdb.sql_connection.TRN.add(sql, [vals])
        qdb.sql_connection.TRN.execute()

    return missing_files
Пример #59
0
def mock_text_box(mock_text_box_stream, tmpdir_factory):
    file = BytesIO(mock_text_box_stream)
    fn = tmpdir_factory.mktemp("data").join("mock_text_box_file.png")
    with open(fn, 'wb') as f:
        f.write(file.getbuffer())
    return str(fn)
Пример #60
0
def mock_cord_dataset(tmpdir_factory, mock_image_stream):
    root = tmpdir_factory.mktemp('datasets')
    cord_root = root.mkdir('cord_train')
    image_folder = cord_root.mkdir("image")
    annotations_folder = cord_root.mkdir("json")
    labels = {
        "dontcare": [],
        "valid_line": [{
            "words": [{
                "quad": {
                    "x2": 270,
                    "y3": 390,
                    "x3": 270,
                    "y4": 390,
                    "x1": 256,
                    "y1": 374,
                    "x4": 256,
                    "y2": 374
                },
                "is_key": 0,
                "row_id": 2179893,
                "text": "I"
            }],
            "category":
            "menu.cnt",
            "group_id":
            3
        }, {
            "words": [{
                "quad": {
                    "x2": 270,
                    "y3": 418,
                    "x3": 270,
                    "y4": 418,
                    "x1": 258,
                    "y1": 402,
                    "x4": 258,
                    "y2": 402
                },
                "is_key": 0,
                "row_id": 2179894,
                "text": "am"
            }],
            "category":
            "menu.cnt",
            "group_id":
            4
        }, {
            "words": [{
                "quad": {
                    "x2": 272,
                    "y3": 444,
                    "x3": 272,
                    "y4": 444,
                    "x1": 258,
                    "y1": 428,
                    "x4": 258,
                    "y2": 428
                },
                "is_key": 0,
                "row_id": 2179895,
                "text": "Luke"
            }],
            "category":
            "menu.cnt",
            "group_id":
            5
        }]
    }

    file = BytesIO(mock_image_stream)
    for i in range(3):
        fn_i = image_folder.join(f"receipt_{i}.png")
        with open(fn_i, 'wb') as f:
            f.write(file.getbuffer())
        fn_l = annotations_folder.join(f"receipt_{i}.json")
        with open(fn_l, 'w') as f:
            json.dump(labels, f)

    # Packing data into an archive to simulate the real data set and bypass archive extraction
    archive_path = root.join('cord_train.zip')
    shutil.make_archive(root.join('cord_train'), 'zip', str(cord_root))
    return str(archive_path)