def test_empty(self):
    c1, c2, c3 = build_commit_graph(
        self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
    self.repo.refs[b"HEAD"] = c3.id
    outstream = BytesIO()
    porcelain.diff_tree(
        self.repo.path, c2.tree, c3.tree, outstream=outstream)
    self.assertEqual(outstream.getvalue(), b"")

def test_read_opts():
    # tests if read is seeing option sets, at initialization and after
    # initialization
    arr = np.arange(6).reshape(1, 6)
    stream = BytesIO()
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    back_dict = rdr.get_variables()
    rarr = back_dict['a']
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, squeeze_me=True)
    assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
    rdr.squeeze_me = False
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, byte_order=boc.native_code)
    assert_array_equal(rdr.get_variables()['a'], arr)
    # inverted byte code leads to error on read because of swapped
    # header etc
    rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
    assert_raises(Exception, rdr.get_variables)
    rdr.byte_order = boc.native_code
    assert_array_equal(rdr.get_variables()['a'], arr)
    arr = np.array(['a string'])
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    assert_array_equal(rdr.get_variables()['a'], arr)
    rdr = MatFile5Reader(stream, chars_as_strings=False)
    carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
    assert_array_equal(rdr.get_variables()['a'], carr)
    rdr.chars_as_strings = True
    assert_array_equal(rdr.get_variables()['a'], arr)

def test_read_atom(self):
    """Test that all fields are read from PDB ATOM records"""
    s = BytesIO()
    # PDB is fixed-format; we should be able to read coordinates even
    # without spaces between them (column spacing reconstructed below to
    # match the standard ATOM record layout and the assertions that follow)
    s.write(b'ATOM      1  N   ALA A   5    3000.0001000.4002000.600'
            b'  2.00  6.40           N\n')
    s.seek(0)
    m = IMP.Model()
    pdb = IMP.atom.read_pdb(s, m)
    atoms = IMP.atom.get_by_type(pdb, IMP.atom.ATOM_TYPE)
    self.assertEqual(len(atoms), 1)
    a = IMP.atom.Atom(atoms[0])
    r = IMP.atom.Residue(a.get_parent())
    c = IMP.atom.Chain(r.get_parent())
    self.assertEqual(a.get_input_index(), 1)
    self.assertEqual(a.get_atom_type().get_string(), 'N')
    # Note: currently don't read alternate location or insertion code
    self.assertEqual(r.get_residue_type().get_string(), 'ALA')
    self.assertEqual(c.get_id(), 'A')
    self.assertEqual(r.get_index(), 5)
    coord = IMP.core.XYZ(a).get_coordinates()
    self.assertAlmostEqual(coord[0], 3000.000, delta=0.001)
    self.assertAlmostEqual(coord[1], 1000.400, delta=0.001)
    self.assertAlmostEqual(coord[2], 2000.600, delta=0.001)
    self.assertAlmostEqual(a.get_occupancy(), 2.00, delta=0.01)
    self.assertAlmostEqual(a.get_temperature_factor(), 6.40, delta=0.01)

def open(self, name, mode='rb'):
    resp = self.b2.download_file(name)
    output = BytesIO()
    output.write(resp)
    output.seek(0)
    return File(output, name)

def _serialize_properties(self):
    """serialize the 'properties' attribute (a dictionary) into
    the raw bytes making up a set of property flags and a
    property list, suitable for putting into a content frame header."""
    shift = 15
    flag_bits = 0
    flags = []
    sformat, svalues = [], []
    props = self.properties
    props.setdefault('content_encoding', 'utf-8')
    for key, proptype in self.PROPERTIES:
        val = props.get(key, None)
        if val is not None:
            if shift == 0:
                flags.append(flag_bits)
                flag_bits = 0
                shift = 15
            flag_bits |= (1 << shift)
            if proptype != 'bit':
                sformat.append(proptype)
                svalues.append(val)
        shift -= 1
    flags.append(flag_bits)

    result = BytesIO()
    write = result.write
    for flag_bits in flags:
        write(pack('>H', flag_bits))
    write(dumps(''.join(sformat), svalues))
    return result.getvalue()

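# A minimal, self-contained sketch of the flag packing performed above,
# assuming nothing beyond the standard library: each declared property owns
# a fixed bit starting at bit 15 of a big-endian 16-bit word, and bit 0 of
# every word is reserved by AMQP 0-9-1 as a continuation flag (like the
# method above, this sketch only ever fills bits 15..1).
from io import BytesIO
from struct import pack

def pack_property_flags(present_flags):
    words, word, shift = [], 0, 15
    for is_present in present_flags:
        if shift == 0:
            words.append(word)
            word, shift = 0, 15
        if is_present:
            word |= 1 << shift
        shift -= 1
    words.append(word)
    out = BytesIO()
    for w in words:
        out.write(pack('>H', w))
    return out.getvalue()

# only the first of three properties present -> bit 15 set
assert pack_property_flags([True, False, False]) == b'\x80\x00'
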
def download_sentiments(self):
    print("Download sentiment data from webservice")
    f1 = open('../tmp/results.txt', 'w+')
    for i in range(self.data[:, ].shape[0]):
        print("Downloading entry " + str(i))
        escaped = re.escape(self.data[i, 1])
        post_data = {'text': escaped}
        buffer = BytesIO()
        c = pycurl.Curl()
        c.setopt(c.URL, 'http://text-processing.com/api/sentiment/')
        # Python 2 API; on Python 3 use urllib.parse.urlencode instead
        c.setopt(c.POSTFIELDS, urllib.urlencode(post_data))
        c.setopt(c.WRITEDATA, buffer)
        c.perform()
        c.close()
        body = buffer.getvalue()
        # Body is a byte string.
        # We have to know the encoding in order to print it to a text file
        # such as standard output.
        response = body.decode('utf-8')
        self.results.append(response)
    f1.write(str(self.results))

class TestFileUploadParser(TestCase):
    def setUp(self):
        class MockRequest(object):
            pass
        from io import BytesIO
        self.stream = BytesIO(
            "Test text file".encode('utf-8')
        )
        request = MockRequest()
        request.upload_handlers = (MemoryFileUploadHandler(),)
        request.META = {
            'HTTP_CONTENT_DISPOSITION': 'Content-Disposition: inline; filename=file.txt'.encode('utf-8'),
            'HTTP_CONTENT_LENGTH': 14,
        }
        self.parser_context = {'request': request, 'kwargs': {}}

    def test_parse(self):
        """ Make sure the `QueryDict` works OK """
        parser = FileUploadParser()
        self.stream.seek(0)
        data_and_files = parser.parse(self.stream, None, self.parser_context)
        file_obj = data_and_files.files['file']
        self.assertEqual(file_obj._size, 14)

    def test_get_filename(self):
        parser = FileUploadParser()
        filename = parser.get_filename(self.stream, None, self.parser_context)
        self.assertEqual(filename, 'file.txt'.encode('utf-8'))

def _process(self):
    f = request.files['logo']
    try:
        img = Image.open(f)
    except IOError:
        flash(_('You cannot upload this file as a logo.'), 'error')
        return jsonify_data(content=None)
    if img.format.lower() not in {'jpeg', 'png', 'gif'}:
        flash(_('The file has an invalid format ({format})').format(format=img.format), 'error')
        return jsonify_data(content=None)
    if img.mode == 'CMYK':
        flash(_('The logo you uploaded is using the CMYK colorspace and has been converted to RGB. '
                'Please check if the colors are correct and convert it manually if necessary.'), 'warning')
        img = img.convert('RGB')
    image_bytes = BytesIO()
    img.save(image_bytes, 'PNG')
    image_bytes.seek(0)
    content = image_bytes.read()
    self.event.logo = content
    self.event.logo_metadata = {
        'hash': crc32(content),
        'size': len(content),
        'filename': os.path.splitext(secure_filename(f.filename, 'logo'))[0] + '.png',
        'content_type': 'image/png'
    }
    flash(_('New logo saved'), 'success')
    logger.info("New logo '%s' uploaded by %s (%s)", f.filename, session.user, self.event)
    return jsonify_data(content=get_logo_data(self.event))

def test_simple_bytesio(self):
    f = BytesIO()
    c = Commit()
    c.committer = c.author = b"Jelmer <*****@*****.**>"
    c.commit_time = c.author_time = 1271350201
    c.commit_timezone = c.author_timezone = 0
    c.message = b"This is the first line\nAnd this is the second line.\n"
    c.tree = Tree().id
    write_commit_patch(f, c, b"CONTENTS", (1, 1), version="custom")
    f.seek(0)
    lines = f.readlines()
    self.assertTrue(lines[0].startswith(
        b"From 0b0d34d1b5b596c928adc9a727a4b9e03d025298"))
    self.assertEqual(lines[1], b"From: Jelmer <*****@*****.**>\n")
    self.assertTrue(lines[2].startswith(b"Date: "))
    self.assertEqual(
        [
            b"Subject: [PATCH 1/1] This is the first line\n",
            b"And this is the second line.\n",
            b"\n",
            b"\n",
            b"---\n",
        ],
        lines[3:8],
    )
    self.assertEqual([b"CONTENTS-- \n", b"custom\n"], lines[-2:])
    if len(lines) >= 12:
        # diffstat may not be present
        self.assertEqual(lines[8], b" 0 files changed\n")

def process(self, response:Response, responseCnt:ResponseContent, **keyargs):
    '''
    @see: HandlerProcessorProceed.process
    '''
    assert isinstance(response, Response), 'Invalid response %s' % response
    assert isinstance(responseCnt, ResponseContent), 'Invalid response content %s' % responseCnt

    if response.isSuccess is False: return  # Skip in case the response is in error
    if Response.encoder not in response: return  # Skip in case there is no encoder to render
    assert callable(response.renderFactory), 'Invalid response renderer factory %s' % response.renderFactory

    output = BytesIO()
    render = response.renderFactory(output)
    assert isinstance(render, IRender), 'Invalid render %s' % render

    resolve = Resolve(response.encoder).request(value=response.obj, render=render, **response.encoderData or {})
    if not self.allowChunked and ResponseContent.length not in responseCnt:
        while resolve.has(): resolve.do()
        content = output.getvalue()
        responseCnt.length = len(content)
        responseCnt.source = (content,)
        output.close()
    else:
        responseCnt.source = self.renderAsGenerator(resolve, output, self.bufferSize)

def create_dummy_image():
    file = BytesIO()
    image = Image.new('RGBA', size=(50, 50), color=(155, 0, 0))
    image.save(file, 'png')
    file.name = 'test_image.png'
    file.seek(0)
    return file

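# A short usage sketch (hypothetical check, not part of the original helper):
# the in-memory PNG behaves like a real file, so PIL can open it straight back.
def _check_dummy_image():
    img = Image.open(create_dummy_image())
    assert img.format == 'PNG'
    assert img.size == (50, 50)
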
def resguardoPdf(request, pk):
    resguardo = Resguardo.objects.get(id=pk)
    nombre = 'resguardo_' + str(resguardo.id)
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment;filename=resguardo.pdf'
    buffer = BytesIO()
    c = canvas.Canvas(buffer, pagesize=letter)

    def header():
        c.drawImage('static/images/CFPPCH.png', 10, 670, 130, 130)
        c.setLineWidth(.3)
        c.setFont('Helvetica-Bold', 20)
        c.drawString(120, 750, 'CEFPP')
        c.drawString(160, 740, '')  # text argument missing in the original
        c.setFont('Helvetica-Bold', 15)
        c.drawString(480, 750, 'Inventario')
        c.setFillColorRGB(255, 0, 0)
        c.setFont('Helvetica', 12)
        c.drawString(485, 735, resguardo.inventario)
        c.line(460, 747, 560, 747)

    header()
    c.showPage()
    c.save()
    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response

def overwrite_file(node_id, file_name):
    params = '?suppress=deduplication'  # suppresses 409 response
    buffer = BytesIO()
    c = pycurl.Curl()
    c.setopt(c.URL, oauth.get_content_url() + 'nodes/' + node_id + '/content' + params)
    c.setopt(c.HTTPHEADER, ['Authorization: ' + oauth.get_auth_token()])
    c.setopt(c.WRITEDATA, buffer)
    c.setopt(c.HTTPPOST, [('content', (c.FORM_FILE, file_name.encode('UTF-8')))])
    c.setopt(c.CUSTOMREQUEST, 'PUT')
    c.setopt(c.NOPROGRESS, 0)
    c.setopt(c.PROGRESSFUNCTION, progress)
    if logger.getEffectiveLevel() == logging.DEBUG:
        c.setopt(c.VERBOSE, 1)

    try:
        c.perform()
    except pycurl.error as e:
        raise RequestError(0, e)

    status = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    print()  # break progress line

    body = buffer.getvalue().decode('utf-8')

    if status != http.OK:
        # print('Overwriting "%s" failed.' % file_name)
        raise RequestError(status, body)

    return json.loads(body)

def upload_file(file_name, parent=None):
    params = '?suppress=deduplication'  # suppresses 409 response
    metadata = {'kind': 'FILE', 'name': os.path.basename(file_name)}
    if parent:
        metadata['parents'] = [parent]

    buffer = BytesIO()
    c = pycurl.Curl()
    c.setopt(c.URL, oauth.get_content_url() + 'nodes' + params)
    c.setopt(c.HTTPHEADER, ['Authorization: ' + oauth.get_auth_token()])
    c.setopt(c.WRITEDATA, buffer)
    c.setopt(c.HTTPPOST, [('metadata', json.dumps(metadata)),
                          ('content', (c.FORM_FILE, file_name.encode('UTF-8')))])
    c.setopt(c.NOPROGRESS, 0)
    c.setopt(c.PROGRESSFUNCTION, progress)
    if logger.getEffectiveLevel() == logging.DEBUG:
        c.setopt(c.VERBOSE, 1)

    try:
        c.perform()
    except pycurl.error as e:
        raise RequestError(0, e)

    status = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    print()  # break progress line

    body = buffer.getvalue().decode('utf-8')

    if status != http.CREATED:
        # print('Uploading "%s" failed.' % file_name)
        raise RequestError(status, body)

    return json.loads(body)

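# A hedged sketch of the `progress` callback wired into PROGRESSFUNCTION in
# the two functions above (the real helper isn't shown here). pycurl invokes
# it with download/upload totals and current counts; returning a non-zero
# value aborts the transfer.
def progress(download_total, downloaded, upload_total, uploaded):
    if upload_total > 0:
        print('\r%5.1f%%' % (uploaded * 100.0 / upload_total), end='')
    return 0  # keep going
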
def _save(im, fp, filename):
    fp.write(_MAGIC)  # (2+2)
    sizes = im.encoderinfo.get("sizes",
                               [(16, 16), (24, 24), (32, 32), (48, 48),
                                (64, 64), (128, 128), (255, 255)])
    width, height = im.size
    # keep only sizes that fit both the source image and the one-byte ICO
    # directory fields (the bare filter() call in the original discarded its
    # result, so the list was never actually pruned)
    sizes = [size for size in sizes
             if size[0] <= width and size[1] <= height
             and size[0] <= 255 and size[1] <= 255]
    sizes = sorted(sizes, key=lambda x: x[0])
    fp.write(struct.pack("H", len(sizes)))  # idCount(2)
    offset = fp.tell() + len(sizes) * 16
    for size in sizes:
        width, height = size
        fp.write(struct.pack("B", width))   # bWidth(1)
        fp.write(struct.pack("B", height))  # bHeight(1)
        fp.write(b"\0")                     # bColorCount(1)
        fp.write(b"\0")                     # bReserved(1)
        fp.write(b"\0\0")                   # wPlanes(2)
        fp.write(struct.pack("H", 32))      # wBitCount(2)
        image_io = BytesIO()
        tmp = im.copy()
        tmp.thumbnail(size, Image.ANTIALIAS)
        tmp.save(image_io, "png")
        image_io.seek(0)
        image_bytes = image_io.read()
        bytes_len = len(image_bytes)
        fp.write(struct.pack("I", bytes_len))  # dwBytesInRes(4)
        fp.write(struct.pack("I", offset))     # dwImageOffset(4)
        current = fp.tell()
        fp.seek(offset)
        fp.write(image_bytes)
        offset = offset + bytes_len
        fp.seek(current)

def test_object_diff_bin_blob_force(self):
    f = BytesIO()
    # Prepare two slightly different PNG headers
    b1 = Blob.from_string(
        b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52"
        b"\x00\x00\x01\xd5\x00\x00\x00\x9f\x08\x04\x00\x00\x00\x05\x04\x8b"
    )
    b2 = Blob.from_string(
        b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52"
        b"\x00\x00\x01\xd5\x00\x00\x00\x9f\x08\x03\x00\x00\x00\x98\xd3\xb3"
    )
    store = MemoryObjectStore()
    store.add_objects([(b1, None), (b2, None)])
    write_object_diff(f, store, (b"foo.png", 0o644, b1.id),
                      (b"bar.png", 0o644, b2.id), diff_binary=True)
    self.assertEqual(
        [
            b"diff --git a/foo.png b/bar.png",
            b"index f73e47d..06364b7 644",
            b"--- a/foo.png",
            b"+++ b/bar.png",
            b"@@ -1,4 +1,4 @@",
            b" \x89PNG",
            b" \x1a",
            b" \x00\x00\x00",
            b"-IHDR\x00\x00\x01\xd5\x00\x00\x00\x9f\x08\x04\x00\x00\x00\x05\x04\x8b",
            b"\\ No newline at end of file",
            b"+IHDR\x00\x00\x01\xd5\x00\x00\x00\x9f\x08\x03\x00\x00\x00\x98\xd3\xb3",
            b"\\ No newline at end of file",
        ],
        f.getvalue().splitlines(),
    )

def deserialize(schema, binary):
    bytes_writer = BytesIO()
    bytes_writer.write(binary)
    bytes_writer.seek(0)
    res = fastavro.schemaless_reader(bytes_writer, schema)
    return res

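# A hedged counterpart to deserialize() above (an assumed helper, mirroring
# fastavro's API): schemaless_writer produces exactly the bytes that
# schemaless_reader consumes.
def serialize(schema, record):
    bytes_writer = BytesIO()
    fastavro.schemaless_writer(bytes_writer, schema, record)
    return bytes_writer.getvalue()
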
def test_authoritativeMessage(self):
    """
    The L{RRHeader} instances created by L{Message} from an authoritative
    message are marked as authoritative.
    """
    buf = BytesIO()
    answer = dns.RRHeader(payload=dns.Record_A('1.2.3.4', ttl=0))
    answer.encode(buf)
    data = (
        b'\x01\x00'  # Message ID
        # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
        b'\x04'
        # recursion bit, empty bit, empty bit, empty bit, response code
        # nibble
        b'\x00'
        b'\x00\x00'  # number of queries
        b'\x00\x01'  # number of answers
        b'\x00\x00'  # number of authorities
        b'\x00\x00'  # number of additionals
        + buf.getvalue()
    )
    answer.auth = True
    self.parser.updateData(data)
    message = self.parser.message()
    self.assertEqual(message.answers, [answer])
    self.assertTrue(message.answers[0].auth)

class FakePayload(object):
    """
    A wrapper around BytesIO that restricts what can be read since data from
    the network can't be seeked and cannot be read outside of its content
    length. This makes sure that views can't do anything under the test
    client that wouldn't work in Real Life.
    """
    def __init__(self, content=None):
        self.__content = BytesIO()
        self.__len = 0
        self.read_started = False
        if content is not None:
            self.write(content)

    def __len__(self):
        return self.__len

    def read(self, num_bytes=None):
        if not self.read_started:
            self.__content.seek(0)
            self.read_started = True
        if num_bytes is None:
            num_bytes = self.__len or 0
        assert self.__len >= num_bytes, \
            "Cannot read more than the available bytes from the HTTP incoming data."
        content = self.__content.read(num_bytes)
        self.__len -= num_bytes
        return content

    def write(self, content):
        if self.read_started:
            raise ValueError("Unable to write a payload after it has been read")
        content = force_bytes(content)
        self.__content.write(content)
        self.__len += len(content)

def test_commit_serialization(self):
    assert_commit_serialization(self.gitrwrepo, self.gitrwrepo.head, True)

    rwrepo = self.gitrwrepo
    make_object = rwrepo.odb.store
    # direct serialization - deserialization can be tested afterwards
    # serialization is probably limited on IO
    hc = rwrepo.commit(rwrepo.head)

    nc = 5000
    st = time()
    for i in xrange(nc):
        cm = Commit(rwrepo, Commit.NULL_BIN_SHA, hc.tree,
                    hc.author, hc.authored_date, hc.author_tz_offset,
                    hc.committer, hc.committed_date, hc.committer_tz_offset,
                    str(i), parents=hc.parents, encoding=hc.encoding)

        stream = BytesIO()
        cm._serialize(stream)
        slen = stream.tell()
        stream.seek(0)

        cm.binsha = make_object(IStream(Commit.type, slen, stream)).binsha
    # END commit creation
    elapsed = time() - st

    print("Serialized %i commits to loose objects in %f s ( %f commits / s )"
          % (nc, elapsed, nc / elapsed), file=sys.stderr)

def get_compressed_file_data(file_path, compresslevel=5):
    compressed_buffer = BytesIO()
    gzip_file = GzipFile(mode='wb', compresslevel=compresslevel,
                         fileobj=compressed_buffer)
    try:
        fileobj = open(file_path, 'rb')
        while True:
            x = fileobj.read(65536)
            if not x:
                break
            gzip_file.write(x)
            x = None
        fileobj.close()
    except IOError as e:
        LOG.error(str(e))
        return None
    gzip_file.close()
    compressed_data = compressed_buffer.getvalue()
    compressed_buffer.close()
    return compressed_data

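# A minimal sketch of the reverse path (an assumed helper, not part of the
# original module): inflate the bytes produced by get_compressed_file_data()
# by pointing a read-mode GzipFile at an in-memory buffer.
def get_decompressed_data(compressed_data):
    buf = BytesIO(compressed_data)
    gzip_file = GzipFile(mode='rb', fileobj=buf)
    try:
        return gzip_file.read()
    finally:
        gzip_file.close()
        buf.close()
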
def test_write_struct():
    b = BytesIO()
    item = TItem(id=123, phones=['123456', 'abcdef'])
    proto.TCyBinaryProtocol(b).write_struct(item)
    assert_equal("08 00 01 00 00 00 7b 0f 00 02 0b 00 00 00 02 00 00 00 "
                 "06 31 32 33 34 35 36 00 00 00 06 61 62 63 64 65 66 00",
                 hexlify(b.getvalue()))

def __init__(self, buf):
    self._progress = 0
    self._len = len(buf)
    self._bar = None
    if self._len > 4096:
        self._bar = progress.Bar(filled_char='=', every=4096)

    BytesIO.__init__(self, buf)

class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = BytesIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)('replace')

    def writerow(self, row):
        row = [smart_text(s) for s in row]
        self.writer.writerow([s.encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue; truncate(0) alone does not rewind io.BytesIO,
        # so seek first to avoid null padding on the next write
        self.queue.seek(0)
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)

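# A brief usage sketch (hypothetical data; this recipe targets Python 2,
# where csv.writer expects byte streams): encode non-ASCII rows into a
# latin-1 target stream.
def _unicode_writer_example():
    target = BytesIO()
    writer = UnicodeWriter(target, encoding='latin-1')
    writer.writerows([[u'caf\xe9', u'1'], [u'na\xefve', u'2']])
    return target.getvalue()
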
def test_save_dict():
    # Test that dict can be saved (as recarray), loaded as matstruct
    dict_types = ((dict, False),)
    try:
        from collections import OrderedDict
    except ImportError:
        pass
    else:
        dict_types += ((OrderedDict, True),)
    ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
    ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])
    for dict_type, is_ordered in dict_types:
        # Initialize with tuples to keep order for OrderedDict
        d = dict_type([('a', 1), ('b', 2)])
        stream = BytesIO()
        savemat(stream, {'dict': d})
        stream.seek(0)
        vals = loadmat(stream)['dict']
        assert_equal(set(vals.dtype.names), set(['a', 'b']))
        if is_ordered:
            # Input was ordered, output in ab order
            assert_array_equal(vals, ab_exp)
        else:
            # Not ordered input, either order output
            if vals.dtype.names[0] == 'a':
                assert_array_equal(vals, ab_exp)
            else:
                assert_array_equal(vals, ba_exp)

def to_pptx(self):
    logger.info('Converting svg -> html -> png -> pptx')
    content = None
    try:
        # convert to png
        png_fn = self._rasterize_png()

        # create blank presentation slide layout
        pres = Presentation()
        blank_slidelayout = pres.slide_layouts[6]
        slide = pres.slides.add_slide(blank_slidelayout)

        self._pptx_add_title(slide)
        self._pptx_add_url(slide)
        self._pptx_add_png(slide, png_fn)
        self._pptx_add_hawc_logo(slide)

        # save as object
        content = BytesIO()
        pres.save(content)
        content.seek(0)

    except Exception as e:
        logger.error(e, exc_info=True)
    finally:
        self.cleanup()

    return content

def test_store(self):
    out = BytesIO()
    storage = StdoutFeedStorage('stdout:', _stdout=out)
    file = storage.open(scrapy.Spider("default"))
    file.write(b"content")
    yield storage.store(file)
    self.assertEqual(out.getvalue(), b"content")

def roundtrip_truncated(arr):
    f = BytesIO()
    format.write_array(f, arr)
    # BytesIO is one byte short
    f2 = BytesIO(f.getvalue()[0:-1])
    arr2 = format.read_array(f2)
    return arr2

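# The matching full round-trip (an assumed helper in the style of numpy's
# format tests): without the one-byte truncation, read_array recovers the
# array intact.
def roundtrip(arr):
    f = BytesIO()
    format.write_array(f, arr)
    f2 = BytesIO(f.getvalue())
    return format.read_array(f2)
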
class PreambleTestCase(unittest.TestCase):
    class doc_info:
        doc_id = 'D-deadbeef'
        rev = '397932e0c77f45fcb7c3732930e7e9b2:1'

    def setUp(self):
        self.cleartext = BytesIO(snowden1)
        self.blob = _crypto.BlobEncryptor(
            self.doc_info, self.cleartext,
            secret='A' * 96)

    def test_preamble_starts_with_magic_signature(self):
        preamble = self.blob._encode_preamble()
        assert preamble.startswith(_crypto.MAGIC)

    def test_preamble_has_cipher_metadata(self):
        preamble = self.blob._encode_preamble()
        unpacked = _preamble.PACMAN.unpack(preamble)
        encryption_scheme, encryption_method = unpacked[1:3]
        assert encryption_scheme in _crypto.ENC_SCHEME
        assert encryption_method in _crypto.ENC_METHOD
        assert unpacked[4] == self.blob.iv

    def test_preamble_has_document_sync_metadata(self):
        preamble = self.blob._encode_preamble()
        unpacked = _preamble.PACMAN.unpack(preamble)
        doc_id, doc_rev = unpacked[5:7]
        assert doc_id == self.doc_info.doc_id
        assert doc_rev == self.doc_info.rev

    def test_preamble_has_document_size(self):
        preamble = self.blob._encode_preamble()
        unpacked = _preamble.PACMAN.unpack(preamble)
        size = unpacked[7]
        assert size == _crypto._ceiling(len(snowden1))

    @defer.inlineCallbacks
    def test_preamble_can_come_without_size(self):
        # XXX: This test case is here only to test backwards compatibility!
        preamble = self.blob._encode_preamble()
        # repack preamble using legacy format, without doc size
        unpacked = _preamble.PACMAN.unpack(preamble)
        preamble_without_size = _preamble.LEGACY_PACMAN.pack(*unpacked[0:7])
        # encrypt it manually for custom tag
        ciphertext, tag = _aes_encrypt(self.blob.sym_key, self.blob.iv,
                                       self.cleartext.getvalue(),
                                       aead=preamble_without_size)
        ciphertext = ciphertext + tag
        # encode it
        ciphertext = base64.urlsafe_b64encode(ciphertext)
        preamble_without_size = base64.urlsafe_b64encode(preamble_without_size)
        # decrypt it
        ciphertext = preamble_without_size + ' ' + ciphertext
        cleartext = yield _crypto.BlobDecryptor(
            self.doc_info, BytesIO(ciphertext),
            secret='A' * 96).decrypt()
        assert cleartext.getvalue() == self.cleartext.getvalue()
        warnings = self.flushWarnings()
        assert len(warnings) == 1
        assert 'legacy preamble without size' in warnings[0]['message']

def test_simple(self):
    c1, c2, c3 = build_commit_graph(
        self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
    self.repo.refs[b"HEAD"] = c3.id
    outstream = BytesIO()
    porcelain.show(self.repo.path, objects=[c3.id], outstream=outstream)
    # outstream is a BytesIO, so the ref key and expected prefix must be
    # bytes (the original mixed str and bytes here)
    self.assertTrue(outstream.getvalue().startswith(b"-" * 50))