Example #1
 def test_simple(self):
     c1, c2, c3 = build_commit_graph(self.repo.object_store, [[1], [2, 1],
         [3, 1, 2]])
     self.repo.refs["HEAD"] = c3.id
     outstream = BytesIO()
     porcelain.show(self.repo.path, objects=[c3.id], outstream=outstream)
     self.assertTrue(outstream.getvalue().startswith("-" * 50))
Example #2
class PreambleTestCase(unittest.TestCase):
    class doc_info:
        doc_id = 'D-deadbeef'
        rev = '397932e0c77f45fcb7c3732930e7e9b2:1'

    def setUp(self):
        self.cleartext = BytesIO(snowden1)
        self.blob = _crypto.BlobEncryptor(
            self.doc_info, self.cleartext,
            secret='A' * 96)

    def test_preamble_starts_with_magic_signature(self):
        preamble = self.blob._encode_preamble()
        assert preamble.startswith(_crypto.MAGIC)

    def test_preamble_has_cipher_metadata(self):
        preamble = self.blob._encode_preamble()
        unpacked = _preamble.PACMAN.unpack(preamble)
        encryption_scheme, encryption_method = unpacked[1:3]
        assert encryption_scheme in _crypto.ENC_SCHEME
        assert encryption_method in _crypto.ENC_METHOD
        assert unpacked[4] == self.blob.iv

    def test_preamble_has_document_sync_metadata(self):
        preamble = self.blob._encode_preamble()
        unpacked = _preamble.PACMAN.unpack(preamble)
        doc_id, doc_rev = unpacked[5:7]
        assert doc_id == self.doc_info.doc_id
        assert doc_rev == self.doc_info.rev

    def test_preamble_has_document_size(self):
        preamble = self.blob._encode_preamble()
        unpacked = _preamble.PACMAN.unpack(preamble)
        size = unpacked[7]
        assert size == _crypto._ceiling(len(snowden1))

    @defer.inlineCallbacks
    def test_preamble_can_come_without_size(self):
        # XXX: This test case is here only to test backwards compatibility!
        preamble = self.blob._encode_preamble()
        # repack preamble using legacy format, without doc size
        unpacked = _preamble.PACMAN.unpack(preamble)
        preamble_without_size = _preamble.LEGACY_PACMAN.pack(*unpacked[0:7])
        # encrypt it manually for custom tag
        ciphertext, tag = _aes_encrypt(self.blob.sym_key, self.blob.iv,
                                       self.cleartext.getvalue(),
                                       aead=preamble_without_size)
        ciphertext = ciphertext + tag
        # encode it
        ciphertext = base64.urlsafe_b64encode(ciphertext)
        preamble_without_size = base64.urlsafe_b64encode(preamble_without_size)
        # decrypt it
        ciphertext = preamble_without_size + b' ' + ciphertext
        cleartext = yield _crypto.BlobDecryptor(
            self.doc_info, BytesIO(ciphertext),
            secret='A' * 96).decrypt()
        assert cleartext.getvalue() == self.cleartext.getvalue()
        warnings = self.flushWarnings()
        assert len(warnings) == 1
        assert 'legacy preamble without size' in warnings[0]['message']
Example #3
 def test_store(self):
     out = BytesIO()
     storage = StdoutFeedStorage('stdout:', _stdout=out)
     file = storage.open(scrapy.Spider("default"))
     file.write(b"content")
     yield storage.store(file)
     self.assertEqual(out.getvalue(), b"content")
Example #4
 def test_empty(self):
     c1, c2, c3 = build_commit_graph(self.repo.object_store, [[1], [2, 1],
         [3, 1, 2]])
     self.repo.refs[b"HEAD"] = c3.id
     outstream = BytesIO()
     porcelain.diff_tree(self.repo.path, c2.tree, c3.tree, outstream=outstream)
     self.assertEqual(outstream.getvalue(), b"")
Example #5
    def test_read_atom(self):
        """Test that all fields are read from PDB ATOM records"""
        s = BytesIO()
        # PDB is fixed-format; we should be able to read coordinates even
        # without spaces between them
        s.write(b'ATOM      1  N   ALA A   5    3000.0001000.4002000.600'
                b'  2.00  6.40           N\n')
        s.seek(0)

        m = IMP.Model()
        pdb = IMP.atom.read_pdb(s, m)
        atoms = IMP.atom.get_by_type(pdb, IMP.atom.ATOM_TYPE)
        self.assertEqual(len(atoms), 1)
        a = IMP.atom.Atom(atoms[0])
        r = IMP.atom.Residue(a.get_parent())
        c = IMP.atom.Chain(r.get_parent())
        self.assertEqual(a.get_input_index(), 1)
        self.assertEqual(a.get_atom_type().get_string(), 'N')
        # Note: currently don't read alternate location or insertion code
        self.assertEqual(r.get_residue_type().get_string(), 'ALA')
        self.assertEqual(c.get_id(), 'A')
        self.assertEqual(r.get_index(), 5)
        coord = IMP.core.XYZ(a).get_coordinates()
        self.assertAlmostEqual(coord[0], 3000.000, delta=0.001)
        self.assertAlmostEqual(coord[1], 1000.400, delta=0.001)
        self.assertAlmostEqual(coord[2], 2000.600, delta=0.001)
        self.assertAlmostEqual(a.get_occupancy(), 2.00, delta=0.01)
        self.assertAlmostEqual(a.get_temperature_factor(), 6.40, delta=0.01)
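The comment in the test above is the whole point: PDB ATOM records are fixed-width, so the three coordinates always occupy columns 31-54 (1-based) even when no separators remain between them. A minimal standalone sketch of that column slicing, independent of IMP:

def parse_atom_coords(line):
    # PDB fixed columns (1-based): x = 31-38, y = 39-46, z = 47-54
    return float(line[30:38]), float(line[38:46]), float(line[46:54])

x, y, z = parse_atom_coords(
    'ATOM      1  N   ALA A   5    3000.0001000.4002000.600'
    '  2.00  6.40           N')
assert (x, y, z) == (3000.0, 1000.4, 2000.6)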
Example #6
def test_save_dict():
    # Test that dict can be saved (as recarray), loaded as matstruct
    dict_types = ((dict, False),)
    try:
        from collections import OrderedDict
    except ImportError:
        pass
    else:
        dict_types += ((OrderedDict, True),)
    ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
    ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])
    for dict_type, is_ordered in dict_types:
        # Initialize with tuples to keep order for OrderedDict
        d = dict_type([('a', 1), ('b', 2)])
        stream = BytesIO()
        savemat(stream, {'dict': d})
        stream.seek(0)
        vals = loadmat(stream)['dict']
        assert_equal(set(vals.dtype.names), set(['a', 'b']))
        if is_ordered:  # Input was ordered, output in ab order
            assert_array_equal(vals, ab_exp)
        else:  # Not ordered input, either order output
            if vals.dtype.names[0] == 'a':
                assert_array_equal(vals, ab_exp)
            else:
                assert_array_equal(vals, ba_exp)
Example #7
def test_read_opts():
    # tests if read is seeing option sets, at initialization and after
    # initialization
    arr = np.arange(6).reshape(1,6)
    stream = BytesIO()
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    back_dict = rdr.get_variables()
    rarr = back_dict['a']
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, squeeze_me=True)
    assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
    rdr.squeeze_me = False
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, byte_order=boc.native_code)
    assert_array_equal(rdr.get_variables()['a'], arr)
    # inverted byte code leads to error on read because of swapped
    # header etc
    rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
    assert_raises(Exception, rdr.get_variables)
    rdr.byte_order = boc.native_code
    assert_array_equal(rdr.get_variables()['a'], arr)
    arr = np.array(['a string'])
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    assert_array_equal(rdr.get_variables()['a'], arr)
    rdr = MatFile5Reader(stream, chars_as_strings=False)
    carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
    assert_array_equal(rdr.get_variables()['a'], carr)
    rdr.chars_as_strings = True
    assert_array_equal(rdr.get_variables()['a'], arr)
Example #8
 def __init__(self, buf):
     self._progress = 0
     self._len = len(buf)
     self._bar = None
     if self._len > 4096:
         self._bar = progress.Bar(filled_char='=', every=4096)
     BytesIO.__init__(self, buf)
Example #9
    def _serialize_properties(self):
        """serialize the 'properties' attribute (a dictionary) into
        the raw bytes making up a set of property flags and a
        property list, suitable for putting into a content frame header."""
        shift = 15
        flag_bits = 0
        flags = []
        sformat, svalues = [], []
        props = self.properties
        props.setdefault('content_encoding', 'utf-8')
        for key, proptype in self.PROPERTIES:
            val = props.get(key, None)
            if val is not None:
                if shift == 0:
                    flags.append(flag_bits)
                    flag_bits = 0
                    shift = 15

                flag_bits |= (1 << shift)
                if proptype != 'bit':
                    sformat.append(proptype)
                    svalues.append(val)

            shift -= 1
        flags.append(flag_bits)
        result = BytesIO()
        write = result.write
        for flag_bits in flags:
            write(pack('>H', flag_bits))
        write(dumps(''.join(sformat), svalues))
        return result.getvalue()
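The flag-packing loop above allocates one flag bit per property from bit 15 downward, starting a fresh 16-bit word after 15 properties. In AMQP 0-9-1 framing, bit 0 of each word is a continuation flag indicating that another flag word follows (the serializer above leaves it clear when a single word suffices). A hedged sketch of the inverse; the helper name is illustrative, not part of the class:

from struct import unpack_from

def read_property_flags(raw, offset=0):
    # Read chained 16-bit property-flag words, high bit first.
    bits = []
    while True:
        (word,) = unpack_from('>H', raw, offset)
        offset += 2
        bits.extend(bool(word & (1 << shift)) for shift in range(15, 0, -1))
        if not word & 1:  # continuation bit clear: this was the last word
            return bits, offset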
Example #10
def get_compressed_file_data(file_path, compresslevel=5):
    compressed_buffer = BytesIO()

    gzip_file = GzipFile(mode='wb',
                         compresslevel=compresslevel,
                         fileobj=compressed_buffer)

    try:
        fileobj = open(file_path, 'rb')
        while True:
            x = fileobj.read(65536)
            if not x:
                break
            gzip_file.write(x)
            x = None
        fileobj.close()
    except IOError as e:
        LOG.error(str(e))
        return None

    gzip_file.close()

    compressed_data = compressed_buffer.getvalue()
    compressed_buffer.close()

    return compressed_data
Example #11
    def open(self, name, mode='rb'):
        resp = self.b2.download_file(name)

        output = BytesIO()
        output.write(resp)
        output.seek(0)
        return File(output, name)
Example #12
class FakePayload(object):
    """
    A wrapper around BytesIO that restricts what can be read, since data from
    the network can't be sought and cannot be read outside of its content
    length. This makes sure that views can't do anything under the test client
    that wouldn't work in real life.
    """
    def __init__(self, content=None):
        self.__content = BytesIO()
        self.__len = 0
        self.read_started = False
        if content is not None:
            self.write(content)

    def __len__(self):
        return self.__len

    def read(self, num_bytes=None):
        if not self.read_started:
            self.__content.seek(0)
            self.read_started = True
        if num_bytes is None:
            num_bytes = self.__len or 0
        assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
        content = self.__content.read(num_bytes)
        self.__len -= num_bytes
        return content

    def write(self, content):
        if self.read_started:
            raise ValueError("Unable to write a payload after he's been read")
        content = force_bytes(content)
        self.__content.write(content)
        self.__len += len(content)
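A short usage sketch of the class above (assuming Django's force_bytes is importable): reads are capped at the number of bytes written, and writing after the first read is rejected, just like a real request body.

payload = FakePayload(b"abcd")
assert len(payload) == 4
assert payload.read(2) == b"ab"
assert payload.read() == b"cd"  # num_bytes=None reads the remainder
try:
    payload.write(b"more")
except ValueError:
    pass  # writing after a read is forbidden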
Example #13
class TestFileUploadParser(TestCase):
    def setUp(self):
        class MockRequest(object):
            pass
        from io import BytesIO
        self.stream = BytesIO(
            "Test text file".encode('utf-8')
        )
        request = MockRequest()
        request.upload_handlers = (MemoryFileUploadHandler(),)
        request.META = {
            'HTTP_CONTENT_DISPOSITION': 'Content-Disposition: inline; filename=file.txt'.encode('utf-8'),
            'HTTP_CONTENT_LENGTH': 14,
        }
        self.parser_context = {'request': request, 'kwargs': {}}

    def test_parse(self):
        """ Make sure the `QueryDict` works OK """
        parser = FileUploadParser()
        self.stream.seek(0)
        data_and_files = parser.parse(self.stream, None, self.parser_context)
        file_obj = data_and_files.files['file']
        self.assertEqual(file_obj._size, 14)

    def test_get_filename(self):
        parser = FileUploadParser()
        filename = parser.get_filename(self.stream, None, self.parser_context)
        self.assertEqual(filename, 'file.txt'.encode('utf-8'))
Example #14
def deserialize(schema, binary):
    bytes_writer = BytesIO()
    bytes_writer.write(binary)
    bytes_writer.seek(0)

    res = fastavro.schemaless_reader(bytes_writer, schema)
    return res
Example #15
    def download_sentiments(self):
        print("Download sentiment data from webservice")
        f1 = open('../tmp/results.txt', 'w+')

        for i in range(self.data[:, ].shape[0]):
            print("Downloading entry " + str(i))
            escaped = re.escape(self.data[i, 1])

            post_data = {'text': escaped}

            buffer = BytesIO()
            c = pycurl.Curl()
            c.setopt(c.URL, 'http://text-processing.com/api/sentiment/')
            c.setopt(c.POSTFIELDS, urllib.urlencode(post_data))
            c.setopt(c.WRITEDATA, buffer)
            c.perform()
            c.close()

            body = buffer.getvalue()
            # Body is a byte string.
            # We have to know the encoding in order to print it to a text file
            # such as standard output.
            response = body.decode('utf-8')
            self.results.append(response)

        f1.write(str(self.results))
Example #16
def roundtrip_truncated(arr):
    f = BytesIO()
    format.write_array(f, arr)
    # rebuild the stream with the final byte dropped, so it is one byte short
    f2 = BytesIO(f.getvalue()[0:-1])
    arr2 = format.read_array(f2)
    return arr2
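A hedged usage note: the helper exists to check that numpy's format.read_array rejects the short payload instead of silently returning a truncated array; numpy's own format tests appear to expect a ValueError here.

import numpy as np

arr = np.arange(10)
try:
    roundtrip_truncated(arr)
except ValueError:
    pass  # expected: the serialized payload is one byte short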
Example #17
def _save(im, fp, filename):
    fp.write(_MAGIC)  # (2+2)
    sizes = im.encoderinfo.get("sizes",
                               [(16, 16), (24, 24), (32, 32), (48, 48),
                                (64, 64), (128, 128), (255, 255)])
    width, height = im.size
    sizes = [size for size in sizes
             if size[0] <= width and size[1] <= height and
             size[0] <= 255 and size[1] <= 255]
    sizes = sorted(sizes, key=lambda x: x[0])
    fp.write(struct.pack("H", len(sizes)))  # idCount(2)
    offset = fp.tell() + len(sizes)*16
    for size in sizes:
        width, height = size
        fp.write(struct.pack("B", width))  # bWidth(1)
        fp.write(struct.pack("B", height))  # bHeight(1)
        fp.write(b"\0")  # bColorCount(1)
        fp.write(b"\0")  # bReserved(1)
        fp.write(b"\0\0")  # wPlanes(2)
        fp.write(struct.pack("H", 32))  # wBitCount(2)

        image_io = BytesIO()
        tmp = im.copy()
        tmp.thumbnail(size, Image.ANTIALIAS)
        tmp.save(image_io, "png")
        image_io.seek(0)
        image_bytes = image_io.read()
        bytes_len = len(image_bytes)
        fp.write(struct.pack("I", bytes_len))  # dwBytesInRes(4)
        fp.write(struct.pack("I", offset))  # dwImageOffset(4)
        current = fp.tell()
        fp.seek(offset)
        fp.write(image_bytes)
        offset = offset + bytes_len
        fp.seek(current)
Example #18
    class UnicodeWriter:
        """
        A CSV writer which will write rows to CSV file "f",
        which is encoded in the given encoding.
        """

        def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
            # Redirect output to a queue
            self.queue = BytesIO()
            self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
            self.stream = f
            self.encoder = codecs.getincrementalencoder(encoding)('replace')

        def writerow(self, row):
            row = [smart_text(s) for s in row]
            self.writer.writerow([s.encode("utf-8") for s in row])
            # Fetch UTF-8 output from the queue ...
            data = self.queue.getvalue()
            data = data.decode("utf-8")
            # ... and reencode it into the target encoding
            data = self.encoder.encode(data)
            # write to the target stream
            self.stream.write(data)
            # empty queue
            self.queue.truncate(0)
            self.queue.seek(0)

        def writerows(self, rows):
            for row in rows:
                self.writerow(row)
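A hedged usage sketch of this Python 2 era recipe (assuming Django's smart_text plus the csv and codecs imports of the surrounding module): each row is staged as UTF-8 in the BytesIO queue, decoded, and re-encoded into the target encoding on its way to the real stream.

out = BytesIO()
writer = UnicodeWriter(out, encoding="latin-1")
writer.writerow([u"naïve", u"café"])
writer.writerows([[u"a", u"1"], [u"b", u"2"]])
# out.getvalue() now holds latin-1 encoded CSV bytes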
Example #19
    def to_pptx(self):
        logger.info('Converting svg -> html -> png -> pptx')
        content = None
        try:
            # convert to png
            png_fn = self._rasterize_png()

            # create blank presentation slide layout
            pres = Presentation()
            blank_slidelayout = pres.slide_layouts[6]
            slide = pres.slides.add_slide(blank_slidelayout)

            self._pptx_add_title(slide)
            self._pptx_add_url(slide)
            self._pptx_add_png(slide, png_fn)
            self._pptx_add_hawc_logo(slide)

            # save as object
            content = BytesIO()
            pres.save(content)
            content.seek(0)

        except Exception as e:
            logger.error(e, exc_info=True)
        finally:
            self.cleanup()

        return content
Example #20
    def test_commit_serialization(self):
        assert_commit_serialization(self.gitrwrepo, self.gitrwrepo.head, True)

        rwrepo = self.gitrwrepo
        make_object = rwrepo.odb.store
        # direct serialization - deserialization can be tested afterwards
        # serialization is probably limited on IO
        hc = rwrepo.commit(rwrepo.head)

        nc = 5000
        st = time()
        for i in range(nc):
            cm = Commit(rwrepo, Commit.NULL_BIN_SHA, hc.tree,
                        hc.author, hc.authored_date, hc.author_tz_offset,
                        hc.committer, hc.committed_date, hc.committer_tz_offset,
                        str(i), parents=hc.parents, encoding=hc.encoding)

            stream = BytesIO()
            cm._serialize(stream)
            slen = stream.tell()
            stream.seek(0)

            cm.binsha = make_object(IStream(Commit.type, slen, stream)).binsha
        # END commit creation
        elapsed = time() - st

        print("Serialized %i commits to loose objects in %f s ( %f commits / s )"
              % (nc, elapsed, nc / elapsed), file=sys.stderr)
Example #21
def test_write_struct():
    b = BytesIO()
    item = TItem(id=123, phones=['123456', 'abcdef'])
    proto.TCyBinaryProtocol(b).write_struct(item)
    assert_equal("08 00 01 00 00 00 7b 0f 00 02 0b 00 00 00 02 00 00 00 "
                 "06 31 32 33 34 35 36 00 00 00 06 61 62 63 64 65 66 00",
                 hexlify(b.getvalue()))
Example #22
 def test_object_diff_bin_blob_force(self):
     f = BytesIO()
     # Prepare two slightly different PNG headers
     b1 = Blob.from_string(
         b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52"
         b"\x00\x00\x01\xd5\x00\x00\x00\x9f\x08\x04\x00\x00\x00\x05\x04\x8b"
     )
     b2 = Blob.from_string(
         b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52"
         b"\x00\x00\x01\xd5\x00\x00\x00\x9f\x08\x03\x00\x00\x00\x98\xd3\xb3"
     )
     store = MemoryObjectStore()
     store.add_objects([(b1, None), (b2, None)])
     write_object_diff(f, store, (b"foo.png", 0o644, b1.id), (b"bar.png", 0o644, b2.id), diff_binary=True)
     self.assertEqual(
         [
             b"diff --git a/foo.png b/bar.png",
             b"index f73e47d..06364b7 644",
             b"--- a/foo.png",
             b"+++ b/bar.png",
             b"@@ -1,4 +1,4 @@",
             b" \x89PNG",
             b" \x1a",
             b" \x00\x00\x00",
             b"-IHDR\x00\x00\x01\xd5\x00\x00\x00\x9f\x08\x04\x00\x00\x00\x05\x04\x8b",
             b"\\ No newline at end of file",
             b"+IHDR\x00\x00\x01\xd5\x00\x00\x00\x9f\x08\x03\x00\x00\x00\x98\xd3\xb3",
             b"\\ No newline at end of file",
         ],
         f.getvalue().splitlines(),
     )
Example #23
 def _process(self):
     f = request.files['logo']
     try:
         img = Image.open(f)
     except IOError:
         flash(_('You cannot upload this file as a logo.'), 'error')
         return jsonify_data(content=None)
     if img.format.lower() not in {'jpeg', 'png', 'gif'}:
         flash(_('The file has an invalid format ({format})').format(format=img.format), 'error')
         return jsonify_data(content=None)
     if img.mode == 'CMYK':
         flash(_('The logo you uploaded is using the CMYK colorspace and has been converted to RGB. Please check if '
                 'the colors are correct and convert it manually if necessary.'), 'warning')
         img = img.convert('RGB')
     image_bytes = BytesIO()
     img.save(image_bytes, 'PNG')
     image_bytes.seek(0)
     content = image_bytes.read()
     self.event.logo = content
     self.event.logo_metadata = {
         'hash': crc32(content),
         'size': len(content),
         'filename': os.path.splitext(secure_filename(f.filename, 'logo'))[0] + '.png',
         'content_type': 'image/png'
     }
     flash(_('New logo saved'), 'success')
     logger.info("New logo '%s' uploaded by %s (%s)", f.filename, session.user, self.event)
     return jsonify_data(content=get_logo_data(self.event))
Example #24
    def process(self, response: Response, responseCnt: ResponseContent, **keyargs):
        '''
        @see: HandlerProcessorProceed.process
        '''
        assert isinstance(response, Response), 'Invalid response %s' % response
        assert isinstance(responseCnt, ResponseContent), 'Invalid response content %s' % responseCnt

        if response.isSuccess is False: return  # Skip in case the response is in error
        if Response.encoder not in response: return  # Skip in case there is no encoder to render
        assert callable(response.renderFactory), 'Invalid response renderer factory %s' % response.renderFactory

        output = BytesIO()
        render = response.renderFactory(output)
        assert isinstance(render, IRender), 'Invalid render %s' % render

        resolve = Resolve(response.encoder).request(value=response.obj, render=render, **response.encoderData or {})

        if not self.allowChunked and ResponseContent.length not in responseCnt:
            while resolve.has(): resolve.do()
            content = output.getvalue()
            responseCnt.length = len(content)
            responseCnt.source = (content,)
            output.close()
        else:
            responseCnt.source = self.renderAsGenerator(resolve, output, self.bufferSize)
Example #25
def create_dummy_image():
    file = BytesIO()
    image = Image.new('RGBA', size=(50, 50), color=(155, 0, 0))
    image.save(file, 'png')
    file.name = 'test_image.png'
    file.seek(0)
    return file
Example #26
def overwrite_file(node_id, file_name):
    params = '?suppress=deduplication'  # suppresses 409 response

    buffer = BytesIO()
    c = pycurl.Curl()
    c.setopt(c.URL, oauth.get_content_url() + 'nodes/' + node_id + '/content' + params)
    c.setopt(c.HTTPHEADER, ['Authorization: ' + oauth.get_auth_token()])
    c.setopt(c.WRITEDATA, buffer)
    c.setopt(c.HTTPPOST, [('content', (c.FORM_FILE, file_name.encode('UTF-8')))])
    c.setopt(c.CUSTOMREQUEST, 'PUT')
    c.setopt(c.NOPROGRESS, 0)
    c.setopt(c.PROGRESSFUNCTION, progress)
    if logger.getEffectiveLevel() == logging.DEBUG:
        c.setopt(c.VERBOSE, 1)

    try:
        c.perform()
    except pycurl.error as e:
        raise RequestError(0, e)

    status = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    print()  # break progress line

    body = buffer.getvalue().decode('utf-8')

    if status != http.OK:
        # print('Overwriting "%s" failed.' % file_name)
        raise RequestError(status, body)

    return json.loads(body)
Example #27
def resguardoPdf(request, pk):

    resguardo = Resguardo.objects.get(id=pk)
    nombre = 'resguardo_' + str(resguardo.id)
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename=%s.pdf' % nombre
    buffer = BytesIO()
    c = canvas.Canvas(buffer, pagesize=letter)

    def header():

        c.drawImage('static/images/CFPPCH.png', 10, 670, 130, 130)
        c.setLineWidth(.3)
        c.setFont('Helvetica-Bold', 20)
        c.drawString(120, 750, 'CEFPP')
        c.drawString(160, 740, '')  # placeholder: the original call omitted its text argument

        c.setFont('Helvetica-Bold', 15)
        c.drawString(480, 750, 'Inventario')

    c.setFillColorRGB(1, 0, 0)  # reportlab colour components range from 0 to 1
    c.setFont('Helvetica', 12)
    c.drawString(485, 735, resguardo.inventario)

    c.line(460, 747, 560, 747)

    header()

    c.showPage()
    c.save()
    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response
Example #28
 def test_simple_bytesio(self):
     f = BytesIO()
     c = Commit()
     c.committer = c.author = b"Jelmer <*****@*****.**>"
     c.commit_time = c.author_time = 1271350201
     c.commit_timezone = c.author_timezone = 0
     c.message = b"This is the first line\nAnd this is the second line.\n"
     c.tree = Tree().id
     write_commit_patch(f, c, b"CONTENTS", (1, 1), version="custom")
     f.seek(0)
     lines = f.readlines()
     self.assertTrue(lines[0].startswith(b"From 0b0d34d1b5b596c928adc9a727a4b9e03d025298"))
     self.assertEqual(lines[1], b"From: Jelmer <*****@*****.**>\n")
     self.assertTrue(lines[2].startswith(b"Date: "))
     self.assertEqual(
         [
             b"Subject: [PATCH 1/1] This is the first line\n",
             b"And this is the second line.\n",
             b"\n",
             b"\n",
             b"---\n",
         ],
         lines[3:8],
     )
     self.assertEqual([b"CONTENTS-- \n", b"custom\n"], lines[-2:])
     if len(lines) >= 12:
         # diffstat may not be present
         self.assertEqual(lines[8], b" 0 files changed\n")
Example #29
def upload_file(file_name, parent=None):
    params = '?suppress=deduplication'  # suppresses 409 response

    metadata = {'kind': 'FILE', 'name': os.path.basename(file_name)}
    if parent:
        metadata['parents'] = [parent]

    buffer = BytesIO()
    c = pycurl.Curl()
    c.setopt(c.URL, oauth.get_content_url() + 'nodes' + params)
    c.setopt(c.HTTPHEADER, ['Authorization: ' + oauth.get_auth_token()])
    c.setopt(c.WRITEDATA, buffer)
    c.setopt(c.HTTPPOST, [('metadata', json.dumps(metadata)),
                          ('content', (c.FORM_FILE, file_name.encode('UTF-8')))])
    c.setopt(c.NOPROGRESS, 0)
    c.setopt(c.PROGRESSFUNCTION, progress)
    if logger.getEffectiveLevel() == logging.DEBUG:
        c.setopt(c.VERBOSE, 1)

    try:
        c.perform()
    except pycurl.error as e:
        raise RequestError(0, e)

    status = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    print()  # break progress line

    body = buffer.getvalue().decode('utf-8')

    if status != http.CREATED:
        # print('Uploading "%s" failed.' % file_name)
        raise RequestError(status, body)

    return json.loads(body)
Example #30
 def test_authoritativeMessage(self):
     """
     The L{RRHeader} instances created by L{Message} from an authoritative
     message are marked as authoritative.
     """
     buf = BytesIO()
     answer = dns.RRHeader(payload=dns.Record_A('1.2.3.4', ttl=0))
     answer.encode(buf)
     data = (
         b'\x01\x00' # Message ID
         # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
         b'\x04'
         # recursion bit, empty bit, empty bit, empty bit, response code
         # nibble
         b'\x00'
         b'\x00\x00' # number of queries
         b'\x00\x01' # number of answers
         b'\x00\x00' # number of authorities
         b'\x00\x00' # number of additionals
         + buf.getvalue()
         )
     answer.auth = True
     self.parser.updateData(data)
     message = self.parser.message()
     self.assertEqual(message.answers, [answer])
     self.assertTrue(message.answers[0].auth)
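As a cross-check on the byte-level comments above, the hand-assembled header follows the standard 12-byte DNS layout and unpacks cleanly with struct; a self-contained sketch:

import struct

header = (b'\x01\x00'   # message ID
          b'\x04\x00'   # QR/opcode/AA/TC/RD byte, then RA/Z/rcode byte
          b'\x00\x00'   # QDCOUNT
          b'\x00\x01'   # ANCOUNT
          b'\x00\x00'   # NSCOUNT
          b'\x00\x00')  # ARCOUNT
msg_id, flags, qd, an, ns, ar = struct.unpack('!HHHHHH', header)
assert (msg_id, qd, an, ns, ar) == (0x0100, 0, 1, 0, 0)
assert flags & 0x0400  # the authoritative-answer (AA) bit, as in the test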
Example #31
class Renderer(object):
    """Helper class for building DNS wire-format messages.
    Most applications can use the higher-level L{thirdparty.dns.message.Message}
    class and its to_wire() method to generate wire-format messages.
    This class is for those applications which need finer control
    over the generation of messages.
    Typical use::
        r = thirdparty.dns.renderer.Renderer(id=1, flags=0x80, max_size=512)
        r.add_question(qname, qtype, qclass)
        r.add_rrset(thirdparty.dns.renderer.ANSWER, rrset_1)
        r.add_rrset(thirdparty.dns.renderer.ANSWER, rrset_2)
        r.add_rrset(thirdparty.dns.renderer.AUTHORITY, ns_rrset)
        r.add_edns(0, 0, 4096)
        r.add_rrset(thirdparty.dns.renderer.ADDITIONAL, ad_rrset_1)
        r.add_rrset(thirdparty.dns.renderer.ADDITIONAL, ad_rrset_2)
        r.write_header()
        r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
        wire = r.get_wire()
    Attributes:
    output: a BytesIO where the rendering is written
    id: the message id
    flags: the message flags
    max_size: the maximum size of the message
    origin: the origin to use when rendering relative names
    compress: the compression table
    section: an int, the section currently being rendered
    counts: list of the number of RRs in each section
    mac: the MAC of the rendered message (if TSIG was used)
    """

    def __init__(self, id=None, flags=0, max_size=65535, origin=None):
        """Initialize a new renderer."""

        self.output = BytesIO()
        if id is None:
            self.id = random.randint(0, 65535)
        else:
            self.id = id
        self.flags = flags
        self.max_size = max_size
        self.origin = origin
        self.compress = {}
        self.section = QUESTION
        self.counts = [0, 0, 0, 0]
        self.output.write(b'\x00' * 12)
        self.mac = ''

    def _rollback(self, where):
        """Truncate the output buffer at offset *where*, and remove any
        compression table entries that pointed beyond the truncation
        point.
        """

        self.output.seek(where)
        self.output.truncate()
        keys_to_delete = []
        for k, v in self.compress.items():
            if v >= where:
                keys_to_delete.append(k)
        for k in keys_to_delete:
            del self.compress[k]

    def _set_section(self, section):
        """Set the renderer's current section.
        Sections must be rendered in order: QUESTION, ANSWER, AUTHORITY,
        ADDITIONAL.  Sections may be empty.
        Raises thirdparty.dns.exception.FormError if an attempt was made to set
        a section value less than the current section.
        """

        if self.section != section:
            if self.section > section:
                raise thirdparty.dns.exception.FormError
            self.section = section

    def add_question(self, qname, rdtype, rdclass=thirdparty.dns.rdataclass.IN):
        """Add a question to the message."""

        self._set_section(QUESTION)
        before = self.output.tell()
        qname.to_wire(self.output, self.compress, self.origin)
        self.output.write(struct.pack("!HH", rdtype, rdclass))
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise thirdparty.dns.exception.TooBig
        self.counts[QUESTION] += 1

    def add_rrset(self, section, rrset, **kw):
        """Add the rrset to the specified section.
        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.
        """

        self._set_section(section)
        before = self.output.tell()
        n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise thirdparty.dns.exception.TooBig
        self.counts[section] += n

    def add_rdataset(self, section, name, rdataset, **kw):
        """Add the rdataset to the specified section, using the specified
        name as the owner name.
        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.
        """

        self._set_section(section)
        before = self.output.tell()
        n = rdataset.to_wire(name, self.output, self.compress, self.origin,
                             **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise thirdparty.dns.exception.TooBig
        self.counts[section] += n

    def add_edns(self, edns, ednsflags, payload, options=None):
        """Add an EDNS OPT record to the message."""

        # make sure the EDNS version in ednsflags agrees with edns
        ednsflags &= 0xFF00FFFF
        ednsflags |= (edns << 16)
        self._set_section(ADDITIONAL)
        before = self.output.tell()
        self.output.write(struct.pack('!BHHIH', 0, thirdparty.dns.rdatatype.OPT, payload,
                                      ednsflags, 0))
        if options is not None:
            lstart = self.output.tell()
            for opt in options:
                stuff = struct.pack("!HH", opt.otype, 0)
                self.output.write(stuff)
                start = self.output.tell()
                opt.to_wire(self.output)
                end = self.output.tell()
                assert end - start < 65536
                self.output.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                self.output.write(stuff)
                self.output.seek(0, 2)
            lend = self.output.tell()
            assert lend - lstart < 65536
            self.output.seek(lstart - 2)
            stuff = struct.pack("!H", lend - lstart)
            self.output.write(stuff)
            self.output.seek(0, 2)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise thirdparty.dns.exception.TooBig
        self.counts[ADDITIONAL] += 1

    def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
                 request_mac, algorithm=thirdparty.dns.tsig.default_algorithm):
        """Add a TSIG signature to the message."""

        s = self.output.getvalue()
        (tsig_rdata, self.mac, ctx) = thirdparty.dns.tsig.sign(s,
                                                    keyname,
                                                    secret,
                                                    int(time.time()),
                                                    fudge,
                                                    id,
                                                    tsig_error,
                                                    other_data,
                                                    request_mac,
                                                    algorithm=algorithm)
        self._write_tsig(tsig_rdata, keyname)

    def add_multi_tsig(self, ctx, keyname, secret, fudge, id, tsig_error,
                       other_data, request_mac,
                       algorithm=thirdparty.dns.tsig.default_algorithm):
        """Add a TSIG signature to the message. Unlike add_tsig(), this can be
        used for a series of consecutive DNS envelopes, e.g. for a zone
        transfer over TCP [RFC2845, 4.4].
        For the first message in the sequence, give ctx=None. For each
        subsequent message, give the ctx that was returned from the
        add_multi_tsig() call for the previous message."""

        s = self.output.getvalue()
        (tsig_rdata, self.mac, ctx) = thirdparty.dns.tsig.sign(s,
                                                    keyname,
                                                    secret,
                                                    int(time.time()),
                                                    fudge,
                                                    id,
                                                    tsig_error,
                                                    other_data,
                                                    request_mac,
                                                    ctx=ctx,
                                                    first=ctx is None,
                                                    multi=True,
                                                    algorithm=algorithm)
        self._write_tsig(tsig_rdata, keyname)
        return ctx

    def _write_tsig(self, tsig_rdata, keyname):
        self._set_section(ADDITIONAL)
        before = self.output.tell()

        keyname.to_wire(self.output, self.compress, self.origin)
        self.output.write(struct.pack('!HHIH', thirdparty.dns.rdatatype.TSIG,
                                      thirdparty.dns.rdataclass.ANY, 0, 0))
        rdata_start = self.output.tell()
        self.output.write(tsig_rdata)

        after = self.output.tell()
        assert after - rdata_start < 65536
        if after >= self.max_size:
            self._rollback(before)
            raise thirdparty.dns.exception.TooBig

        self.output.seek(rdata_start - 2)
        self.output.write(struct.pack('!H', after - rdata_start))
        self.counts[ADDITIONAL] += 1
        self.output.seek(10)
        self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
        self.output.seek(0, 2)

    def write_header(self):
        """Write the DNS message header.
        Writing the DNS message header is done after all sections
        have been rendered, but before the optional TSIG signature
        is added.
        """

        self.output.seek(0)
        self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
                                      self.counts[0], self.counts[1],
                                      self.counts[2], self.counts[3]))
        self.output.seek(0, 2)

    def get_wire(self):
        """Return the wire format message."""

        return self.output.getvalue()
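A minimal end-to-end sketch of the class above, assuming the vendored thirdparty.dns modules mirror dnspython's API for names, rdata types and classes:

qname = thirdparty.dns.name.from_text('example.com.')
r = Renderer(id=1, flags=0x0100, max_size=512)  # 0x0100 = RD bit
r.add_question(qname, thirdparty.dns.rdatatype.A,
               thirdparty.dns.rdataclass.IN)
r.write_header()
wire = r.get_wire()
assert len(wire) > 12  # 12-byte header plus the encoded question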
Example #32
def main(year, etn):
    """Go Main Go"""
    pcursor = POSTGIS.cursor(cursor_factory=psycopg2.extras.DictCursor)

    basefn = "watch_%s_%s" % (year, etn)

    os.chdir("/tmp/")

    sql = """select
        ST_astext(ST_multi(ST_union(ST_SnapToGrid(u.geom,0.0001)))) as tgeom
        from warnings_%s w JOIN ugcs u on (u.gid = w.gid)
        WHERE significance = 'A'
        and phenomena IN ('TO','SV') and eventid = %s and
        ST_isvalid(u.geom)
        and issue < ((select issued from watches WHERE num = %s
        and extract(year from issued) = %s LIMIT 1) + '60 minutes'::interval)
    """ % (year, etn, etn, year)
    pcursor.execute(sql)
    if pcursor.rowcount == 0:
        sys.exit()

    shpio = BytesIO()
    shxio = BytesIO()
    dbfio = BytesIO()
    with shapefile.Writer(shx=shxio, shp=shpio, dbf=dbfio) as shp:
        shp.field('SIG', 'C', '1')
        shp.field('ETN', 'I', '4')

        row = pcursor.fetchone()
        s = row["tgeom"]
        f = wellknowntext.convert_well_known_text(s)
        shp.poly(f)
        shp.record('A', etn)

    zio = BytesIO()
    zf = zipfile.ZipFile(zio, mode='w',
                         compression=zipfile.ZIP_DEFLATED)
    zf.writestr(basefn + '.prj',
                open('/opt/iem/data/gis/meta/4326.prj').read())
    zf.writestr(basefn+".shp", shpio.getvalue())
    zf.writestr(basefn+'.shx', shxio.getvalue())
    zf.writestr(basefn+'.dbf', dbfio.getvalue())
    zf.close()
    ssw(("Content-Disposition: attachment; filename=%s.zip\n\n") % (basefn,))
    ssw(zio.getvalue())
Example #33
 def compress(inpil):
     pil = Image.open(inpil)
     pil.thumbnail((size, size), Image.LANCZOS)
     pil.save(outpil := BytesIO(), "PNG", optimize=True)
     return outpil
Example #34
def serialize(schema, *records):
    buffer = BytesIO()
    fastavro.writer(buffer, schema, records)
    serialized = buffer.getvalue()
    return serialized
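One caveat when pairing this with the deserialize() helper from Example #14: fastavro.writer emits the Avro object-container format, while schemaless_reader expects bare record bytes, so the two are not symmetric. The schemaless pair round-trips cleanly; a hedged sketch with an illustrative schema:

import fastavro
from io import BytesIO

schema = {
    "type": "record", "name": "Pair",
    "fields": [{"name": "key", "type": "string"},
               {"name": "value", "type": "long"}],
}
buf = BytesIO()
fastavro.schemaless_writer(buf, schema, {"key": "a", "value": 1})
buf.seek(0)
assert fastavro.schemaless_reader(buf, schema) == {"key": "a", "value": 1}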
Example #35
def import_data(bot: Bot, update):
    msg = update.effective_message  # type: Optional[Message]
    chat = update.effective_chat  # type: Optional[Chat]
    user = update.effective_user  # type: Optional[User]
    # TODO: allow uploading doc with command, not just as reply
    # only work with a doc
    conn = connected(bot, update, chat, user.id, need_admin=True)
    if conn:
        chat = dispatcher.bot.getChat(conn)
        chat_id = conn
        chat_name = dispatcher.bot.getChat(conn).title
    else:
        if update.effective_message.chat.type == "private":
            update.effective_message.reply_text(
                "This command can only be run in groups, not in PM.")
            return ""
        chat = update.effective_chat
        chat_id = update.effective_chat.id
        chat_name = update.effective_message.chat.title

    if msg.reply_to_message and msg.reply_to_message.document:
        filetype = msg.reply_to_message.document.file_name
        if filetype.split('.')[-1] not in ("backup", "json", "txt"):
            msg.reply_text("File is not valid!")
            return
        try:
            file_info = bot.get_file(msg.reply_to_message.document.file_id)
        except BadRequest:
            msg.reply_text(
                "Try downloading and uploading the file again yourself; this one seems broken!"
            )
            return

        with BytesIO() as file:
            file_info.download(out=file)
            file.seek(0)
            data = json.load(file)

        try:
            # If backup is from Monica
            if data.get('bot_base') == "Monica":
                imp_notes = 0
                NOT_IMPORTED = "This cannot be imported because from other bot."
                NOT_IMPORTED_INT = 0
                # If backup is from this bot, import all files
                if data.get('bot_id') == bot.id:
                    is_self = True
                else:
                    is_self = False

                # Import notes
                if data.get('notes'):
                    allnotes = data['notes']
                    NOT_IMPORTED += "\n\nNotes:\n"
                    for x in allnotes:
                        # If from self, import all
                        if is_self:
                            note_data, buttons = button_markdown_parser(
                                x['note_data'], entities=0)
                            note_name = x['note_tag']
                            note_file = None
                            note_type = x['note_type']
                            if x['note_file']:
                                note_file = x['note_file']
                            if note_type == 0:
                                note_type = Types.TEXT
                            elif note_type == 1:
                                note_type = Types.BUTTON_TEXT
                            elif note_type == 2:
                                note_type = Types.STICKER
                            elif note_type == 3:
                                note_type = Types.DOCUMENT
                            elif note_type == 4:
                                note_type = Types.PHOTO
                            elif note_type == 5:
                                note_type = Types.AUDIO
                            elif note_type == 6:
                                note_type = Types.VOICE
                            elif note_type == 7:
                                note_type = Types.VIDEO
                            elif note_type == 8:
                                note_type = Types.VIDEO_NOTE
                            else:
                                note_type = None
                            if note_type is not None:
                                notesql.add_note_to_db(chat_id, note_name,
                                                       note_data, note_type,
                                                       buttons, note_file)
                                imp_notes += 1
                        else:
                            # If this text
                            if x['note_type'] == 0:
                                note_data, buttons = button_markdown_parser(
                                    x['text'].replace("\\", ""), entities=0)
                                note_name = x['name']
                                notesql.add_note_to_db(chat_id, note_name,
                                                       note_data, Types.TEXT,
                                                       buttons, None)
                                imp_notes += 1
                            else:
                                NOT_IMPORTED += "- {}\n".format(x['name'])
                                NOT_IMPORTED_INT += 1

                if conn:
                    text = tl(update.effective_message,
                              "Full backup restored on *{}*. Welcome back! "
                              ).format(chat_name)
                else:
                    text = tl(update.effective_message,
                              "Backup fully restored on *{}*.\nDone with welcome backup! "
                              ).format(chat_name)
                try:
                    msg.reply_text(text, parse_mode="markdown")
                except BadRequest:
                    msg.reply_text(text, parse_mode="markdown", quote=False)
                if NOT_IMPORTED_INT:
                    f = open("{}-notimported.txt".format(chat_id), "w")
                    f.write(str(NOT_IMPORTED))
                    f.close()
                    bot.sendDocument(
                        chat_id,
                        document=open('{}-notimported.txt'.format(chat_id),
                                      'rb'),
                        caption=tl(update.effective_message,
                                   "*Data that can't be imported*"),
                        timeout=360,
                        parse_mode=ParseMode.MARKDOWN)
                    os.remove("{}-notimported.txt".format(chat_id))
                return
        except Exception as err:
            msg.reply_text(tl(
                update.effective_message,
                "An error occurred while importing the Monica backup!\nPing [my owner](https://t.me/kingofelephants) and ask for a solution!\n\nMaybe they can resolve your issue!"
            ),
                           parse_mode="markdown")
            LOGGER.exception("An error occurred while importing a Monica backup!")
            return

        # only import one group
        if len(data) > 1 and str(chat.id) not in data:
            msg.reply_text(
                "There is more than one group in this file and none matches this chat's id! How am I supposed to import it?"
            )
            return

        # Check if backup is this chat
        try:
            if data.get(str(chat.id)) is None:
                if conn:
                    text = "Backup comes from another chat, I can't return another chat to chat *{}*".format(
                        chat_name)
                else:
                    text = "Backup comes from another chat, I can't return another chat to this chat"
                return msg.reply_text(text, parse_mode="markdown")
        except:
            return msg.reply_text(
                "There is problem while importing the data! Please ask in @HarukaAyaGroup about why this happened."
            )
        # Check if backup is from self
        try:
            if str(bot.id) != str(data[str(chat.id)]['bot']):
                return msg.reply_text(
                    "Restoring a backup from another bot is not suggested and might cause problems: documents, photos, videos, audios and voice records might not work as they should. However, you can still request a feature regarding this in @HarukaAyaGroup !"
                )
        except:
            pass
        # Select data source
        if str(chat.id) in data:
            data = data[str(chat.id)]['hashes']
        else:
            data = data[list(data.keys())[0]]['hashes']

        try:
            for mod in DATA_IMPORT:
                mod.__import_data__(str(chat.id), data)
        except Exception:
            msg.reply_text(
                "An error occurred while recovering your data. The process failed. If you experience a problem with this, please ask in @HarukaAyaGroup . My owner and community will be happy to help. Also, bug reports make me even better!\nThank you!"
            )

            LOGGER.exception("Imprt for the chat %s with the name %s failed.",
                             str(chat.id), str(chat.title))
            return

        # TODO: some of that link logic
        # NOTE: consider default permissions stuff?
        if conn:

            text = "Backup fully restored on *{}*.".format(chat_name)
        else:
            text = "Backup fully restored"
        msg.reply_text(text, parse_mode="markdown")
Example #36
async def analyze(request):
    img_data = await request.form()
    img_bytes = await (img_data['file'].read())
    img = open_image(BytesIO(img_bytes))
    prediction = learn.predict(img)[0]
    return JSONResponse({'result': str(prediction)})
Example #37
class seek_wrapper:
	"""Adds a seek method to a file object.

	This is only designed for seeking on readonly file-like objects.

	Wrapped file-like object must have a read method.  The readline method is
	only supported if that method is present on the wrapped object.  The
	readlines method is always supported.  xreadlines and iteration are
	supported only for Python 2.2 and above.

	Public attributes:

	wrapped: the wrapped file object
	is_closed: true iff .close() has been called

	WARNING: All other attributes of the wrapped object (i.e. those that are not
	one of wrapped, read, readline, readlines, xreadlines, __iter__ and next)
	are passed through unaltered, which may or may not make sense for your
	particular file object.

	"""

	# General strategy is to check that cache is full enough, then delegate to
	# the cache (self.__cache, which is a BytesIO instance).  A seek
	# position (self.__pos) is maintained independently of the cache, in order
	# that a single cache may be shared between multiple seek_wrapper objects.
	# Copying using module copy shares the cache in this way.

	def __init__(self, wrapped):
		self.wrapped = wrapped
		self.__read_complete_state = [False]
		self.__is_closed_state = [False]
		self.__have_readline = hasattr(self.wrapped, "readline")
		self.__cache = BytesIO()
		self.__pos = 0  # seek position

	def invariant(self):
		# The end of the cache is always at the same place as the end of the
		# wrapped file (though the .tell() method is not required to be present
		# on wrapped file).
		return self.wrapped.tell() == len(self.__cache.getvalue())

	def close(self):
		self.wrapped.close()
		self.is_closed = True

	def __getattr__(self, name):
		if name == "is_closed":
			return self.__is_closed_state[0]
		elif name == "read_complete":
			return self.__read_complete_state[0]

		wrapped = self.__dict__.get("wrapped")
		if wrapped:
			return getattr(wrapped, name)

		return getattr(self.__class__, name)

	def __setattr__(self, name, value):
		if name == "is_closed":
			self.__is_closed_state[0] = bool(value)
		elif name == "read_complete":
			if not self.is_closed:
				self.__read_complete_state[0] = bool(value)
		else:
			self.__dict__[name] = value

	def seek(self, offset, whence=0):
		assert whence in [0, 1, 2]

		# how much data, if any, do we need to read?
		if whence == 2:  # 2: relative to end of *wrapped* file
			if offset < 0:
				raise ValueError("negative seek offset")
			# since we don't know yet where the end of that file is, we must
			# read everything
			to_read = None
		else:
			if whence == 0:  # 0: absolute
				if offset < 0:
					raise ValueError("negative seek offset")
				dest = offset
			else:  # 1: relative to current position
				pos = self.__pos
				if pos + offset < 0:
					raise ValueError("seek to before start of file")
				dest = pos + offset
			end = len_of_seekable(self.__cache)
			to_read = dest - end
			if to_read < 0:
				to_read = 0

		if to_read != 0:
			self.__cache.seek(0, 2)
			if to_read is None:
				assert whence == 2
				self.__cache.write(self.wrapped.read())
				self.read_complete = True
				self.__pos = self.__cache.tell() - offset
			else:
				data = self.wrapped.read(to_read)
				if not data:
					self.read_complete = True
				else:
					self.__cache.write(data)
				# Don't raise an exception even if we've seek()ed past the end
				# of .wrapped, since fseek() doesn't complain in that case.
				# Also like fseek(), pretend we have seek()ed past the end,
				# i.e. not:
				# self.__pos = self.__cache.tell()
				# but rather:
				self.__pos = dest
		else:
			self.__pos = dest

	def tell(self):
		return self.__pos

	def __copy__(self):
		cpy = self.__class__(self.wrapped)
		cpy.__cache = self.__cache
		cpy.__read_complete_state = self.__read_complete_state
		cpy.__is_closed_state = self.__is_closed_state
		return cpy

	def get_data(self):
		pos = self.__pos
		try:
			self.seek(0)
			return self.read(-1)
		finally:
			self.__pos = pos

	def read(self, size=-1):
		pos = self.__pos
		end = len_of_seekable(self.__cache)
		available = end - pos

		# enough data already cached?
		if size <= available and size != -1:
			self.__cache.seek(pos)
			self.__pos = pos + size
			return self.__cache.read(size)

		# no, so read sufficient data from wrapped file and cache it
		self.__cache.seek(0, 2)
		if size == -1:
			self.__cache.write(self.wrapped.read())
			self.read_complete = True
		else:
			to_read = size - available
			assert to_read > 0
			data = self.wrapped.read(to_read)
			if not data:
				self.read_complete = True
			else:
				self.__cache.write(data)
		self.__cache.seek(pos)

		data = self.__cache.read(size)
		self.__pos = self.__cache.tell()
		assert self.__pos == pos + len(data)
		return data

	def readline(self, size=-1):
		if not self.__have_readline:
			raise NotImplementedError("no readline method on wrapped object")

		# line we're about to read might not be complete in the cache, so
		# read another line first
		pos = self.__pos
		self.__cache.seek(0, 2)
		data = self.wrapped.readline()
		if not data:
			self.read_complete = True
		else:
			self.__cache.write(data)
		self.__cache.seek(pos)

		data = self.__cache.readline()
		if size != -1:
			r = data[:size]
			self.__pos = pos + len(r)
		else:
			r = data
			self.__pos = pos + len(data)
		return r

	def readlines(self, sizehint=-1):
		pos = self.__pos
		self.__cache.seek(0, 2)
		self.__cache.write(self.wrapped.read())
		self.read_complete = True
		self.__cache.seek(pos)
		data = self.__cache.readlines(sizehint)
		self.__pos = self.__cache.tell()
		return data

	def __iter__(self):
		return self

	def __next__(self):
		line = self.readline()
		if line == "":
			raise StopIteration
		return line
	next = __next__

	xreadlines = __iter__

	def __repr__(self):
		return ("<%s at %s whose wrapped object = %r>" %
				(self.__class__.__name__, hex(abs(id(self))), self.wrapped))
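A hedged usage sketch (assuming mechanize's len_of_seekable helper is in scope, as in the module this class comes from): wrap a forward-only reader and seeking then works against the cache.

from io import BytesIO

class ForwardOnly:
    # A read-only stand-in for a non-seekable stream such as a socket file.
    def __init__(self, data):
        self._buf = BytesIO(data)
        self.read = self._buf.read
        self.readline = self._buf.readline
        self.tell = self._buf.tell
        self.close = self._buf.close

sw = seek_wrapper(ForwardOnly(b"line one\nline two\n"))
assert sw.readline() == b"line one\n"
sw.seek(0)  # rewind via the cache
assert sw.read() == b"line one\nline two\n"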
Example #38
def send_prediction_on_photo(update, context):
    global storage
    global storage1
    # We need two pictures to perform the style transfer, but each picture
    # arrives in a separate update, so in the simplest case we store the id
    # of the first picture in memory and, once the second arrives, load both
    # pictures into memory and process them. Definitely room for improvement.
    bot = context.bot
    if update.message.text == '/style':
        storage.append('1')
        update.message.reply_text(
            "Send 2 photos: the first is what you want to change, the second is the style you want to get (any painting by an artist)."
        )

    elif len(storage) != 0:
        chat_id = update.message.chat_id
        print("Got image from {}".format(chat_id))

        # fetch the picture's file info
        image_info = update.message.photo[-1]
        image_file = bot.get_file(image_info)

        if chat_id in first_image_file:
            # the first picture we received becomes the content image, the second the style image
            print("running style_transfer")
            content_image_stream = BytesIO()
            first_image_file[chat_id].download(out=content_image_stream)
            del first_image_file[chat_id]

            style_image_stream = BytesIO()
            image_file.download(out=style_image_stream)
            output = model.transfer_style(content_image_stream,
                                          style_image_stream,
                                          num_steps=300)

            # now send the photo back
            output_stream = BytesIO()
            output.save(output_stream, format='PNG')
            output_stream.seek(0)
            bot.send_photo(chat_id, photo=output_stream)
            print("Sent Photo to user")
            storage = []
        else:
            first_image_file[chat_id] = image_file

    if update.message.text == '/photo_real':
        storage1.append('1')
        update.message.reply_text(
            "Send 2 photos: the first is a landscape, the second is the style you want to transfer onto your photo."
        )

    elif len(storage1) != 0:
        chat_id = update.message.chat_id
        print("Got image from {}".format(chat_id))

        # fetch the picture's file info
        image_info = update.message.photo[-1]
        image_file = bot.get_file(image_info)

        if chat_id in first_image_file:
            # the first picture we received becomes the content image, the second the style image
            print("running photo_real")
            content_image_stream = BytesIO()
            first_image_file[chat_id].download(out=content_image_stream)
            del first_image_file[chat_id]

            style_image_stream = BytesIO()
            image_file.download(out=style_image_stream)
            process_stylization.stylization(
                stylization_module=p_wct,
                smoothing_module=p_pro,
                content_image_path=content_image_stream,
                style_image_path=style_image_stream,
                content_seg_path=[],
                style_seg_path=[],
                output_image_path='img.jpg',
                cuda=False,
                save_intermediate=False,
                no_post=False)
            output = Image.open('img.jpg')
            # now send the photo back
            output_stream = BytesIO()
            output.save(output_stream, format='PNG')
            output_stream.seek(0)
            bot.send_photo(chat_id, photo=output_stream)
            storage1 = []
            print("Sent Photo to user")
        else:
            first_image_file[chat_id] = image_file
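
Every branch above follows the same BytesIO round trip: download bytes, decode with PIL, re-encode, rewind, send. That core pattern isolated (a hedged sketch assuming only Pillow; the bot and model objects are external to this snippet):

from io import BytesIO
from PIL import Image

def to_png_stream(raw: bytes) -> BytesIO:
    """Decode incoming image bytes and re-encode them as an in-memory PNG,
    rewound to position 0 so it can go straight into bot.send_photo()."""
    image = Image.open(BytesIO(raw))
    out = BytesIO()
    image.save(out, format='PNG')
    out.seek(0)
    return out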
Example #39
0
    def _load(self):
        self.document = WordprocessingDocument(path=None)
        package = self.document.package
        document_part = package.create_part(uri='/word/document.xml')

        if self.styles_xml:
            self.relationships.append({
                'external': False,
                'target_path': 'styles.xml',
                'data': self.styles_xml,
                'relationship_id': 'styles',
                'relationship_type': StyleDefinitionsPart.relationship_type,
            })

        for relationship in self.relationships:
            target_mode = 'Internal'
            if relationship['external']:
                target_mode = 'External'
            target_uri = relationship['target_path']
            if 'data' in relationship:
                full_target_uri = posixpath.join(
                    package.uri,
                    'word',
                    target_uri,
                )
                package.streams[full_target_uri] = BytesIO(
                    relationship['data'], )
                package.create_part(uri=full_target_uri)
            document_part.create_relationship(
                target_uri=target_uri,
                target_mode=target_mode,
                relationship_type=relationship['relationship_type'],
                relationship_id=relationship['relationship_id'],
            )

        package.streams[document_part.uri] = BytesIO(self.document_xml)
        package.create_relationship(
            target_uri=document_part.uri,
            target_mode='Internal',
            relationship_type=MainDocumentPart.relationship_type,
        )

        self.numbering_root = None
        if self.numbering_dict is not None:
            self.numbering_root = parse_xml_from_string(
                DXB.numbering(self.numbering_dict), )

        # This is the standard page width for a word document (in points), Also
        # the page width that we are looking for in the test.
        self.page_width = 612

        self.styles_manager = StylesManager(
            self.document.main_document_part.style_definitions_part, )
        self.styles = self.styles_manager.styles

        self.parse_begin(self.document.main_document_part.root_element)
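
The loop above relies on one storage convention: every package part is an in-memory stream keyed by its URI. A stripped-down sketch of just that convention (the real package/part classes come from the surrounding docx library):

from io import BytesIO
import posixpath

streams = {}  # uri -> BytesIO, standing in for package.streams

def add_part(package_uri: str, target_path: str, data: bytes) -> str:
    """Register raw XML bytes under a package-relative URI."""
    full_uri = posixpath.join(package_uri, 'word', target_path)
    streams[full_uri] = BytesIO(data)
    return full_uri

uri = add_part('/', 'styles.xml', b'<w:styles/>')
assert streams[uri].read() == b'<w:styles/>'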
Example #40
0
async def cameraLoop():

    global continueLoop
    global freshParams
    await asyncio.sleep(1)

    while True:
        try:
            log = logging.getLogger("cameraLoop")

            params = cleanParams(freshParams)
            shootresol = params["shootresol"]
            strtResolution = (shootresol["width"], shootresol["height"])

            log.info("OpeningCamera at resol %s", (strtResolution, ))
            with PiCamera(resolution=strtResolution,
                          framerate_range=(0.1, 30)) as camera:
                log.info("got camera")

                continueLoop = True
                while continueLoop:
                    params = cleanParams(freshParams)
                    shootresol = params["shootresol"]
                    dispresol = (params["dispresol"]["width"],
                                 params["dispresol"]["height"])
                    if (shootresol["width"] != strtResolution[0]):
                        log.info("resolution changed!")
                        break
                    log.info("looping %s", params)

                    capture_format = params["capture_format"]

                    # setting camera config
                    camsetting_start = time.time()

                    camera.iso = params["isovalue"]
                    camera.brightness = params["brightness"]
                    camera.contrast = params["contrast"]
                    # camera.analog_gain=1.0
                    camera.exposure_mode = params["expomode"]
                    camera.exposure_compensation = params[
                        "exposure_compensation"]
                    camera.saturation = params["saturation"]

                    camera.shutter_speed = params["shutterSpeed"]

                    log.info("setting awb_mode off")
                    camera.awb_mode = 'off'

                    g = (params["redgain"], params["bluegain"])
                    camera.awb_gains = g

                    camsetting_dur = time.time() - camsetting_start

                    log.info("create databuff")
                    if (capture_format in ["yuv"]):
                        stream = open("/dev/shm/lol.data", "w+b")
                    else:
                        stream = BytesIO()

                    log.info("capture start capture_format:%s", capture_format)
                    triggerDate = time.time()
                    camera.capture(stream,
                                   format=capture_format,
                                   use_video_port=False)

                    capture_dur = time.time() - triggerDate

                    # into pill
                    log.info("reading image")
                    strtTime = time.time()
                    if (capture_format == "jpeg"):
                        stream.seek(0)
                        image = Image.open(stream)

                    elif (capture_format == "yuv"):
                        stream.seek(0)
                        rgb = imgutils.yuvbytesToRgb(stream, *strtResolution)
                        image = Image.fromarray(rgb)
                    elif (capture_format == "rgb"):
                        stream.seek(0)
                        image = Image.frombytes("RGB", strtResolution,
                                                stream.getvalue(), "raw",
                                                "RGB", 0, 1)
                        #image = Image.frombuffer("RGB",strtResolution,stream,"raw","RGB",0,1)
                    pil_dur = time.time() - strtTime

                    #log.info(image.size)
                    strtTime = time.time()
                    #histData = imgutils.colorHist(image)
                    hist_dur = time.time() - strtTime

                    log.info("resizing")
                    strtTime = time.time()
                    imageDisplay = imgutils.resizeImage(image, dispresol)
                    resize_dur = time.time() - strtTime

                    # save original image
                    strtTime = time.time()
                    save_format = params["save_format"]
                    save_section = params["save_section"]
                    save_subsection = params["save_subsection"]

                    if save_format != "none":
                        IMGBUFF.stack((image, save_format, save_section,
                                       save_subsection, triggerDate))

                    save_dur = time.time() - strtTime

                    # publish image to server
                    usedParams = {
                        "triggerDate": triggerDate,
                        "gains": [float(_) for _ in camera.awb_gains],
                        "analog_gain": float(camera.analog_gain),
                        "iso": camera.iso,
                        "brightness": camera.brightness,
                        "saturation": camera.saturation,
                        "contrast": camera.contrast,
                        "exposure_compensation": camera.exposure_compensation,
                        "resolution": list(strtResolution),
                        "imageSize": image.size,
                        "shutterSpeed": camera.shutter_speed,
                        "exposure_speed": camera.exposure_speed,
                        "exposure_mode": camera.exposure_mode,
                        "awb_mode": camera.awb_mode,
                        "capture_format": capture_format,
                        "save_format": save_format,
                        "save_section": save_section,
                        "camsetting_dur": camsetting_dur,
                        "capture_dur": capture_dur,
                        "pil_dur": pil_dur,
                        "hist_dur": hist_dur,
                        "resize_dur": resize_dur,
                        "save_dur": save_dur,
                    }

                    strtTime = time.time()
                    if serverConnection:
                        try:
                            data = imgutils.pilimTobase64Jpg(imageDisplay)
                            msg = json.dumps({
                                "usedParams": usedParams,
                                "msgtype": "srcimage",
                                "imageData": data
                            })
                            await serverConnection.send(msg)
                        except Exception as e:
                            log.exception("err %s", e)
                    else:
                        log.info("no server")
                    # end sending
                    send_dur = time.time() - strtTime

                    # now send timing data
                    timingData = {
                        "triggerDate": triggerDate,
                        "imgbuffcount": len(IMGBUFF.content),
                        "send_dur": send_dur,
                        "usedParams": usedParams
                    }

                    msg = ",".join([
                        "%s: %.2f" % (k, timingData[k])
                        for k in sorted(timingData.keys()) if "dur" in k
                    ])
                    log.info("timing info %s , %s in buff", msg,
                             len(IMGBUFF.content))

                    if serverConnection:
                        msg = makeMessage("camTiming", timingData, jdump=True)
                        await serverConnection.send(msg)
                    await asyncio.sleep(.0001)
                # end while
                log.info("ending capture loop, camera will be reopened")

            #end with camera
            #no camera here

            log.info("camera closed")
        except Exception as e:
            log.exception("whooops")
            await asyncio.sleep(1)
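
imgutils.pilimTobase64Jpg is called above but not defined in this excerpt; a plausible stand-in, hypothetical and matching only the call site, would be:

import base64
from io import BytesIO
from PIL import Image

def pilim_to_base64_jpg(image: Image.Image) -> str:
    """Encode a PIL image as an in-memory JPEG and return it base64-encoded,
    ready to embed in a JSON message."""
    buf = BytesIO()
    image.convert('RGB').save(buf, format='JPEG')
    return base64.b64encode(buf.getvalue()).decode('ascii')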
Example #41
0
 def test_stdin_binary_mode(self):
     with mock.patch('sys.stdin') as m_stdin:
         m_stdin.buffer = BytesIO(b'spam, bacon, sausage, and spam')
         fi = FileInput(files=['-'], mode='rb')
         lines = list(fi)
         self.assertEqual(lines, [b'spam, bacon, sausage, and spam'])
Example #42
0
def FromHex(obj, hex_string):
    obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
    return obj


def download_file(url: str) -> str:
    resp = requests.get(url)
    with zipfile.ZipFile(BytesIO(resp.content)) as zip_ref:
        zip_ref.extractall(FILES_DIR)
    return url


import subprocess
import sys

dataSplit = recvData.split()
raspberryPiID = dataSplit[0]
cameraID = dataSplit[1]
photoID = dataSplit[2]

num_df = pd.read_sql(sql="select max(num) from images", con=engine)
num = num_df.values[0][0]

img_df = pd.read_sql(sql="select * from images where num = " + str(num),
                     con=engine)
img_str = img_df['data'].values[0]
img = base64.decodebytes(img_str)
im = Image.open(BytesIO(img))

tempname = 'temp.jpg'
im.save('imgs/' + tempname)

filename = glob('detectplate.py')[0]
subprocess.call([
    'python', filename, '--images', 'imgs/' + tempname, '--det', 'det',
    '--photoID', photoID, '--cameraID', cameraID
])

buffer = BytesIO()
im = Image.open('det/det_' + tempname)

im.save(buffer, format='jpeg')
img_str2 = base64.b64encode(buffer.getvalue())
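
The script's core is a base64/BytesIO round trip between the database and PIL. Isolated into two helpers (illustrative names, same calls as above):

import base64
from io import BytesIO
from PIL import Image

def b64_to_image(b64_bytes: bytes) -> Image.Image:
    """base64 (as stored in the database) -> raw bytes -> PIL image."""
    return Image.open(BytesIO(base64.decodebytes(b64_bytes)))

def image_to_b64(image: Image.Image) -> bytes:
    """PIL image -> in-memory JPEG -> base64 bytes."""
    buf = BytesIO()
    image.save(buf, format='jpeg')
    return base64.b64encode(buf.getvalue())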
Example #45
0
 def assertPixelColor(self, response, x, y, color):
     img = Image.open(BytesIO(response.content))
     self.assertEqual(color, img.getpixel((x, y)))
Example #46
0

def log(s):
    # print(s)
    date = datetime.now().strftime("%Y-%m-%d\t%H:%M:%S")
    with open('logs/events.log', 'a') as f:
        f.write(f"[{date}]\t{s}\n")

log("[IMPORTING]\tImports Done!")

lemmatizer = WordNetLemmatizer()
intents = json.loads(open('src/intents.json').read())
words = pickle.load(open('Data/words.pkl', 'rb'))
classes = pickle.load(open('Data/classes.pkl', 'rb'))
mp3_fp = BytesIO()

try:
    model = load_model('Data/model.h5')
    log("[SUCCESS]\tModel loaded.")

except Exception:
    log("[FAILED]\tModel not found.")
    log("[PROCESSING]\tTraining model.")

def speak(audio):
    tts = gTTS(audio, lang='en', tld='co.uk')
    tts.save("Data/temp.mp3")
    os.system("mpg123 -q Data/temp.mp3")
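
mp3_fp is created above but never used; gTTS can write into such a buffer directly via write_to_fp, which would avoid the temp-file round trip in speak(). A hedged sketch of that variant:

from io import BytesIO
from gtts import gTTS

def speak_in_memory(audio: str) -> BytesIO:
    """Synthesize speech straight into a BytesIO instead of Data/temp.mp3."""
    mp3_fp = BytesIO()
    tts = gTTS(audio, lang='en', tld='co.uk')
    tts.write_to_fp(mp3_fp)
    mp3_fp.seek(0)
    return mp3_fp  # hand to any MP3-capable player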

Example #47
0
def test_formdata_file_upload_missing_param(simple_app):
    app_client = simple_app.app.test_client()
    resp = app_client.post(
        '/v1.0/test-formData-file-upload-missing-param',
        data={'missing_formData': (BytesIO(b'file contents'), 'example.txt')})
    assert resp.status_code == 200
Example #48
0
def createPDF(datasetID):
    """
    Get the ID of the dataset protocol and creates a PDF of this protocol using the reportlab tools
    :param datasetID:
    :return: a HTTPResponse PDF object
    """

    # Get all the information of this protocol
    basicInfo = BasicDataset.objects.get(id=datasetID)
    partnerInfo = Partner.objects.filter(dataset_id=datasetID)
    reqInfo = DataReq.objects.filter(dataset_id=datasetID).order_by('taskNr')
    stepInfo = ExpStep.objects.filter(dataset_id=datasetID).order_by('taskNr')
    reportingInfo = Reporting.objects.filter(dataset_id=datasetID).order_by('taskNr')

    # Create the HttpResponse object with the appropriate PDF headers.
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename=' + basicInfo.shortname + '.pdf'

    styles = {
        'default': ParagraphStyle(
            'default',
            fontName='Times-Roman',
            fontSize=10,
            leading=11,
            leftIndent=0,
            rightIndent=0,
            firstLineIndent=0,
            spaceBefore=0,
            spaceAfter=0,
            textColor=black,
            backColor=None,
            wordWrap=None,
            borderWidth=0,
            borderPadding=0,
            borderColor=None,
            borderRadius=None,
            allowWidows=1,
            allowOrphans=0,
            endDots=None,
            splitLongWords=1,
        ),
    }

    styles['title1'] = ParagraphStyle(
        'title1',
        parent=styles['default'],
        fontName='Helvetica-Bold',
        fontSize=24,
        leading=30,
        alignment=TA_CENTER,
        textColor=black,
    )

    styles['title2'] = ParagraphStyle(
        'title2',
        parent=styles['default'],
        fontName='Helvetica',
        fontSize=14,
        leading=18,
        leftIndent=5,
        alignment=TA_LEFT,
        textColor=black,
        spaceBefore=0,
        borderRadius=None,
        firstLineIndent=0,
        underlineProportion=0.0,
        rightIndent=0,
        wordWrap=None,
        allowWidows=1,
        backColor=Color(.9, .9, .9),
        justifyLastLine=0,
        textTransform=None,
        justifyBreaks=0,
        spaceShrinkage=0.05,
        splitLongWords=1,
        bulletFontSize=10,
        borderWidth=1,
        borderPadding=2,
        endDots=None,
        spaceAfter=6,
    )

    styles['label'] = ParagraphStyle(
        'label',
        parent=styles['default'],
        fontName='Times-Bold',
    )

    buffer = BytesIO()
    doc = BaseDocTemplate(buffer)

    doc.addPageTemplates(
        [
            PageTemplate(
                frames=[
                    Frame(
                        doc.leftMargin,
                        doc.bottomMargin,
                        doc.width,
                        doc.height,
                        id=None
                    ),
                ]
            ),
        ]
    )


    # container for the 'Flowable' objects
    story = []

    scriptDir = os.path.dirname(__file__)
    im = Image(os.path.join(scriptDir, "static/img/sologo_new_cropped.png"), width=2.6*cm, height=2*cm)
    im.hAlign = 'RIGHT'

    data = [[im, Paragraph('{}'.format(basicInfo.shortname), styles['title1'])]]

    t=Table(data, hAlign='LEFT', colWidths=[2 * cm, 14 * cm])
    t.setStyle(TableStyle([('VALIGN',(0,0),(-1,-1),'TOP')]))
    story.append(t)
    story.append(Spacer(1, 16))


    story.append(Paragraph('Experiment Information', styles['title2']))
    data= [[Paragraph('Full experiment name:', styles['label']), Paragraph(basicInfo.title, styles['default'])],
           [Paragraph('Experiment Idea:', styles['label']), Paragraph(basicInfo.experimentIdea.replace('\n','<br />\n'), styles['default'])],
           [Paragraph('Hypothesis:', styles['label']), Paragraph(basicInfo.hypothesis.replace('\n','<br />\n'), styles['default'])],
           [Paragraph('Research objective:', styles['label']), Paragraph(basicInfo.researchObjective.replace('\n','<br />\n'), styles['default'])]]

    t=Table(data, hAlign='LEFT', colWidths=[4 * cm, 12 * cm])
    t.setStyle(TableStyle([('VALIGN',(0,0),(-1,-1),'TOP')]))
    story.append(t)
    story.append(Spacer(1, 24))


    # Partners
    story.append(Paragraph('Partners', styles['title2']))

    for partner in partnerInfo:

        partnerName = Paragraph('{}'.format(partner.name), styles['default'])
        if partner.lead == True:
            partnerName = Paragraph('{} (lead)'.format(partner.name), styles['default'])

        data = [[Paragraph('Name:', styles['label']), partnerName],
            [Paragraph('E-mail:', styles['label']), Paragraph(partner.email, styles['default'])],
           [Paragraph('Organisation:', styles['label']), Paragraph(partner.organisation, styles['default'])]]

        t=Table(data, hAlign='LEFT', colWidths=[4 * cm, 12 * cm])
        t.setStyle(TableStyle([('VALIGN',(0,0),(-1,-1),'TOP')]))
        story.append(t)
        story.append(Spacer(1, 16))

    story.append(Spacer(1, 24))

    story.append(Paragraph('A) Data & Method Preparation', styles['title2']))
    writeTasks(story, styles, reqInfo)
    story.append(Spacer(1, 24))

    story.append(Paragraph('B) Experiment Analysis Steps', styles['title2']))
    writeTasks(story, styles, stepInfo)
    story.append(Spacer(1, 24))

    story.append(Paragraph('C) Result Reporting', styles['title2']))
    writeTasks(story, styles, reportingInfo)

    # write the document to disk
    doc.build(story)

    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response
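
writeTasks is called three times above but not defined in this excerpt. A plausible minimal version consistent with those call sites (hypothetical; 'taskNr' and 'description' are assumed field names on the task models):

def writeTasks(story, styles, tasks):
    """Append one label/description row per task to the story."""
    for task in tasks:
        data = [[Paragraph('Task {}:'.format(task.taskNr), styles['label']),
                 Paragraph(task.description.replace('\n', '<br />\n'),
                           styles['default'])]]
        t = Table(data, hAlign='LEFT', colWidths=[4 * cm, 12 * cm])
        t.setStyle(TableStyle([('VALIGN', (0, 0), (-1, -1), 'TOP')]))
        story.append(t)
        story.append(Spacer(1, 8))
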
def main():


    print("Confluent Kafka Version: %s - Libversion: %s" % (version(), libversion()))
    print("")
    print("Using weights file: %s" % weights_file)
    print("")
    print("Consuming raw video frames from %s" % topic_frames)
    print("")
    print("Producing classified video frames to: %s" % topic_class_frames)
    print("")
    print("Consumer group name: %s" % consumer_group)
    print("")
    print("Consumer group start: %s" % consumer_group_start)
    print("")
    print("Debug is set to %s" % debug)
    print("")
    print("nomsgcnt is set to %s - if this is greater than 0, then when we have that many attempts to read a msg from MapR Streams, we will exit" % nomsgcnt)


    con_conf = {'bootstrap.servers': '', 'group.id': consumer_group, 'default.topic.config': {'auto.offset.reset': consumer_group_start}}
    pro_conf = {'bootstrap.servers': '', 'message.max.bytes':'2978246'}
    c = Consumer(con_conf)
    p = Producer(pro_conf)

    c.subscribe([topic_frames])
    lastmsgtime = time.time()
    nomsg = 0
    running = True
    while running:
        msg = c.poll(timeout=1.0)
        if msg is None:
            nomsg += 1
            if debug:
                print("No Message - Continuing")
            if nomsgcnt > 0 and nomsg >= nomsgcnt:
                print("%s itterations with no messages reached - Exiting Gracefully")
                sys.exit(0)
            continue
        if not msg.error():
            mymsg = json.loads(msg.value().decode('utf-8'), object_pairs_hook=OrderedDict)
            mypart = msg.partition()
            myoffset = msg.offset()
            outmsg = OrderedDict()
            outmsg['ts'] = mymsg['ts']
            outmsg['epoch_ts'] = mymsg['epoch_ts']
            outmsg['cam_name'] = mymsg['cam_name']
            outmsg['src_partition'] = mypart
            outmsg['src_offset'] = myoffset
            mybytes = base64.b64decode(mymsg['img'])
            o = open("/dev/shm/tmp.jpg", "wb")
            o.write(mybytes)
            o.close
#            myimage = np.array(Image.open(BytesIO(mybytes))) 
            curmsgtime = time.time()
            msgdelta = curmsgtime - lastmsgtime
            if debug:
                print("Time between last processed messages: %s" %  msgdelta)
            lastmsgtime = curmsgtime

            r = python.darknet.detect(net, meta, b'/dev/shm/tmp.jpg')
            if r != []:
                if debug:
                    print("Got classification!")
                curtime = datetime.datetime.now()
                mystrtime = curtime.strftime("%Y-%m-%d %H:%M:%S")
                epochtime = int(time.time())

                arclass = []
                if save_images == 1:
                    try:
                        image = Image.open(BytesIO(mybytes)).convert("RGBA")
                    except Exception:
                        continue
                    draw = ImageDraw.Draw(image)
                    for q in r:
                        j = OrderedDict()
                        name = q[0]
                        j['name'] = name.decode()
                        predict = q[1]
                        j['predict'] = predict
                        x = q[2][0]
                        y = q[2][1]
                        w = q[2][2]
                        z = q[2][3]
                        x_max = (2*x+w)/2
                        x_min = (2*x-w)/2
                        y_min = (2*y-z)/2
                        y_max = (2*y+z)/2
                        j['x_min'] = x_min
                        j['x_max'] = x_max
                        j['y_min'] = y_min
                        j['y_max'] = y_max
                        for x in range(border):
                            draw.rectangle(((x_min - x, y_min - x), (x_max + x, y_max + x)), fill=None, outline="black")
                        draw.text((x_min + border + 2, y_max - border - 5), name)
                        arclass.append(j)

                    imgSave = BytesIO()
                    image.save(imgSave, format='JPEG')
                    imgSave = imgSave.getvalue()
                    encdata = base64.b64encode(imgSave)
                    encdatastr = encdata.decode('utf-8')
                else:
                    encdatastr = ""
                outmsg['class_json'] = arclass
                outmsg['class_ts'] = mystrtime
                outmsg['class_epoch_ts'] = epochtime
                outmsg['class_img'] = encdatastr
                produceMessage(p, topic_class_frames, json.dumps(outmsg))
            else:
                pass
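
The bounding-box arithmetic above converts darknet's (center, extent) boxes into the corner coordinates PIL's draw.rectangle expects. The same math factored into a helper for clarity:

def center_box_to_corners(x, y, w, h):
    """darknet returns box centers plus extents; draw.rectangle wants
    the (x_min, y_min) and (x_max, y_max) corners."""
    x_min = (2 * x - w) / 2   # == x - w/2
    x_max = (2 * x + w) / 2   # == x + w/2
    y_min = (2 * y - h) / 2
    y_max = (2 * y + h) / 2
    return x_min, y_min, x_max, y_max

assert center_box_to_corners(50, 40, 20, 10) == (40.0, 35.0, 60.0, 45.0)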
Example #50
0
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, module_compression, async_timeout, become,
                       become_method, become_user, become_password, environment):
    """
    Given the source of the module, convert it to a Jinja2 template to insert
    module code and return whether it's a new or old style module.
    """
    module_substyle = module_style = 'old'

    # module_style is something important to calling code (ActionBase).  It
    # determines how arguments are formatted (json vs k=v) and whether
    # a separate arguments file needs to be sent over the wire.
    # module_substyle is extra information that's useful internally.  It tells
    # us what we have to look to substitute in the module files and whether
    # we're using module replacer or ansiballz to format the module itself.
    if _is_binary(b_module_data):
        module_substyle = module_style = 'binary'
    elif REPLACER in b_module_data:
        # Do REPLACER before from ansible.module_utils because we need to make
        # sure we substitute "from ansible.module_utils.basic import *" for REPLACER
        module_style = 'new'
        module_substyle = 'python'
        b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
    elif b'from ansible.module_utils.' in b_module_data:
        module_style = 'new'
        module_substyle = 'python'
    elif REPLACER_WINDOWS in b_module_data or re.search(b'#Requires \-Module', b_module_data, re.IGNORECASE):
        module_style = 'new'
        module_substyle = 'powershell'
    elif REPLACER_JSONARGS in b_module_data:
        module_style = 'new'
        module_substyle = 'jsonargs'
    elif b'WANT_JSON' in b_module_data:
        module_substyle = module_style = 'non_native_want_json'

    shebang = None
    # Neither old-style, non_native_want_json nor binary modules should be modified
    # except for the shebang line (Done by modify_module)
    if module_style in ('old', 'non_native_want_json', 'binary'):
        return b_module_data, module_style, shebang

    output = BytesIO()
    py_module_names = set()

    if module_substyle == 'python':
        params = dict(ANSIBLE_MODULE_ARGS=module_args,)
        python_repred_params = repr(json.dumps(params))

        try:
            compression_method = getattr(zipfile, module_compression)
        except AttributeError:
            display.warning(u'Bad module compression string specified: %s.  Using ZIP_STORED (no compression)' % module_compression)
            compression_method = zipfile.ZIP_STORED

        lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
        cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))

        zipdata = None
        # Optimization -- don't lock if the module has already been cached
        if os.path.exists(cached_module_filename):
            display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
            with open(cached_module_filename, 'rb') as f:
                zipdata = f.read()
        else:
            if module_name in action_write_locks.action_write_locks:
                display.debug('ANSIBALLZ: Using lock for %s' % module_name)
                lock = action_write_locks.action_write_locks[module_name]
            else:
                # If the action plugin directly invokes the module (instead of
                # going through a strategy) then we don't have a cross-process
                # Lock specifically for this module.  Use the "unexpected
                # module" lock instead
                display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
                lock = action_write_locks.action_write_locks[None]

            display.debug('ANSIBALLZ: Acquiring lock')
            with lock:
                display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
                # Check that no other process has created this while we were
                # waiting for the lock
                if not os.path.exists(cached_module_filename):
                    display.debug('ANSIBALLZ: Creating module')
                    # Create the module zip data
                    zipoutput = BytesIO()
                    zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
                    # Note: If we need to import from release.py first,
                    # remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523
                    zf.writestr('ansible/__init__.py',
                                b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="' +
                                to_bytes(__version__) + b'"\n__author__="' +
                                to_bytes(__author__) + b'"\n')
                    zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')

                    zf.writestr('ansible_module_%s.py' % module_name, b_module_data)

                    py_module_cache = {('__init__',): (b'', '[builtin]')}
                    recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)
                    zf.close()
                    zipdata = base64.b64encode(zipoutput.getvalue())

                    # Write the assembled module to a temp file (write to temp
                    # so that no one looking for the file reads a partially
                    # written file)
                    if not os.path.exists(lookup_path):
                        # Note -- if we have a global function to setup, that would
                        # be a better place to run this
                        os.makedirs(lookup_path)
                    display.debug('ANSIBALLZ: Writing module')
                    with open(cached_module_filename + '-part', 'wb') as f:
                        f.write(zipdata)

                    # Rename the file into its final position in the cache so
                    # future users of this module can read it off the
                    # filesystem instead of constructing from scratch.
                    display.debug('ANSIBALLZ: Renaming module')
                    os.rename(cached_module_filename + '-part', cached_module_filename)
                    display.debug('ANSIBALLZ: Done creating module')

            if zipdata is None:
                display.debug('ANSIBALLZ: Reading module after lock')
                # Another process wrote the file while we were waiting for
                # the write lock.  Go ahead and read the data from disk
                # instead of re-creating it.
                try:
                    with open(cached_module_filename, 'rb') as f:
                        zipdata = f.read()
                except IOError:
                    raise AnsibleError('A different worker process failed to create module file. '
                                       'Look at traceback for that process for debugging information.')
        zipdata = to_text(zipdata, errors='surrogate_or_strict')

        shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars)
        if shebang is None:
            shebang = u'#!/usr/bin/python'

        # Enclose the parts of the interpreter in quotes because we're
        # substituting it into the template as a Python string
        interpreter_parts = interpreter.split(u' ')
        interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))

        now = datetime.datetime.utcnow()
        output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
            zipdata=zipdata,
            ansible_module=module_name,
            params=python_repred_params,
            shebang=shebang,
            interpreter=interpreter,
            coding=ENCODING_STRING,
            year=now.year,
            month=now.month,
            day=now.day,
            hour=now.hour,
            minute=now.minute,
            second=now.second,
        )))
        b_module_data = output.getvalue()

    elif module_substyle == 'powershell':
        # Powershell/winrm don't actually make use of shebang so we can
        # safely set this here.  If we let the fallback code handle this
        # it can fail in the presence of the UTF8 BOM commonly added by
        # Windows text editors
        shebang = u'#!powershell'

        exec_manifest = dict(
            module_entry=to_text(base64.b64encode(b_module_data)),
            powershell_modules=dict(),
            module_args=module_args,
            actions=['exec'],
            environment=environment
        )

        exec_manifest['exec'] = to_text(base64.b64encode(to_bytes(leaf_exec)))

        if async_timeout > 0:
            exec_manifest["actions"].insert(0, 'async_watchdog')
            exec_manifest["async_watchdog"] = to_text(base64.b64encode(to_bytes(async_watchdog)))
            exec_manifest["actions"].insert(0, 'async_wrapper')
            exec_manifest["async_wrapper"] = to_text(base64.b64encode(to_bytes(async_wrapper)))
            exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
            exec_manifest["async_timeout_sec"] = async_timeout

        if become and become_method == 'runas':
            exec_manifest["actions"].insert(0, 'become')
            exec_manifest["become_user"] = become_user
            exec_manifest["become_password"] = become_password
            exec_manifest["become"] = to_text(base64.b64encode(to_bytes(become_wrapper)))

        lines = b_module_data.split(b'\n')
        module_names = set()

        requires_module_list = re.compile(to_bytes(r'(?i)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'))

        for line in lines:
            # legacy, equivalent to #Requires -Modules powershell
            if REPLACER_WINDOWS in line:
                module_names.add(b'Ansible.ModuleUtils.Legacy')
            line_match = requires_module_list.match(line)
            if line_match:
                module_names.add(line_match.group(1))

        for m in set(module_names):
            m = to_text(m)
            mu_path = ps_module_utils_loader.find_plugin(m, ".psm1")
            if not mu_path:
                raise AnsibleError('Could not find imported module support code for \'%s\'.' % m)
            exec_manifest["powershell_modules"][m] = to_text(
                base64.b64encode(
                    to_bytes(
                        _slurp(mu_path)
                    )
                )
            )

        # FUTURE: smuggle this back as a dict instead of serializing here; the connection plugin may need to modify it
        module_json = json.dumps(exec_manifest)

        b_module_data = exec_wrapper.replace(b"$json_raw = ''", b"$json_raw = @'\r\n%s\r\n'@" % to_bytes(module_json))

    elif module_substyle == 'jsonargs':
        module_args_json = to_bytes(json.dumps(module_args))

        # these strings could be included in a third-party module but
        # officially they were included in the 'basic' snippet for new-style
        # python modules (which has been replaced with something else in
        # ansiballz) If we remove them from jsonargs-style module replacer
        # then we can remove them everywhere.
        python_repred_args = to_bytes(repr(module_args_json))
        b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
        b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
        b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))

        # The main event -- substitute the JSON args string into the module
        b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)

        facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
        b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)

    return (b_module_data, module_style, shebang)
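
The AnsiballZ branch above boils down to: write modules into an in-memory zip, base64-encode it, and template it into a stub script. The kernel of that pattern in isolation (a sketch, not Ansible's actual helper):

import base64
import zipfile
from io import BytesIO

def zip_and_encode(files: dict) -> bytes:
    """files maps archive names to byte payloads; returns the zip, base64-encoded."""
    buf = BytesIO()
    with zipfile.ZipFile(buf, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
        for name, data in files.items():
            zf.writestr(name, data)
    return base64.b64encode(buf.getvalue())

# e.g. zip_and_encode({'ansible_module_ping.py': b'print("pong")'})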
Example #51
0
    def write(self) -> bytes:
        b = BytesIO()
        b.write(Int(self.ID, False))

        # No flags
        
        b.write(String(self.phone_number))
        
        b.write(String(self.phone_code_hash))
        
        b.write(String(self.phone_code))
        
        return b.getvalue()
Example #52
0
    def write(self) -> bytes:
        b = BytesIO()
        b.write(Int(self.ID, False))

        # No flags

        b.write(Int(self.id))

        b.write(Long(self.access_hash))

        b.write(Int(self.date))

        b.write(Int(self.admin_id))

        b.write(Int(self.participant_id))

        b.write(Bytes(self.g_a_or_b))

        b.write(Long(self.key_fingerprint))

        return b.getvalue()
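
Examples #51 and #52 show only the write() direction. The common thread is that every payload opens with the object's 32-bit constructor ID, written little-endian and unsigned (which is what Int(self.ID, False) appears to do). A self-contained miniature of that layout, with a purely illustrative constructor ID:

import struct
from io import BytesIO

def write_header(constructor_id: int, body: bytes) -> bytes:
    b = BytesIO()
    b.write(struct.pack('<I', constructor_id))  # mirrors Int(self.ID, False)
    b.write(body)
    return b.getvalue()

payload = write_header(0xF660E1D4, b'')  # 0xF660E1D4 is made up for the demo
assert struct.unpack_from('<I', payload)[0] == 0xF660E1D4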
Example #53
0
def doit(directory, force, force_resize, dry_run):
    """Do the thing.
    """
    orig_dir = directory / "originals"
    if not orig_dir.exists():
        print("Expected to find a sub-directory named 'originals' "
            "containing image files.", file=sys.stderr)
        sys.exit(1)
    content_file = directory / CONTENT_FILE
    if not content_file.exists():
        content = []
    else:
        with content_file.open() as cf:
            content = json.load(cf)
    old = {c['filename']: c for c in content}
    new = []
    done = []
    for path in sorted(orig_dir.glob('*.jpg')):
        if path.is_file():
            # A candidate
            mtime = isoformat(path.stat().st_mtime)
            oe = old.get(path.name)
            if (force or oe is None or oe.get('mtime') != mtime):
                # File is new or changed
                new.append(dict(
                    filename=path.name,
                    mtime=mtime,
                    path=path,
                    ID=re.sub(r'[^\w-]', '-', path.stem),
                    downloadURL=f"/images/{orig_dir.name}/{path.name}"))
            else:
                # We have up-to-date info for this file
                done.append(oe)

    if len(new) == 0:
        logging.info("No changes, exiting.")
        return

    resized_dir = directory / "resized"
    if not resized_dir.exists():
        resized_dir.mkdir()

    # Process new files
    for data in new:
        path = data.pop("path")
        logging.info("Processing %s", path.name)
        img = Image.open(path)

        # Save the EXIF data so we can write it back out
        exif_bytes = img.info.get('exif', b'')

        if img.width > MAX_WIDTH or img.height > MAX_HEIGHT:
            # Image too large, need maxpect image for web display
            logging.info("Image too large (%d x %d)", img.width, img.height)
            resized_name = f"web-{path.name}"
            resized_path = resized_dir / resized_name
            if resized_path.exists() and not force_resize:
                logging.info("Reading size of existing maxpect")
                maxpect = Image.open(resized_path)
            else:
                logging.info("Making maxpect")
                maxpect = img.copy()
                # thumbnail() method modifies image, preserves aspect ratio.
                # Image.LANCZOS is the best quality and seems plenty fast
                # Image.BICUBIC is faster but lower quality.
                maxpect.thumbnail(
                    (MAX_WIDTH, MAX_HEIGHT), resample=Image.LANCZOS)
                logging.debug('Saving maxpect as "%s"', resized_path)
                if not dry_run:
                    maxpect.save(resized_path,
                        quality=90,
                        progressive=True,
                        optimize=True,
                        exif=exif_bytes,
                        icc_profile=img.info.get('icc_profile'))
            data["imgWidth"] = maxpect.width
            data["imgHeight"] = maxpect.height
            data["src"] = f'/images/{resized_dir.name}/{resized_name}'
        else:
            data["src"] = data["downloadURL"]
            data["imgWidth"] = img.width
            data["imgHeight"] = img.height

        read_exif_metadata(img, data)
        if "title" not in data:
            # Nothing in EXIF, use the filename
            if not re.search(r'\d{5}', path.name):
                # Doesn't look like a serial number, assume it's text and try
                # to make it pretty.
                data['title'] = titlecase(re.sub(r'[_-]', ' ', path.stem))
            else:
                data['title'] = path.name

        # make thumbnail (cropping to 90%)
        thumb_path = resized_dir / f"thumb-{path.name}"
        logging.info("Making thumbnail %s", thumb_path)
        crop_coords = (
            img.width / 20,
            img.height / 20,
            img.width - img.width / 20,
            img.height - img.height / 20
        )
        thumb = img.crop(crop_coords)
        hratio = thumb.height / THUMBNAIL_HEIGHT
        thumb.thumbnail((thumb.width / hratio, THUMBNAIL_HEIGHT))
        if not dry_run:
            thumb.save(thumb_path)
        data["srct"] = f"/images/{resized_dir.name}/{thumb_path.name}"
        data["imgtWidth"] = thumb.width
        data["imgtHeight"] = thumb.height

        # Get dominant colors
        #  Resize to ~20x20, blur, create gif, base64 encode
        # (Fancier method: https://github.com/fengsp/color-thief-py)
        logging.info("Creating 'dominant colors' gif")
        thumb.thumbnail((15, 15))
        blurred = thumb.filter(filter=ImageFilter.BLUR)
        bio = BytesIO()
        blurred.save(bio, format="GIF")
        gif_encoded = binascii.b2a_base64(bio.getvalue()).decode('utf8')
        # Add to new dict
        data['imageDominantColors'] = f"data:image/gif;base64,{gif_encoded}"

        done.append(data)

    # FIXME Remove orphaned thumbs and originals

    # Write new CONTENT_FILE
    done.sort(key=lambda x: x.get('exifTime', x['mtime']), reverse=True)
    if dry_run:
        print(json.dumps(done, indent=1), file=sys.stderr)
    else:
        # Make symlink to latest thumbnail image
        latest = Path(done[0]['srct']).name
        symlink_path = resized_dir / 'latest.jpg'
        try:
            if symlink_path.exists() or symlink_path.is_symlink():
                logging.debug("unlinking old symlink %s", symlink_path)
                symlink_path.unlink()
            logging.info("Creating 'latest.jpg' symlink %s -> %s", symlink_path, latest)
            (symlink_path).symlink_to(latest)
        except OSError as e:
            logging.error("Failed to create 'latest.jpg' symlink: " + str(e))
        # Write JSON
        logging.info("Writing %s", directory / CONTENT_FILE)
        with (directory / CONTENT_FILE).open(mode='w') as fp:
            json.dump(done, fp, indent=1)
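
isoformat() above (applied to st_mtime) is not defined in this excerpt; a plausible stand-in consistent with its use as a stable comparison key (hypothetical implementation):

from datetime import datetime, timezone

def isoformat(epoch_seconds: float) -> str:
    """Render a filesystem timestamp as a stable ISO-8601 string so it can be
    compared against the value cached in the content file."""
    return datetime.fromtimestamp(epoch_seconds, tz=timezone.utc).isoformat()

# isoformat(0) == '1970-01-01T00:00:00+00:00'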
    def decoderawtransaction_asm_sighashtype(self):
        """Test decoding scripts via RPC command "decoderawtransaction".

        This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
        """

        # this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
        tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])

        # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
        # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
        # verify that we have not altered scriptPubKey decoding.
        tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
        assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
        assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
        assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
        txSave = CTransaction()
        txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))

        # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
        tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])

        # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
        tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
        assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])

        # some more full transaction tests of varying specific scriptSigs. used instead of
        # tests in decodescript_script_sig because the decodescript RPC is specifically
        # for working on scriptPubKeys (argh!).
        push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
        signature = push_signature[2:]
        der_signature = signature[:-2]
        signature_sighash_decoded = der_signature + '[ALL]'
        signature_2 = der_signature + '82'
        push_signature_2 = '48' + signature_2
        signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'

        # 1) P2PK scriptSig
        txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
        assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])

        # make sure that the sighash decodes come out correctly for a more complex / lesser used case.
        txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
        assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])

        # 2) multisig scriptSig
        txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
        assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])

        # 3) test a scriptSig that contains more than push operations.
        # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
        txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
        rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
        assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
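
hex_str_to_bytes and bytes_to_hex_str come from the test framework; judging by how they are used here, they are thin wrappers over the built-ins, roughly:

def hex_str_to_bytes(hex_str: str) -> bytes:
    return bytes.fromhex(hex_str)

def bytes_to_hex_str(byte_str: bytes) -> str:
    return byte_str.hex()

assert bytes_to_hex_str(hex_str_to_bytes('6a14')) == '6a14'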
Example #55
0
def serve_pil_image(pil_img):
    img_io = BytesIO()
    pil_img.save(img_io, 'PNG', quality=70)
    img_io.seek(0)
    return send_file(img_io, mimetype='image/png')
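
A typical call site for serve_pil_image, sketched as a minimal Flask route (route and image are illustrative). Note that quality is a JPEG-oriented option which Pillow's PNG encoder does not appear to use, so the quality=70 above is likely a no-op:

from flask import Flask
from PIL import Image

app = Flask(__name__)

@app.route('/image.png')
def image_png():
    # a solid 64x64 red square, purely for demonstration
    img = Image.new('RGB', (64, 64), color=(255, 0, 0))
    return serve_pil_image(img)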
Example #56
0
    def write(self) -> bytes:
        data = BytesIO()
        data.write(Int(self.ID, False))

        # No flags

        data.write(self.peer.write())

        data.write(Int(self.expires))

        data.write(Int(self.distance))

        return data.getvalue()
Example #57
0
 def test_real(bytes_real, real):
     reader = PDFObjectReader(BytesIO(bytes_real))
     result = reader.next_item()
     assert isinstance(result, cos.Real) and result == real
Example #58
0
 def _process(self):
     pdf = ProgrammeToPDF(self.event)
     return send_file('program.pdf', BytesIO(pdf.getPDFBin()), 'application/pdf')
Example #59
0
 def test_integer(bytes_integer, integer):
     reader = PDFObjectReader(BytesIO(bytes_integer))
     result = reader.next_item()
     assert isinstance(result, cos.Integer) and result == integer
Example #60
0
 def test_name(bytes_name, unicode_name):
     reader = PDFObjectReader(BytesIO(bytes_name))
     result = reader.next_item()
     assert isinstance(result, cos.Name) and str(result) == unicode_name