def compare_digest_buffer(space, w_a, w_b):
    """Timing-safe equality test of two char-buffer objects.

    Both arguments are converted with charbuf_w; the constant-time
    comparison itself is delegated to the C helper pypy_tscmp.
    Returns a wrapped bool.
    """
    left = space.charbuf_w(w_a)
    right = space.charbuf_w(w_b)
    with rffi.scoped_nonmovingbuffer(left) as left_ptr:
        with rffi.scoped_nonmovingbuffer(right) as right_ptr:
            rc = pypy_tscmp(left_ptr, right_ptr, len(left), len(right))
    return space.newbool(rffi.cast(lltype.Bool, rc))
def compare_digest_buffer(space, w_a, w_b):
    """Timing-safe equality test of two buffer-like objects.

    Uses bufferstr_w to obtain both byte strings and pypy_tscmp for
    the constant-time comparison; returns a wrapped bool.
    """
    data_a = space.bufferstr_w(w_a)
    data_b = space.bufferstr_w(w_b)
    with rffi.scoped_nonmovingbuffer(data_a) as ptr_a:
        with rffi.scoped_nonmovingbuffer(data_b) as ptr_b:
            rc = pypy_tscmp(ptr_a, ptr_b, len(data_a), len(data_b))
    return space.wrap(rffi.cast(lltype.Bool, rc))
def compare_digest_buffer(space, w_a, w_b):
    # Timing-safe comparison of two buffer objects via the C helper
    # pypy_tscmp; raises TypeError if either object has no buffer
    # interface.
    try:
        a_buf = w_a.buffer_w(space, space.BUF_SIMPLE)
        b_buf = w_b.buffer_w(space, space.BUF_SIMPLE)
    except TypeError:
        # NOTE(review): the sibling version of this function catches
        # BufferInterfaceNotFound instead of TypeError here -- confirm
        # that buffer_w can actually raise an interp-level TypeError,
        # otherwise this clause never fires.
        raise oefmt(space.w_TypeError,
                    "unsupported operand types(s) or combination of types: "
                    "'%T' and '%T'", w_a, w_b)
    a = a_buf.as_str()
    b = b_buf.as_str()
    with rffi.scoped_nonmovingbuffer(a) as a_buf:
        with rffi.scoped_nonmovingbuffer(b) as b_buf:
            result = pypy_tscmp(a_buf, b_buf, len(a), len(b))
    return space.wrap(rffi.cast(lltype.Bool, result))
def OpenKey(space, w_key, w_sub_key, reserved=0, access=rwinreg.KEY_READ):
    """ key = OpenKey(key, sub_key, res = 0, sam = KEY_READ) - Opens the
specified key.

key is an already open key, or any one of the predefined HKEY_* constants.
sub_key is a string that identifies the sub_key to open
res is a reserved integer, and must be zero.  Default is zero.
sam is an integer that specifies an access mask that describes the desired
 security access for the key.  Default is KEY_READ

The result is a new handle to the specified key
If the function fails, an EnvironmentError exception is raised."""
    hkey = hkey_w(w_key, space)
    # encode the subkey as NUL-terminated UTF-16 for the wide-char API
    utf8 = space.utf8_w(w_sub_key)
    state = space.fromcache(CodecState)
    errh = state.encode_error_handler
    subkeyW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh,
                                 allow_surrogates=False)
    with rffi.scoped_nonmovingbuffer(subkeyW) as subkeyP0:
        # ptradd(..., 2) skips the 2-byte BOM that utf_16 encoding prepends
        subkeyP = rffi.cast(rffi.CWCHARP, rffi.ptradd(subkeyP0, 2))
        with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey:
            ret = rwinreg.RegOpenKeyExW(hkey, subkeyP, reserved, access,
                                        rethkey)
            if ret != 0:
                raiseWindowsError(space, ret, 'RegOpenKeyEx')
            return W_HKEY(space, rethkey[0])
def _decompress_buf(self, data, max_length):
    # Decompress up to MAX_BUFSIZE bytes of 'data' through the bz2
    # stream, producing at most 'max_length' bytes of output.
    # Input not consumed in this call is accounted for in
    # self.left_to_process.  Returns the decompressed bytes wrapped.
    total_in = len(data)
    in_bufsize = min(total_in, MAX_BUFSIZE)
    total_in -= in_bufsize
    with rffi.scoped_nonmovingbuffer(data) as in_buf:
        # setup the input and the size it can consume
        self.bzs.c_next_in = in_buf
        rffi.setintfield(self.bzs, 'c_avail_in', in_bufsize)
        self.left_to_process = in_bufsize
        with OutBuffer(self.bzs, max_length=max_length) as out:
            while True:
                bzreturn = BZ2_bzDecompress(self.bzs)
                # add up the size that has not been processed
                avail_in = rffi.getintfield(self.bzs, 'c_avail_in')
                self.left_to_process = avail_in
                if bzreturn == BZ_STREAM_END:
                    # end of the compressed stream reached
                    self.running = False
                    break
                if bzreturn != BZ_OK:
                    _catch_bz2_error(self.space, bzreturn)
                if self.left_to_process == 0:
                    break
                elif rffi.getintfield(self.bzs, 'c_avail_out') == 0:
                    # output chunk full: stop at max_length, else grow
                    if out.get_data_size() == max_length:
                        break
                    out.prepare_next_chunk()
            # input beyond this call's chunk is still left to process
            self.left_to_process += total_in
            res = out.make_result_string()
            return self.space.newbytes(res)
def deflateSetDictionary(stream, string):
    """Load a preset compression dictionary into the deflate *stream*.

    Raises RZlibError when zlib reports Z_STREAM_ERROR (invalid
    parameter or inconsistent stream state).
    """
    with rffi.scoped_nonmovingbuffer(string) as dict_ptr:
        status = _deflateSetDictionary(stream, rffi.cast(Bytefp, dict_ptr),
                                       len(string))
    if status == Z_STREAM_ERROR:
        raise RZlibError(
            "Parameter is invalid or the stream state is inconsistent")
def _decode_helper(cp, s, flags, encoding, errors, errorhandler, final,
                   start, end, res):
    # Decode s[start:end] from code page 'cp' via the Windows
    # MultiByteToWideChar API, appending the UTF-8 result to 'res'.
    # Returns (new_position, number_of_codepoints_appended).
    if end > len(s):
        end = len(s)
    piece = s[start:end]
    with rffi.scoped_nonmovingbuffer(piece) as dataptr:
        # first get the size of the result
        outsize = MultiByteToWideChar(cp, flags, dataptr, len(piece),
                                      lltype.nullptr(rffi.CWCHARP.TO), 0)
        if outsize == 0:
            # sizing call failed: delegate to the error handler
            r, pos = _decode_cp_error(s, errorhandler, encoding, errors,
                                      final, start, end)
            res.append(r)
            return pos, check_utf8(r, True)
        with rffi.scoped_alloc_unicodebuffer(outsize) as buf:
            # do the conversion
            if MultiByteToWideChar(cp, flags, dataptr, len(piece), buf.raw,
                                   outsize) == 0:
                r, pos = _decode_cp_error(s, errorhandler, encoding, errors,
                                          final, start, end)
                res.append(r)
                return pos, check_utf8(r, True)
            buf_as_str = buf.str(outsize)
            assert buf_as_str is not None
            # convert the wide-char buffer to UTF-8
            with rffi.scoped_nonmoving_unicodebuffer(buf_as_str) as dataptr:
                conv = _unibuf_to_utf8(dataptr, outsize)
            res.append(conv)
            return end, codepoints_in_utf8(conv)
def compare_digest_buffer(space, w_a, w_b):
    # Timing-safe comparison of two buffer objects.  Objects without a
    # buffer interface are rejected with TypeError; the constant-time
    # compare itself is done in C by pypy_tscmp.
    try:
        a_buf = w_a.buffer_w(space, space.BUF_SIMPLE)
        b_buf = w_b.buffer_w(space, space.BUF_SIMPLE)
    except BufferInterfaceNotFound:
        raise oefmt(
            space.w_TypeError,
            "unsupported operand types(s) or combination of types: "
            "'%T' and '%T'", w_a, w_b)
    a = a_buf.as_str()
    b = b_buf.as_str()
    with rffi.scoped_nonmovingbuffer(a) as a_buf:
        with rffi.scoped_nonmovingbuffer(b) as b_buf:
            result = pypy_tscmp(a_buf, b_buf, len(a), len(b))
    return space.wrap(rffi.cast(lltype.Bool, result))
def siphash24(s):
    """Return the siphash-2-4 of the plain string *s* as an r_uint64.

    Cast the result back to a regular integer if needed, e.g. with
    rarithmetic.intmask().
    """
    with rffi.scoped_nonmovingbuffer(s) as raw:
        return _siphash24(llmemory.cast_ptr_to_adr(raw), len(s))
def ConnectRegistry(space, w_machine, w_hkey):
    """ key = ConnectRegistry(computer_name, key)

Establishes a connection to a predefined registry handle on another computer.

computer_name is the name of the remote computer, of the form
 \\\\computername.  If None, the local computer is used.
key is the predefined handle to connect to.

The return value is the handle of the opened key.
If the function fails, an EnvironmentError exception is raised."""
    hkey = hkey_w(w_hkey, space)
    if space.is_none(w_machine):
        # local machine: pass NULL as the computer name
        with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey:
            ret = rwinreg.RegConnectRegistryW(None, hkey, rethkey)
            if ret != 0:
                raiseWindowsError(space, ret, 'RegConnectRegistry')
            return W_HKEY(space, rethkey[0])
    else:
        # encode the machine name as NUL-terminated UTF-16
        utf8 = space.utf8_w(w_machine)
        state = space.fromcache(CodecState)
        errh = state.encode_error_handler
        machineW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh,
                                      allow_surrogates=False)
        with rffi.scoped_nonmovingbuffer(machineW) as machineP0:
            # ptradd(..., 2) skips the 2-byte BOM of the utf_16 encoding
            machineP = rffi.cast(rwin32.LPWSTR, rffi.ptradd(machineP0, 2))
            with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey:
                ret = rwinreg.RegConnectRegistryW(machineP, hkey, rethkey)
                if ret != 0:
                    raiseWindowsError(space, ret, 'RegConnectRegistry')
                return W_HKEY(space, rethkey[0])
def inflateSetDictionary(stream, string):
    """Supply the preset dictionary expected by the inflate *stream*.

    Raises RZlibError on Z_STREAM_ERROR (bad parameter / stream state)
    or Z_DATA_ERROR (dictionary does not match the expected one).
    """
    with rffi.scoped_nonmovingbuffer(string) as dict_ptr:
        status = _inflateSetDictionary(stream, rffi.cast(Bytefp, dict_ptr),
                                       len(string))
    if status == Z_STREAM_ERROR:
        raise RZlibError("Parameter is invalid or the stream state is inconsistent")
    elif status == Z_DATA_ERROR:
        raise RZlibError("The given dictionary doesn't match the expected one")
def _operate(stream, data, flush, max_length, cfunc, while_doing): """Common code for compress() and decompress(). """ # Prepare the input buffer for the stream assert data is not None # XXX seems to be sane assumption, however not for sure with rffi.scoped_nonmovingbuffer(data) as inbuf: stream.c_next_in = rffi.cast(Bytefp, inbuf) rffi.setintfield(stream, 'c_avail_in', len(data)) # Prepare the output buffer with lltype.scoped_alloc(rffi.CCHARP.TO, OUTPUT_BUFFER_SIZE) as outbuf: # Strategy: we call deflate() to get as much output data as fits in # the buffer, then accumulate all output into a StringBuffer # 'result'. result = StringBuilder() while True: stream.c_next_out = rffi.cast(Bytefp, outbuf) bufsize = OUTPUT_BUFFER_SIZE if max_length < bufsize: if max_length <= 0: err = Z_OK break bufsize = max_length max_length -= bufsize rffi.setintfield(stream, 'c_avail_out', bufsize) err = cfunc(stream, flush) if err == Z_OK or err == Z_STREAM_END: # accumulate data into 'result' avail_out = rffi.cast(lltype.Signed, stream.c_avail_out) result.append_charpsize(outbuf, bufsize - avail_out) # if the output buffer is full, there might be more data # so we need to try again. Otherwise, we're done. if avail_out > 0: break # We're also done if we got a Z_STREAM_END (which should # only occur when flush == Z_FINISH). if err == Z_STREAM_END: break else: continue elif err == Z_BUF_ERROR: avail_out = rffi.cast(lltype.Signed, stream.c_avail_out) # When compressing, we will only get Z_BUF_ERROR if # the output buffer was full but there wasn't more # output when we tried again, so it is not an error # condition. if avail_out == bufsize: break # fallback case: report this error raise RZlibError.fromstream(stream, err, while_doing) # When decompressing, if the compressed stream of data was truncated, # then the zlib simply returns Z_OK and waits for more. If it is # complete it returns Z_STREAM_END. return (result.build(), err, rffi.cast(lltype.Signed, stream.c_avail_in))
def adler32(string, start=ADLER32_DEFAULT_START):
    """Compute the Adler-32 checksum of *string*.

    *start* optionally seeds the running checksum; the result is
    returned as an unsigned 32 bit integer.
    """
    with rffi.scoped_nonmovingbuffer(string) as raw:
        # rename avoids shadowing the builtin 'bytes'
        result = _adler32(start, rffi.cast(Bytefp, raw), len(string))
    return result
def adler32(string, start=ADLER32_DEFAULT_START):
    """Adler-32 checksum of *string*, optionally seeded with *start*.

    Returns the checksum as an unsigned 32 bit integer.
    """
    with rffi.scoped_nonmovingbuffer(string) as data_ptr:
        return _adler32(start, rffi.cast(Bytefp, data_ptr), len(string))
def SetValue(space, w_hkey, w_subkey, typ, w_value):
    """ SetValue(key, sub_key, type, value) - Associates a value with a specified key.

key is an already open key, or any one of the predefined HKEY_* constants.
sub_key is a string that names the subkey with which the value is associated.
type is an integer that specifies the type of the data.  Currently this
 must be REG_SZ, meaning only strings are supported.
value is a string that specifies the new value.

If the key specified by the sub_key parameter does not exist, the SetValue
function creates it.

Value lengths are limited by available memory. Long values (more than
2048 bytes) should be stored as files with the filenames stored in
the configuration registry.  This helps the registry perform efficiently.

The key identified by the key parameter must have been opened with
KEY_SET_VALUE access."""
    if typ != rwinreg.REG_SZ:
        raise oefmt(space.w_ValueError, "Type must be winreg.REG_SZ")
    hkey = hkey_w(w_hkey, space)
    state = space.fromcache(CodecState)
    errh = state.encode_error_handler
    # encode subkey and value as NUL-terminated UTF-16 for the W API
    utf8 = space.utf8_w(w_subkey)
    subkeyW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh,
                                 allow_surrogates=False)
    utf8 = space.utf8_w(w_value)
    valueW = utf8_encode_utf_16(utf8 + '\x00', 'strict', errh,
                                allow_surrogates=False)
    valueL = space.len_w(w_value)
    # Add an offset to remove the BOM from the native utf16 wstr
    with rffi.scoped_nonmovingbuffer(subkeyW) as subkeyP0:
        subkeyP = rffi.cast(rffi.CWCHARP, rffi.ptradd(subkeyP0, 2))
        with rffi.scoped_nonmovingbuffer(valueW) as valueP0:
            valueP = rffi.cast(rffi.CWCHARP, rffi.ptradd(valueP0, 2))
            ret = rwinreg.RegSetValueW(hkey, subkeyP, rwinreg.REG_SZ,
                                       valueP, valueL)
            if ret != 0:
                raiseWindowsError(space, ret, 'RegSetValue')
def multiprocessing_send(space, handle, data):
    """Send the byte string *data* over the socket *handle*.

    Returns the number of bytes actually sent, wrapped.  Raises
    ValueError if data is None and a WindowsError if send() fails.
    """
    if data is None:
        # BUG FIX: OperationError's second argument must be a *wrapped*
        # object; passing the raw interp-level string 'data cannot be None'
        # would blow up when the exception is rendered at app level.
        # Use oefmt, consistently with the rest of this module.
        raise oefmt(space.w_ValueError, "data cannot be None")
    with rffi.scoped_nonmovingbuffer(data) as dataptr:
        # rsocket checks for writability of socket with wait_for_data,
        # cpython does check
        res = send(handle, dataptr, len(data), 0)
        if res < 0:
            raise getWindowsError(space)
    return space.newint(res)
def inflateSetDictionary(stream, string):
    """Hand the preset dictionary *string* to the inflate *stream*.

    Z_DATA_ERROR and Z_STREAM_ERROR results are turned into RZlibError.
    """
    with rffi.scoped_nonmovingbuffer(string) as raw:
        err = _inflateSetDictionary(stream, rffi.cast(Bytefp, raw),
                                    len(string))
    if err == Z_DATA_ERROR:
        raise RZlibError("The given dictionary doesn't match the expected one")
    if err == Z_STREAM_ERROR:
        raise RZlibError(
            "Parameter is invalid or the stream state is inconsistent")
def _operate(stream, data, flush, max_length, cfunc, while_doing): """Common code for compress() and decompress(). """ # Prepare the input buffer for the stream assert data is not None # XXX seems to be sane assumption, however not for sure with rffi.scoped_nonmovingbuffer(data) as inbuf: stream.c_next_in = rffi.cast(Bytefp, inbuf) rffi.setintfield(stream, 'c_avail_in', len(data)) # Prepare the output buffer with lltype.scoped_alloc(rffi.CCHARP.TO, OUTPUT_BUFFER_SIZE) as outbuf: # Strategy: we call deflate() to get as much output data as fits in # the buffer, then accumulate all output into a StringBuffer # 'result'. result = StringBuilder() while True: stream.c_next_out = rffi.cast(Bytefp, outbuf) bufsize = OUTPUT_BUFFER_SIZE if max_length < bufsize: if max_length <= 0: err = Z_OK break bufsize = max_length max_length -= bufsize rffi.setintfield(stream, 'c_avail_out', bufsize) err = cfunc(stream, flush) if err == Z_OK or err == Z_STREAM_END: # accumulate data into 'result' avail_out = rffi.cast(lltype.Signed, stream.c_avail_out) result.append_charpsize(outbuf, bufsize - avail_out) # if the output buffer is full, there might be more data # so we need to try again. Otherwise, we're done. if avail_out > 0: break # We're also done if we got a Z_STREAM_END (which should # only occur when flush == Z_FINISH). if err == Z_STREAM_END: break else: continue elif err == Z_BUF_ERROR: avail_out = rffi.cast(lltype.Signed, stream.c_avail_out) # When compressing, we will only get Z_BUF_ERROR if # the output buffer was full but there wasn't more # output when we tried again, so it is not an error # condition. if avail_out == bufsize: break # fallback case: report this error raise RZlibError.fromstream(stream, err, while_doing) # When decompressing, if the compressed stream of data was truncated, # then the zlib simply returns Z_OK and waits for more. If it is # complete it returns Z_STREAM_END. return (result.build(), err, rffi.cast(lltype.Signed, stream.c_avail_in))
def _crc_or_adler(string, start, function):
    """Run *function* (a crc32/adler32-style C call) over *string*.

    The input is fed in slices of at most 32MB so each C call stays
    bounded; *start* seeds the rolling checksum, which is returned
    after the whole string has been consumed.
    """
    with rffi.scoped_nonmovingbuffer(string) as raw:
        cursor = rffi.cast(Bytefp, raw)
        left = len(string)
        value = start
        while left > 0:
            step = min(left, 32 * 1024 * 1024)
            value = function(value, cursor, step)
            cursor = rffi.ptradd(cursor, step)
            left -= step
    return value
def write(self, value):
    """Write the whole byte string *value* to the wrapped FILE*.

    Raises IOError (with the errno saved right after the fwrite call)
    if fewer bytes than requested were written.
    """
    self._check_closed()
    with rffi.scoped_nonmovingbuffer(value) as raw:
        # the nonmoving buffer is either raw or pinned, so handing the
        # pointer to fwrite() directly is safe
        total = len(value)
        written = c_fwrite(raw, 1, total, self._ll_file)
        if written != total:
            errno = rposix.get_saved_errno()
            c_clearerr(self._ll_file)
            raise IOError(errno, os.strerror(errno))
def write(self, value):
    # Write the byte string 'value' in full to the wrapped FILE*;
    # raises IOError on a short write.
    self._check_closed()
    with rffi.scoped_nonmovingbuffer(value) as ll_value:
        # note that since we got a nonmoving buffer, it is either raw
        # or already cannot move, so the arithmetics below are fine
        length = len(value)
        bytes = c_fwrite(ll_value, 1, length, self._ll_file)
        if bytes != length:
            # short write: report the errno saved after the fwrite call
            errno = rposix.get_saved_errno()
            c_clearerr(self._ll_file)
            raise IOError(errno, os.strerror(errno))
def decodeex(decodebuf, stringdata, errors="strict", errorcb=None, namecb=None,
             ignore_error=0):
    # Decode 'stringdata' through the CJK codec state held in
    # 'decodebuf', invoking the error machinery on partial failures,
    # and return the decoded result as a unicode string.
    inleft = len(stringdata)
    with rffi.scoped_nonmovingbuffer(stringdata) as inbuf:
        if pypy_cjk_dec_init(decodebuf, inbuf, inleft) < 0:
            raise MemoryError
        while True:
            r = pypy_cjk_dec_chunk(decodebuf)
            if r == 0 or r == ignore_error:
                break
            # non-zero status: let the error handler fix up / raise
            multibytecodec_decerror(decodebuf, r, errors,
                                    errorcb, namecb, stringdata)
        src = pypy_cjk_dec_outbuf(decodebuf)
        length = pypy_cjk_dec_outlen(decodebuf)
        return rffi.wcharpsize2unicode(src, length)
def PyObject_Print(space, w_obj, fp, flags):
    """Print an object o, on file fp.  Returns -1 on error.  The flags argument
    is used to enable certain printing options. The only option currently
    supported is Py_PRINT_RAW; if given, the str() of the object is written
    instead of the repr()."""
    if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW:
        w_str = space.str(w_obj)
    else:
        w_str = space.repr(w_obj)

    count = space.len_w(w_str)
    data = space.text_w(w_str)
    with rffi.scoped_nonmovingbuffer(data) as buf:
        fwrite(buf, 1, count, fp)
    return 0
def PyObject_Print(space, w_obj, fp, flags):
    """Print an object o, on file fp.  Returns -1 on error.  The flags argument
    is used to enable certain printing options. The only option currently
    supported is Py_PRINT_RAW; if given, the str() of the object is written
    instead of the repr()."""
    if rffi.cast(lltype.Signed, flags) & Py_PRINT_RAW:
        w_str = space.str(w_obj)
    else:
        w_str = space.repr(w_obj)

    count = space.len_w(w_str)
    data = space.str_w(w_str)
    with rffi.scoped_nonmovingbuffer(data) as buf:
        fwrite(buf, 1, count, fp)
    return 0
def bytes_attach(space, py_obj, w_obj):
    """
    Copy RPython string object contents to a PyBytesObject. The
    c_ob_sval must not be modified.
    """
    py_str = rffi.cast(PyBytesObject, py_obj)
    s = space.str_w(w_obj)
    # the C-level object must have been allocated large enough
    if py_str.c_ob_size < len(s):
        raise oefmt(space.w_ValueError,
            "bytes_attach called on object with ob_size %d but trying to store %d",
            py_str.c_ob_size, len(s))
    with rffi.scoped_nonmovingbuffer(s) as s_ptr:
        rffi.c_memcpy(py_str.c_ob_sval, s_ptr, len(s))
    # NUL-terminate, cache the hash, and mark as interned-mortal
    py_str.c_ob_sval[len(s)] = '\0'
    py_str.c_ob_shash = space.hash_w(w_obj)
    py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL
def sendall(self, data, flags=0, signal_checker=None):
    """Send a data string to the socket.  For the optional flags
    argument, see the Unix manual.  This calls send() repeatedly
    until all data is sent.  If an error occurs, it's impossible
    to tell how much data has been sent."""
    with rffi.scoped_nonmovingbuffer(data) as dataptr:
        remaining = len(data)
        p = dataptr
        while remaining > 0:
            try:
                res = self.send_raw(p, remaining, flags)
                # advance past the bytes actually sent
                p = rffi.ptradd(p, res)
                remaining -= res
            except CSocketError, e:
                # retry on EINTR, optionally letting signals run
                if e.errno != _c.EINTR:
                    raise
            if signal_checker is not None:
                signal_checker()
def sendall(self, data, flags=0, signal_checker=None):
    """Send a data string to the socket.  For the optional flags
    argument, see the Unix manual.  This calls send() repeatedly
    until all data is sent.  If an error occurs, it's impossible
    to tell how much data has been sent."""
    with rffi.scoped_nonmovingbuffer(data) as dataptr:
        remaining = len(data)
        p = dataptr
        while remaining > 0:
            try:
                res = self.send_raw(p, remaining, flags)
                # move the cursor past what was sent
                p = rffi.ptradd(p, res)
                remaining -= res
            except CSocketError, e:
                # only EINTR is retried; anything else propagates
                if e.errno != _c.EINTR:
                    raise
            if signal_checker is not None:
                signal_checker()
def decompress(self, data):
    """decompress(data) -> string

    Provide more data to the decompressor object. It will return chunks
    of decompressed data whenever possible. If you try to decompress data
    after the end of stream is found, EOFError will be raised. If any data
    was found after the end of stream, it'll be ignored and saved in
    unused_data attribute."""
    assert data is not None
    if not self.running:
        raise oefmt(self.space.w_EOFError,
                    "end of stream was already found")
    if data == '':
        return self.space.newbytes('')

    in_bufsize = len(data)

    with rffi.scoped_nonmovingbuffer(data) as in_buf:
        self.bzs.c_next_in = in_buf
        rffi.setintfield(self.bzs, 'c_avail_in', in_bufsize)

        with OutBuffer(self.bzs) as out:
            while True:
                bzerror = BZ2_bzDecompress(self.bzs)
                if bzerror == BZ_STREAM_END:
                    # save any bytes after the end of stream
                    if rffi.getintfield(self.bzs, 'c_avail_in') != 0:
                        unused = [
                            self.bzs.c_next_in[i]
                            for i in range(
                                rffi.getintfield(self.bzs, 'c_avail_in'))
                        ]
                        self.unused_data = "".join(unused)
                    self.running = False
                    break
                if bzerror != BZ_OK:
                    _catch_bz2_error(self.space, bzerror)

                if rffi.getintfield(self.bzs, 'c_avail_in') == 0:
                    break
                elif rffi.getintfield(self.bzs, 'c_avail_out') == 0:
                    # output chunk full: grow the output buffer
                    out.prepare_next_chunk()

            res = out.make_result_string()
            return self.space.newbytes(res)
def decompress(self, data):
    """decompress(data) -> string

    Provide more data to the decompressor object. It will return chunks
    of decompressed data whenever possible. If you try to decompress data
    after the end of stream is found, EOFError will be raised. If any data
    was found after the end of stream, it'll be ignored and saved in
    unused_data attribute."""
    if not self.running:
        raise OperationError(self.space.w_EOFError,
            self.space.wrap("end of stream was already found"))
    if data == '':
        return self.space.wrap('')

    in_bufsize = len(data)

    with rffi.scoped_nonmovingbuffer(data) as in_buf:
        self.bzs.c_next_in = in_buf
        rffi.setintfield(self.bzs, 'c_avail_in', in_bufsize)

        with OutBuffer(self.bzs) as out:
            while True:
                bzerror = BZ2_bzDecompress(self.bzs)
                if bzerror == BZ_STREAM_END:
                    # anything left over goes to unused_data
                    if rffi.getintfield(self.bzs, 'c_avail_in') != 0:
                        unused = [self.bzs.c_next_in[i]
                                  for i in range(
                                      rffi.getintfield(self.bzs,
                                                       'c_avail_in'))]
                        self.unused_data = "".join(unused)
                    self.running = False
                    break
                if bzerror != BZ_OK:
                    _catch_bz2_error(self.space, bzerror)

                if rffi.getintfield(self.bzs, 'c_avail_in') == 0:
                    break
                elif rffi.getintfield(self.bzs, 'c_avail_out') == 0:
                    out.prepare_next_chunk()

            res = out.make_result_string()
            return self.space.wrap(res)
def decodeex(decodebuf, stringdata, errors="strict", errorcb=None, namecb=None,
             ignore_error=0):
    # Drive the CJK decoder over 'stringdata' chunk by chunk; statuses
    # other than 0 / ignore_error go through the error machinery.
    # Returns the decoded unicode string.
    inleft = len(stringdata)
    with rffi.scoped_nonmovingbuffer(stringdata) as inbuf:
        if pypy_cjk_dec_init(decodebuf, inbuf, inleft) < 0:
            raise MemoryError
        while True:
            r = pypy_cjk_dec_chunk(decodebuf)
            if r == 0 or r == ignore_error:
                break
            multibytecodec_decerror(decodebuf, r, errors,
                                    errorcb, namecb, stringdata)
        src = pypy_cjk_dec_outbuf(decodebuf)
        length = pypy_cjk_dec_outlen(decodebuf)
        return rffi.wcharpsize2unicode(src, length)
def multibytecodec_encerror(encodebuf, e, errors, errorcb, namecb,
                            unicodedata):
    # Handle an encoding error status 'e' from the CJK encoder
    # according to the 'errors' policy, writing a replacement string
    # back into 'encodebuf'.
    if e > 0:
        reason = "illegal multibyte sequence"
        esize = e
    elif e == MBERR_TOOFEW:
        reason = "incomplete multibyte sequence"
        esize = pypy_cjk_enc_inbuf_remaining(encodebuf)
    elif e == MBERR_NOMEMORY:
        raise MemoryError
    else:
        raise RuntimeError
    #
    # compute the string to use as a replacement -> 'replace', and
    # the current position in the input 'unicodedata' -> 'end'
    start = pypy_cjk_enc_inbuf_consumed(encodebuf)
    end = start + esize
    if errors == "strict":
        raise EncodeDecodeError(start, end, reason)
    elif errors == "ignore":
        replace = ""
    elif errors == "replace":
        # encode a '?' with this codec; fall back to a literal '?'
        codec = pypy_cjk_enc_getcodec(encodebuf)
        try:
            replace = encode(codec, u"?")
        except EncodeDecodeError:
            replace = "?"
    else:
        assert errorcb
        retu, rets, end = errorcb(errors, namecb, reason,
                                  unicodedata, start, end)
        if rets is not None:
            # py3k only
            replace = rets
        else:
            assert retu is not None
            codec = pypy_cjk_enc_getcodec(encodebuf)
            replace = encode(codec, retu, "strict", errorcb, namecb)
    with rffi.scoped_nonmovingbuffer(replace) as inbuf:
        r = pypy_cjk_enc_replace_on_error(encodebuf, inbuf, len(replace), end)
    if r == MBERR_NOMEMORY:
        raise MemoryError
def compress(space, data, compresslevel=9):
    """compress(data [, compresslevel=9]) -> string

    Compress data in one shot. If you want to compress data sequentially,
    use an instance of BZ2Compressor instead. The compresslevel parameter, if
    given, must be a number between 1 and 9."""
    assert data is not None
    if compresslevel < 1 or compresslevel > 9:
        raise oefmt(space.w_ValueError,
                    "compresslevel must be between 1 and 9")

    with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
        in_bufsize = len(data)

        with rffi.scoped_nonmovingbuffer(data) as in_buf:
            bzs.c_next_in = in_buf
            rffi.setintfield(bzs, 'c_avail_in', in_bufsize)

            # conforming to bz2 manual, this is large enough to fit compressed
            # data in one shot. We will check it later anyway.
            with OutBuffer(bzs,
                           in_bufsize + (in_bufsize / 100 + 1) + 600) as out:
                bzerror = BZ2_bzCompressInit(bzs, compresslevel, 0, 0)
                if bzerror != BZ_OK:
                    _catch_bz2_error(space, bzerror)

                while True:
                    bzerror = BZ2_bzCompress(bzs, BZ_FINISH)
                    if bzerror == BZ_STREAM_END:
                        break
                    elif bzerror != BZ_FINISH_OK:
                        # release the stream before reporting the error
                        BZ2_bzCompressEnd(bzs)
                        _catch_bz2_error(space, bzerror)

                    if rffi.getintfield(bzs, 'c_avail_out') == 0:
                        out.prepare_next_chunk()

                res = out.make_result_string()
                BZ2_bzCompressEnd(bzs)
                return space.newbytes(res)
def compress(self, data):
    """compress(data) -> string

    Provide more data to the compressor object. It will return chunks of
    compressed data whenever possible. When you've finished providing data
    to compress, call the flush() method to finish the compression process,
    and return what is left in the internal buffers."""
    assert data is not None
    try:
        # serialize access to the shared bz2 stream state
        self.lock()
        datasize = len(data)

        if datasize == 0:
            return self.space.newbytes("")

        if not self.running:
            raise oefmt(self.space.w_ValueError,
                        "this object was already flushed")

        in_bufsize = datasize

        with OutBuffer(self.bzs) as out:
            with rffi.scoped_nonmovingbuffer(data) as in_buf:
                self.bzs.c_next_in = in_buf
                rffi.setintfield(self.bzs, 'c_avail_in', in_bufsize)

                while True:
                    bzerror = BZ2_bzCompress(self.bzs, BZ_RUN)
                    if bzerror != BZ_RUN_OK:
                        _catch_bz2_error(self.space, bzerror)

                    if rffi.getintfield(self.bzs, 'c_avail_in') == 0:
                        break
                    elif rffi.getintfield(self.bzs, 'c_avail_out') == 0:
                        out.prepare_next_chunk()

                res = out.make_result_string()
                return self.space.newbytes(res)
    finally:
        self.unlock()
def compress(space, data, compresslevel=9):
    """compress(data [, compresslevel=9]) -> string

    Compress data in one shot. If you want to compress data sequentially,
    use an instance of BZ2Compressor instead. The compresslevel parameter, if
    given, must be a number between 1 and 9."""
    if compresslevel < 1 or compresslevel > 9:
        raise OperationError(space.w_ValueError,
            space.wrap("compresslevel must be between 1 and 9"))

    with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
        in_bufsize = len(data)

        with rffi.scoped_nonmovingbuffer(data) as in_buf:
            bzs.c_next_in = in_buf
            rffi.setintfield(bzs, 'c_avail_in', in_bufsize)

            # conforming to bz2 manual, this is large enough to fit compressed
            # data in one shot. We will check it later anyway.
            with OutBuffer(bzs,
                           in_bufsize + (in_bufsize / 100 + 1) + 600) as out:
                bzerror = BZ2_bzCompressInit(bzs, compresslevel, 0, 0)
                if bzerror != BZ_OK:
                    _catch_bz2_error(space, bzerror)

                while True:
                    bzerror = BZ2_bzCompress(bzs, BZ_FINISH)
                    if bzerror == BZ_STREAM_END:
                        break
                    elif bzerror != BZ_FINISH_OK:
                        # tear down the stream before raising
                        BZ2_bzCompressEnd(bzs)
                        _catch_bz2_error(space, bzerror)

                    if rffi.getintfield(bzs, 'c_avail_out') == 0:
                        out.prepare_next_chunk()

                res = out.make_result_string()
                BZ2_bzCompressEnd(bzs)
                return space.wrap(res)
def bytes_attach(space, py_obj, w_obj, w_userdata=None):
    """
    Copy RPython string object contents to a PyBytesObject. The
    c_ob_sval must not be modified.
    """
    py_str = rffi.cast(PyBytesObject, py_obj)
    s = space.bytes_w(w_obj)
    len_s = len(s)
    # the C-level object must have been allocated large enough
    if py_str.c_ob_size < len_s:
        raise oefmt(
            space.w_ValueError,
            "bytes_attach called on object with ob_size %d but trying to store %d",
            py_str.c_ob_size, len_s)
    with rffi.scoped_nonmovingbuffer(s) as s_ptr:
        rffi.c_memcpy(py_str.c_ob_sval, s_ptr, len_s)
    py_str.c_ob_sval[len_s] = '\0'
    # if py_obj has a tp_hash, this will try to call it, but the objects are
    # not fully linked yet
    #py_str.c_ob_shash = space.hash_w(w_obj)
    py_str.c_ob_shash = space.hash_w(space.newbytes(s))
    py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL
def inet_ntop(family, packed):
    "packed string -> human-readable string"
    if family == AF_INET:
        srcsize = sizeof(_c.in_addr)
        dstsize = _c.INET_ADDRSTRLEN
    elif AF_INET6 is not None and family == AF_INET6:
        srcsize = sizeof(_c.in6_addr)
        dstsize = _c.INET6_ADDRSTRLEN
    else:
        raise RSocketError("unknown address family")
    # the packed form must be exactly the size of the C address struct
    if len(packed) != srcsize:
        raise ValueError("packed IP wrong length for inet_ntop")
    with rffi.scoped_nonmovingbuffer(packed) as srcbuf:
        dstbuf = mallocbuf(dstsize)
        try:
            res = _c.inet_ntop(family, rffi.cast(rffi.VOIDP, srcbuf),
                               dstbuf, dstsize)
            if not res:
                raise last_error()
            return rffi.charp2str(res)
        finally:
            # dstbuf is a raw allocation and must always be released
            lltype.free(dstbuf, flavor="raw")
def multibytecodec_encerror(encodebuf, e, errors, errorcb, namecb,
                            unicodedata):
    # Apply the 'errors' policy to an encoding failure status 'e' and
    # splice the resulting replacement into 'encodebuf'.
    if e > 0:
        reason = "illegal multibyte sequence"
        esize = e
    elif e == MBERR_TOOFEW:
        reason = "incomplete multibyte sequence"
        esize = pypy_cjk_enc_inbuf_remaining(encodebuf)
    elif e == MBERR_NOMEMORY:
        raise MemoryError
    else:
        raise RuntimeError
    #
    # compute the string to use as a replacement -> 'replace', and
    # the current position in the input 'unicodedata' -> 'end'
    start = pypy_cjk_enc_inbuf_consumed(encodebuf)
    end = start + esize
    if errors == "strict":
        raise EncodeDecodeError(start, end, reason)
    elif errors == "ignore":
        replace = ""
    elif errors == "replace":
        # try to encode '?' with this codec; fall back to raw '?'
        codec = pypy_cjk_enc_getcodec(encodebuf)
        try:
            replace = encode(codec, u"?")
        except EncodeDecodeError:
            replace = "?"
    else:
        assert errorcb
        retu, rets, end = errorcb(errors, namecb, reason,
                                  unicodedata, start, end)
        if rets is not None:
            # py3k only
            replace = rets
        else:
            assert retu is not None
            codec = pypy_cjk_enc_getcodec(encodebuf)
            replace = encode(codec, retu, "strict", errorcb, namecb)
    with rffi.scoped_nonmovingbuffer(replace) as inbuf:
        r = pypy_cjk_enc_replace_on_error(encodebuf, inbuf, len(replace), end)
    if r == MBERR_NOMEMORY:
        raise MemoryError
def multibytecodec_encerror(encodebuf, e, errors, errorcb, namecb,
                            unicodedata):
    # Apply the 'errors' policy to encoding failure status 'e'.  The
    # error callback may return either a unicode (utf-8, rettype 'u')
    # or a byte-string replacement; unicode replacements are re-encoded
    # with the codec before being spliced into 'encodebuf'.
    if e > 0:
        reason = "illegal multibyte sequence"
        esize = e
    elif e == MBERR_TOOFEW:
        reason = "incomplete multibyte sequence"
        esize = pypy_cjk_enc_inbuf_remaining(encodebuf)
    elif e == MBERR_NOMEMORY:
        raise MemoryError
    else:
        raise RuntimeError
    #
    # compute the string to use as a replacement -> 'replace', and
    # the current position in the input 'unicodedata' -> 'end'
    start = pypy_cjk_enc_inbuf_consumed(encodebuf)
    end = start + esize
    if errors == "strict":
        raise EncodeDecodeError(start, end, reason)
    elif errors == "ignore":
        replace = ""
        rettype = 'b' # != 'u'
    elif errors == "replace":
        replace = "?" # utf-8 unicode
        rettype = 'u'
    else:
        assert errorcb
        replace, end, rettype, obj = errorcb(errors, namecb, reason,
                                             unicodedata, start, end)
    if rettype == 'u':
        # unicode replacement: encode it with this codec first
        codec = pypy_cjk_enc_getcodec(encodebuf)
        lgt = rutf8.check_utf8(replace, False)
        replace = encode(codec, replace, lgt, copystate=encodebuf)
    #else:
    #   replace is meant to be a byte string already
    with rffi.scoped_nonmovingbuffer(replace) as inbuf:
        r = pypy_cjk_enc_replace_on_error(encodebuf, inbuf, len(replace), end)
    if r == MBERR_NOMEMORY:
        raise MemoryError
def inet_ntop(family, packed):
    "packed string -> human-readable string"
    if family == AF_INET:
        srcsize = sizeof(_c.in_addr)
        dstsize = _c.INET_ADDRSTRLEN
    elif AF_INET6 is not None and family == AF_INET6:
        srcsize = sizeof(_c.in6_addr)
        dstsize = _c.INET6_ADDRSTRLEN
    else:
        raise RSocketError("unknown address family")
    # reject input that does not match the C address struct size
    if len(packed) != srcsize:
        raise ValueError("packed IP wrong length for inet_ntop")
    with rffi.scoped_nonmovingbuffer(packed) as srcbuf:
        dstbuf = mallocbuf(dstsize)
        try:
            res = _c.inet_ntop(family, rffi.cast(rffi.VOIDP, srcbuf),
                               dstbuf, dstsize)
            if not res:
                raise last_error()
            return rffi.charp2str(res)
        finally:
            # always release the raw output buffer
            lltype.free(dstbuf, flavor='raw')
def decompress(space, data):
    """decompress(data) -> decompressed data

    Decompress data in one shot. If you want to decompress data
    sequentially, use an instance of BZ2Decompressor instead."""
    assert data is not None
    in_bufsize = len(data)
    if in_bufsize == 0:
        return space.newbytes("")

    with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
        with rffi.scoped_nonmovingbuffer(data) as in_buf:
            bzs.c_next_in = in_buf
            rffi.setintfield(bzs, 'c_avail_in', in_bufsize)

            with OutBuffer(bzs) as out:
                bzerror = BZ2_bzDecompressInit(bzs, 0, 0)
                if bzerror != BZ_OK:
                    _catch_bz2_error(space, bzerror)

                while True:
                    bzerror = BZ2_bzDecompress(bzs)
                    if bzerror == BZ_STREAM_END:
                        break
                    if bzerror != BZ_OK:
                        BZ2_bzDecompressEnd(bzs)
                        _catch_bz2_error(space, bzerror)

                    if rffi.getintfield(bzs, 'c_avail_in') == 0:
                        # input exhausted before the stream terminator
                        BZ2_bzDecompressEnd(bzs)
                        raise oefmt(space.w_ValueError,
                                    "couldn't find end of stream")
                    elif rffi.getintfield(bzs, 'c_avail_out') == 0:
                        out.prepare_next_chunk()

                res = out.make_result_string()
                BZ2_bzDecompressEnd(bzs)
                return space.newbytes(res)
def decompress(space, data):
    """decompress(data) -> decompressed data

    Decompress data in one shot. If you want to decompress data
    sequentially, use an instance of BZ2Decompressor instead."""
    in_bufsize = len(data)
    if in_bufsize == 0:
        return space.wrap("")

    with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
        with rffi.scoped_nonmovingbuffer(data) as in_buf:
            bzs.c_next_in = in_buf
            rffi.setintfield(bzs, 'c_avail_in', in_bufsize)

            with OutBuffer(bzs) as out:
                bzerror = BZ2_bzDecompressInit(bzs, 0, 0)
                if bzerror != BZ_OK:
                    _catch_bz2_error(space, bzerror)

                while True:
                    bzerror = BZ2_bzDecompress(bzs)
                    if bzerror == BZ_STREAM_END:
                        break
                    if bzerror != BZ_OK:
                        BZ2_bzDecompressEnd(bzs)
                        _catch_bz2_error(space, bzerror)

                    if rffi.getintfield(bzs, 'c_avail_in') == 0:
                        # ran out of input without seeing the terminator
                        BZ2_bzDecompressEnd(bzs)
                        raise OperationError(space.w_ValueError, space.wrap(
                            "couldn't find end of stream"))
                    elif rffi.getintfield(bzs, 'c_avail_out') == 0:
                        out.prepare_next_chunk()

                res = out.make_result_string()
                BZ2_bzDecompressEnd(bzs)
                return space.wrap(res)
def _Pattern_match_search(vm, anchored):
    """Run this Pattern against a string, returning a Match object on
    success or the fail object on no match.

    vm       -- the Converge VM instance; arguments are decoded from it
                (a Con_String subject and an optional start index).
    anchored -- if true, pass PCRE_ANCHORED so the match must start
                exactly at the start position.
    """
    mod = vm.get_funcs_mod()
    (self, s_o, sp_o),_ = vm.decode_args(mand="!S", opt="I", self_of=Pattern)
    assert isinstance(self, Pattern)
    assert isinstance(s_o, Con_String)
    # pcre_exec needs 3 ints per capture group (plus the whole match).
    ovect_size = (1 + self.num_caps) * 3
    ovect = lltype.malloc(rffi.INTP.TO, ovect_size, flavor="raw")
    if anchored:
        flags = PCRE_ANCHORED
    else:
        flags = 0
    sp = translate_idx_obj(vm, sp_o, len(s_o.v))
    # Pin the subject string so PCRE can read it through a raw pointer.
    with rffi.scoped_nonmovingbuffer(s_o.v) as rs:
        r = int(pcre_exec(self.cp, None, rs, len(s_o.v), sp, flags,
                          ovect, ovect_size))
    if r < 0:
        # Free the raw offset vector on *every* failure path; the
        # original code leaked it when pcre_exec reported an error
        # other than PCRE_ERROR_NOMATCH.
        lltype.free(ovect, flavor="raw")
        if r == PCRE_ERROR_NOMATCH:
            return vm.get_builtin(BUILTIN_FAIL_OBJ)
        raise Exception("XXX")
    # On success the Match object takes over ownership of ovect.
    return Match(vm, mod.get_defn(vm, "Match"), ovect, self.num_caps, s_o)
def write_w(self, space, w_data):
    """Write w_data to the Windows console via WriteConsoleW.

    The text is re-encoded from utf-8 to utf-16 and written wchar by
    wchar, looping until everything has been consumed.  Returns the
    number of utf-16 code units written (excluding the BOM).
    Raises the app-level exceptions for a closed or read-only handle,
    and WindowsError if WriteConsoleW fails.
    """
    if self.handle == rwin32.INVALID_HANDLE_VALUE:
        raise err_closed(space)
    if not self.writable:
        raise err_mode(space, "writing")
    utf8 = space.utf8_w(w_data)
    if not len(utf8):
        # Nothing to write.
        return space.newint(0)
    # TODO: break up the encoding into chunks to save memory
    state = space.fromcache(CodecState)
    errh = state.encode_error_handler
    utf16 = utf8_encode_utf_16(utf8, 'strict', errh,
                               allow_surrogates=False)
    # Two bytes per utf-16 code unit.
    wlen = len(utf16) // 2
    # n receives the per-call count of code units actually written.
    with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as n:
        # Pin the encoded bytes so the OS can read them directly.
        with rffi.scoped_nonmovingbuffer(utf16) as dataptr:
            # skip BOM, start at 1
            offset = 1
            while offset < wlen:
                # offset * 2: convert code-unit index to byte offset.
                res = rwin32.WriteConsoleW(
                    self.handle,
                    rffi.cast(rwin32.LPVOID,
                              rffi.ptradd(dataptr, offset * 2)),
                    wlen - offset, n, rffi.cast(rwin32.LPVOID, 0))
                if not res:
                    err = rwin32.GetLastError_saved()
                    raise OperationError(space.w_WindowsError,
                                         space.newint(err))
                # Advance by however many units the console accepted;
                # a short write simply loops again.
                nwrote = intmask(n[0])
                offset += nwrote
    # offset - 1: do not count the skipped BOM.
    return space.newint(offset - 1)
def compress(self, data):
    """compress(data) -> string

    Provide more data to the compressor object. It will return chunks of
    compressed data whenever possible. When you've finished providing data
    to compress, call the flush() method to finish the compression process,
    and return what is left in the internal buffers."""
    datasize = len(data)
    if datasize == 0:
        # Nothing to feed; libbz2 is not touched.
        return self.space.wrap("")
    if not self.running:
        # flush() has already been called on this object.
        raise OperationError(self.space.w_ValueError,
            self.space.wrap("this object was already flushed"))
    in_bufsize = datasize
    # OutBuffer manages the growing chain of output chunks.
    with OutBuffer(self.bzs) as out:
        # Pin the input bytes so libbz2 can read them through a raw
        # pointer.
        with rffi.scoped_nonmovingbuffer(data) as in_buf:
            self.bzs.c_next_in = in_buf
            rffi.setintfield(self.bzs, 'c_avail_in', in_bufsize)
            while True:
                bzerror = BZ2_bzCompress(self.bzs, BZ_RUN)
                if bzerror != BZ_RUN_OK:
                    # Translates the libbz2 error code into an app-level
                    # exception.
                    _catch_bz2_error(self.space, bzerror)
                if rffi.getintfield(self.bzs, 'c_avail_in') == 0:
                    # All input consumed; whatever output libbz2 has
                    # produced so far is returned below.
                    break
                elif rffi.getintfield(self.bzs, 'c_avail_out') == 0:
                    # Output chunk full: grow the output buffer chain.
                    out.prepare_next_chunk()
            res = out.make_result_string()
            return self.space.wrap(res)
def deflateSetDictionary(stream, string):
    """Install *string* as the compression dictionary of *stream*.

    Raises RZlibError if zlib rejects the call (bad parameter or a
    stream in the wrong state).
    """
    length = len(string)
    # Pin the dictionary bytes so zlib can read them through a raw
    # pointer.
    with rffi.scoped_nonmovingbuffer(string) as raw:
        status = _deflateSetDictionary(stream,
                                       rffi.cast(Bytefp, raw),
                                       length)
        if status == Z_STREAM_ERROR:
            raise RZlibError("Parameter is invalid or the stream state is inconsistent")
def _compare_two_strings(a, b):
    """Compare two strings through the timing-safe pypy_tscmp helper
    and return the result as a Bool."""
    len_a = len(a)
    len_b = len(b)
    # Both strings are pinned so the C helper can read them directly.
    with rffi.scoped_nonmovingbuffer(a) as ptr_a:
        with rffi.scoped_nonmovingbuffer(b) as ptr_b:
            raw_result = pypy_tscmp(ptr_a, ptr_b, len_a, len_b)
    return rffi.cast(lltype.Bool, raw_result)
def update(self, space, string):
    """Feed *string* into the running OpenSSL digest context."""
    length = len(string)
    # Pin the bytes so OpenSSL can read them through a raw pointer.
    with rffi.scoped_nonmovingbuffer(string) as raw:
        with self.lock:
            # XXX try to not release the GIL for small requests
            ropenssl.EVP_DigestUpdate(self.ctx, raw, length)
def send(self, msg, prio):
    """Post *msg* on this message queue with priority *prio*,
    raising via check_call if mq_send reports an error."""
    size = len(msg)
    # Pin the message bytes so mq_send can read them directly.
    with rffi.scoped_nonmovingbuffer(msg) as raw:
        rv = mq_send(self.mqd, raw, size, prio)
        check_call(rv, "mq_send")
def update(self, space, string):
    """Hash more input: push *string* into the digest context."""
    # The buffer must stay put while OpenSSL reads it, hence the
    # non-moving pin around the whole call.
    with rffi.scoped_nonmovingbuffer(string) as data_ptr:
        with self.lock:
            # XXX try to not release the GIL for small requests
            ropenssl.EVP_DigestUpdate(self.ctx, data_ptr, len(string))
def send(self, data, flags=0):
    """Send a data string to the socket.  For the optional flags
    argument, see the Unix manual.  Return the number of bytes sent;
    this may be less than len(data) if the network is busy."""
    length = len(data)
    # Pin the outgoing bytes so send_raw can hand the kernel a stable
    # pointer.
    with rffi.scoped_nonmovingbuffer(data) as raw:
        sent = self.send_raw(raw, length, flags)
    return sent