def _build_function_type(space, fargs, fresult, ellipsis, abi): from pypy.module._cffi_backend import ctypefunc # if ((fresult.size < 0 and not isinstance(fresult, ctypevoid.W_CTypeVoid)) or isinstance(fresult, ctypearray.W_CTypeArray)): if (isinstance(fresult, ctypestruct.W_CTypeStructOrUnion) and fresult.size < 0): raise oefmt(space.w_TypeError, "result type '%s' is opaque", fresult.name) else: raise oefmt(space.w_TypeError, "invalid result type: '%s'", fresult.name) # fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis, abi) unique_cache = space.fromcache(UniqueCache) func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi) for weakdict in unique_cache.functions: if weakdict.get(func_hash) is None: weakdict.set(func_hash, fct) break else: weakdict = rweakref.RWeakValueDictionary(int, ctypefunc.W_CTypeFunc) unique_cache.functions.append(weakdict) weakdict.set(func_hash, fct) return fct
def descr_pow(self, space, w_exponent, w_modulus=None):
    """long.__pow__: self ** w_exponent, optionally modulo w_modulus.

    Returns NotImplemented for unsupported operand types.  Without a
    modulus, a negative exponent falls back to float pow; with a
    modulus, a negative exponent raises TypeError and a zero modulus
    raises ValueError."""
    if isinstance(w_exponent, W_IntObject):
        w_exponent = w_exponent.as_w_long(space)
    elif not isinstance(w_exponent, W_AbstractLongObject):
        return space.w_NotImplemented

    if space.is_none(w_modulus):
        if w_exponent.asbigint().sign < 0:
            # negative exponent without modulus: result is a float
            self = self.descr_float(space)
            w_exponent = w_exponent.descr_float(space)
            return space.pow(self, w_exponent, space.w_None)
        return W_LongObject(self.num.pow(w_exponent.asbigint()))
    elif isinstance(w_modulus, W_IntObject):
        w_modulus = w_modulus.as_w_long(space)
    elif not isinstance(w_modulus, W_AbstractLongObject):
        return space.w_NotImplemented

    if w_exponent.asbigint().sign < 0:
        raise oefmt(space.w_TypeError,
                    "pow() 2nd argument cannot be negative when 3rd "
                    "argument specified")
    try:
        result = self.num.pow(w_exponent.asbigint(), w_modulus.asbigint())
    except ValueError:
        # rbigint.pow raises ValueError for a zero modulus
        raise oefmt(space.w_ValueError, "pow 3rd argument cannot be 0")
    return W_LongObject(result)
def _getfunc(space, CDLL, w_name, w_argtypes, w_restype):
    """Look up a function in CDLL by symbol name (str) or ordinal (int)
    and wrap it as a W_FuncPtr with the given argument/result types.

    Raises AttributeError when the symbol/ordinal is missing, and
    TypeError for any other kind of name."""
    argtypes_w, argtypes, w_restype, restype = unpack_argtypes(
        space, w_argtypes, w_restype)
    if space.isinstance_w(w_name, space.w_str):
        name = space.str_w(w_name)
        try:
            func = CDLL.cdll.getpointer(name, argtypes, restype,
                                        flags = CDLL.flags)
        except KeyError:
            raise oefmt(space.w_AttributeError,
                        "No symbol %s found in library %s", name, CDLL.name)
        except LibFFIError:
            raise got_libffi_error(space)
        return W_FuncPtr(func, argtypes_w, w_restype)
    elif space.isinstance_w(w_name, space.w_int):
        # Windows-style lookup by export ordinal
        ordinal = space.int_w(w_name)
        try:
            func = CDLL.cdll.getpointer_by_ordinal(
                ordinal, argtypes, restype,
                flags = CDLL.flags)
        except KeyError:
            raise oefmt(space.w_AttributeError,
                        "No ordinal %d found in library %s", ordinal,
                        CDLL.name)
        except LibFFIError:
            raise got_libffi_error(space)
        return W_FuncPtr(func, argtypes_w, w_restype)
    else:
        raise OperationError(space.w_TypeError, space.wrap(
            'function name must be a string or integer'))
def descr_function__new__(space, w_subtype, w_code, w_globals,
                          w_name=None, w_argdefs=None, w_closure=None):
    """Implement function.__new__: build a Function from a code object,
    a globals dict, and optional name, default values and closure.

    The closure tuple must match the code object's free variables
    (ValueError otherwise); non-dict globals or a non-tuple closure
    raise TypeError."""
    code = space.interp_w(Code, w_code)
    if not space.isinstance_w(w_globals, space.w_dict):
        raise oefmt(space.w_TypeError, "expected dict")
    if not space.is_none(w_name):
        name = space.str_w(w_name)
    else:
        name = None
    if not space.is_none(w_argdefs):
        defs_w = space.fixedview(w_argdefs)
    else:
        defs_w = []
    nfreevars = 0
    from pypy.interpreter.pycode import PyCode
    if isinstance(code, PyCode):
        nfreevars = len(code.co_freevars)
    if space.is_none(w_closure) and nfreevars == 0:
        closure = None      # no closure expected and none given
    elif not space.is_w(space.type(w_closure), space.w_tuple):
        raise oefmt(space.w_TypeError, "invalid closure")
    else:
        from pypy.interpreter.nestedscope import Cell
        closure_w = space.unpackiterable(w_closure)
        n = len(closure_w)
        if nfreevars == 0:
            raise oefmt(space.w_ValueError, "no closure needed")
        elif nfreevars != n:
            raise oefmt(space.w_ValueError, "closure is wrong size")
        closure = [space.interp_w(Cell, w_cell) for w_cell in closure_w]
    func = space.allocate_instance(Function, w_subtype)
    Function.__init__(func, space, code, w_globals, defs_w, closure, name)
    return space.wrap(func)
def descr_translate(self, space, w_table):
    """unicode.translate(table): map each character through w_table.

    Per character, the table entry may be missing (keep the char),
    None (delete it), an int in range(sys.maxunicode + 1), or a
    unicode replacement string; anything else raises TypeError."""
    selfvalue = self._value
    w_sys = space.getbuiltinmodule('sys')
    maxunicode = space.int_w(space.getattr(w_sys,
                                           space.wrap("maxunicode")))
    result = []
    for unichar in selfvalue:
        try:
            w_newval = space.getitem(w_table, space.wrap(ord(unichar)))
        except OperationError as e:
            if e.match(space, space.w_LookupError):
                # no mapping: keep the character unchanged
                result.append(unichar)
            else:
                raise
        else:
            if space.is_w(w_newval, space.w_None):
                # None means "delete this character"
                continue
            elif space.isinstance_w(w_newval, space.w_int):
                newval = space.int_w(w_newval)
                if newval < 0 or newval > maxunicode:
                    raise oefmt(space.w_TypeError,
                                "character mapping must be in range(%s)",
                                hex(maxunicode + 1))
                result.append(unichr(newval))
            elif space.isinstance_w(w_newval, space.w_unicode):
                result.append(space.unicode_w(w_newval))
            else:
                raise oefmt(space.w_TypeError,
                            "character mapping must return integer, None "
                            "or str")
    return W_UnicodeObject(u''.join(result))
def PyUnicode_FromEncodedObject(space, w_obj, encoding, errors):
    """Coerce an encoded object obj to an Unicode object and return a
    reference with incremented refcount.

    String and other char buffer compatible objects are decoded according
    to the given encoding and using the error handling defined by errors.
    Both can be NULL to have the interface use the default values (see the
    next section for details).

    All other objects, including Unicode objects, cause a TypeError to be
    set."""
    if not encoding:
        # NULL encoding is rejected in this implementation
        raise oefmt(space.w_TypeError, "decoding Unicode is not supported")
    w_encoding = space.wrap(rffi.charp2str(encoding))
    if errors:
        w_errors = space.wrap(rffi.charp2str(errors))
    else:
        w_errors = None

    # - unicode is disallowed
    # - raise TypeError for non-string types
    if space.isinstance_w(w_obj, space.w_unicode):
        w_meth = None
    else:
        try:
            w_meth = space.getattr(w_obj, space.wrap('decode'))
        except OperationError as e:
            if not e.match(space, space.w_AttributeError):
                raise
            w_meth = None
    if w_meth is None:
        raise oefmt(space.w_TypeError, "decoding Unicode is not supported")
    return space.call_function(w_meth, w_encoding, w_errors)
def descr_long(self, space):
    """float.__long__: truncate this float to an app-level long.

    Raises OverflowError for +/-infinity and ValueError for NaN
    (mirroring the errors from W_LongObject.fromfloat)."""
    try:
        return W_LongObject.fromfloat(space, self.floatval)
    except OverflowError:
        raise oefmt(space.w_OverflowError,
                    "cannot convert float infinity to integer")
    except ValueError:
        raise oefmt(space.w_ValueError,
                    "cannot convert float NaN to integer")
def set_param(space, __args__): '''Configure the tunable JIT parameters. * set_param(name=value, ...) # as keyword arguments * set_param("name=value,name=value") # as a user-supplied string * set_param("off") # disable the jit * set_param("default") # restore all defaults ''' # XXXXXXXXX args_w, kwds_w = __args__.unpack() if len(args_w) > 1: raise oefmt(space.w_TypeError, "set_param() takes at most 1 non-keyword argument, %d " "given", len(args_w)) if len(args_w) == 1: text = space.str_w(args_w[0]) try: jit.set_user_param(None, text) except ValueError: raise OperationError(space.w_ValueError, space.wrap("error in JIT parameters string")) for key, w_value in kwds_w.items(): if key == 'enable_opts': jit.set_param(None, 'enable_opts', space.str_w(w_value)) else: intval = space.int_w(w_value) for name, _ in unroll_parameters: if name == key and name != 'enable_opts': jit.set_param(None, name, intval) break else: raise oefmt(space.w_TypeError, "no JIT parameter '%s'", key)
def _realize_c_struct_or_union(ffi, sindex):
    """Realize the W_CType for struct/union number 'sindex' of this ffi.

    Non-external entries get a fresh (possibly lazy) W_CTypeStruct or
    W_CTypeUnion; external (F_EXTERNAL) entries are fetched from the
    included ffis.  The result is cached in ffi.cached_types."""
    s = ffi.ctxobj.ctx.c_struct_unions[sindex]
    type_index = rffi.getintfield(s, 'c_type_index')
    if ffi.cached_types[type_index] is not None:
        return ffi.cached_types[type_index] #found already in the "primary" slot

    space = ffi.space
    w_ctype = None
    c_flags = rffi.getintfield(s, 'c_flags')
    c_first_field_index = rffi.getintfield(s, 'c_first_field_index')
    if (c_flags & cffi_opcode.F_EXTERNAL) == 0:
        if (c_flags & cffi_opcode.F_UNION) != 0:
            name = _realize_name("union ", s.c_name)
            x = ctypestruct.W_CTypeUnion(space, name)
        else:
            name = _realize_name("struct ", s.c_name)
            x = ctypestruct.W_CTypeStruct(space, name)
        if (c_flags & cffi_opcode.F_OPAQUE) == 0:
            assert c_first_field_index >= 0
            w_ctype = x
            w_ctype.size = rffi.getintfield(s, 'c_size')
            w_ctype.alignment = rffi.getintfield(s, 'c_alignment')
            # w_ctype._field_list and other underscore fields are still
            # None, making it a "lazy" (i.e. "non-forced") kind of struct
            w_ctype._lazy_ffi = ffi
            w_ctype._lazy_s = s
        else:
            assert c_first_field_index < 0
    else:
        assert c_first_field_index < 0
        x = _fetch_external_struct_or_union(s, ffi.included_ffis_libs)
        if x is None:
            raise oefmt(ffi.w_FFIError,
                    "'%s %s' should come from ffi.include() but was not found",
                    "union" if c_flags & cffi_opcode.F_UNION else "struct",
                    rffi.charp2str(s.c_name))
        assert isinstance(x, ctypestruct.W_CTypeStructOrUnion)
        if (c_flags & cffi_opcode.F_OPAQUE) == 0 and x.size < 0:
            prefix = "union" if c_flags & cffi_opcode.F_UNION else "struct"
            name = rffi.charp2str(s.c_name)
            raise oefmt(space.w_NotImplementedError,
                    "'%s %s' is opaque in the ffi.include(), but no "
                    "longer in the ffi doing the include (workaround: don't "
                    "use ffi.include() but duplicate the declarations of "
                    "everything using %s %s)",
                    prefix, name, prefix, name)

    # Update the "primary" OP_STRUCT_UNION slot
    ffi.cached_types[type_index] = x

    if w_ctype is not None and rffi.getintfield(s, 'c_size') == -2:
        # oops, this struct is unnamed and we couldn't generate
        # a C expression to get its size.  We have to rely on
        # complete_struct_or_union() to compute it now.
        try:
            do_realize_lazy_struct(w_ctype)
        except:
            # undo the caching on failure so a later call retries
            ffi.cached_types[type_index] = None
            raise
    return x
def __init__(self, space, cdata, ctype, w_callable, w_error, w_onerror): W_CData.__init__(self, space, cdata, ctype) # if not space.is_true(space.callable(w_callable)): raise oefmt(space.w_TypeError, "expected a callable object, not %T", w_callable) self.w_callable = w_callable if not space.is_none(w_onerror): if not space.is_true(space.callable(w_onerror)): raise oefmt(space.w_TypeError, "expected a callable object for 'onerror', not %T", w_onerror) self.w_onerror = w_onerror # fresult = self.getfunctype().ctitem size = fresult.size if size < 0: size = 0 elif fresult.is_primitive_integer and size < SIZE_OF_FFI_ARG: size = SIZE_OF_FFI_ARG with lltype.scoped_alloc(rffi.CCHARP.TO, size, zero=True) as ll_error: if not space.is_none(w_error): convert_from_object_fficallback(fresult, ll_error, w_error, self.decode_args_from_libffi) self.error_string = rffi.charpsize2str(ll_error, size) # # We must setup the GIL here, in case the callback is invoked in # some other non-Pythonic thread. This is the same as cffi on # CPython, or ctypes. if space.config.translation.thread: from pypy.module.thread.os_thread import setup_threads setup_threads(space)
def descr_rpartition(self, space, w_sub):
    """str/bytearray.rpartition(sep): split around the last occurrence
    of sep, returning (head, sep, tail) or (empty, empty, self) when sep
    does not occur.  Raises ValueError for an empty separator."""
    from pypy.objspace.std.bytearrayobject import W_BytearrayObject
    value = self._val(space)

    if self._use_rstr_ops(space, w_sub):
        sub = self._op_val(space, w_sub)
        sublen = len(sub)
        if sublen == 0:
            raise oefmt(space.w_ValueError, "empty separator")
        pos = value.rfind(sub)
    else:
        # separator given as a buffer-compatible object
        sub = _get_buffer(space, w_sub)
        sublen = sub.getlength()
        if sublen == 0:
            raise oefmt(space.w_ValueError, "empty separator")
        pos = rfind(value, sub, 0, len(value))
        if pos != -1 and isinstance(self, W_BytearrayObject):
            # return a fresh bytearray as the middle element, not the buffer
            w_sub = self._new_from_buffer(sub)

    if pos == -1:
        if isinstance(self, W_BytearrayObject):
            self = self._new(value)
        return space.newtuple([self._empty(), self._empty(), self])
    else:
        return space.newtuple(
            [self._sliced(space, value, 0, pos, self), w_sub,
             self._sliced(space, value, pos + sublen, len(value), self)])
def next(self):
    """Advance the dict iterator and return the next entry, or EMPTY
    when exhausted.

    Raises RuntimeError (and makes the iterator sticky-dead by setting
    len = -1) when the dict was resized during iteration; also handles
    the rarer case where only the dict's strategy changed."""
    if self.dictimplementation is None:
        return EMPTY
    space = self.space
    if self.len != self.dictimplementation.length():
        self.len = -1   # Make this error state sticky
        raise oefmt(space.w_RuntimeError,
                    "dictionary changed size during iteration")

    # look for the next entry
    if self.pos < self.len:
        # TP is 'key'/'value'/'item', bound in the enclosing scope
        result = getattr(self, 'next_' + TP + '_entry')()
        self.pos += 1
        if self.strategy is self.dictimplementation.strategy:
            return result      # common case
        else:
            # waaa, obscure case: the strategy changed, but not the
            # length of the dict.  The (key, value) pair in 'result'
            # might be out-of-date.  We try to explicitly look up
            # the key in the dict.
            if TP == 'key' or TP == 'value':
                return result
            w_key = result[0]
            w_value = self.dictimplementation.getitem(w_key)
            if w_value is None:
                self.len = -1   # Make this error state sticky
                raise oefmt(space.w_RuntimeError,
                            "dictionary changed during iteration")
            return (w_key, w_value)

    # no more entries
    self.dictimplementation = None
    return EMPTY
def decodeslice(self, space, w_slice):
    """Decode w_slice into a (start, stop) pair for this 'c' array.

    Only step-1 slices of character ('c') arrays are supported.  Raises
    TypeError for non-slices / non-'c' arrays, ValueError for a step or
    out-of-bounds range, and a segfault error if the buffer was freed."""
    if not space.isinstance_w(w_slice, space.w_slice):
        raise oefmt(space.w_TypeError, "index must be int or slice")
    letter = self.shape.itemcode
    if letter != 'c':
        raise oefmt(space.w_TypeError, "only 'c' arrays support slicing")
    w_start = space.getattr(w_slice, space.wrap('start'))
    w_stop = space.getattr(w_slice, space.wrap('stop'))
    w_step = space.getattr(w_slice, space.wrap('step'))

    # missing bounds default to the full array
    if space.is_w(w_start, space.w_None):
        start = 0
    else:
        start = space.int_w(w_start)
    if space.is_w(w_stop, space.w_None):
        stop = self.length
    else:
        stop = space.int_w(w_stop)
    if not space.is_w(w_step, space.w_None):
        step = space.int_w(w_step)
        if step != 1:
            raise oefmt(space.w_ValueError, "no step support")
    if not (0 <= start <= stop <= self.length):
        raise oefmt(space.w_ValueError, "slice out of bounds")
    if not self.ll_buffer:
        raise segfault_exception(space, "accessing a freed array")
    return start, stop
def __init__(self, space, ctype, w_callable, w_error, w_onerror):
    """Allocate a libffi closure from the closure heap and prepare it to
    invoke w_callable; raises NotImplementedError for signatures libffi
    cannot describe, and SystemError if closure preparation fails."""
    raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc())
    self._closure = Closure(raw_closure)
    W_ExternPython.__init__(self, space, raw_closure, ctype,
                            w_callable, w_error, w_onerror)
    self.key_pycode = space._try_fetch_pycode(w_callable)
    #
    cif_descr = self.getfunctype().cif_descr
    if not cif_descr:
        raise oefmt(space.w_NotImplementedError,
                    "%s: callback with unsupported argument or "
                    "return type or with '...'", self.getfunctype().name)
    with self as ptr:
        closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr)
        unique_id = self.hide_object()
        res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif,
                                         invoke_callback,
                                         unique_id)
        if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK:
            raise oefmt(space.w_SystemError,
                        "libffi failed to build this callback")
        # sanity check that libffi stored our user_data where we expect it
        if closure_ptr.c_user_data != unique_id:
            raise oefmt(space.w_SystemError,
                "ffi_prep_closure(): bad user_data (it seems that the "
                "version of the libffi library seen at runtime is "
                "different from the 'ffi.h' file seen at compile-time)")
def __init__(self, space, args):
    """Broadcast iterator over several operands.

    Converts each element of 'args' to an array, validates the operand
    count against NPY.MAXARGS, broadcasts all shapes together, and
    builds one non-contiguous C-order iterator per operand.

    Raises ValueError for a bad operand count or when the broadcast
    result has too many elements."""
    num_args = len(args)
    if not (2 <= num_args <= NPY.MAXARGS):
        raise oefmt(space.w_ValueError,
                    "Need at least two and fewer than (%d) array objects.",
                    NPY.MAXARGS)
    self.seq = [convert_to_array(space, w_elem) for w_elem in args]
    self.op_flags = parse_op_arg(space, 'op_flags', space.w_None,
                                 len(self.seq), parse_op_flag)
    self.shape = shape_agreement_multiple(space, self.seq, shape=None)
    self.order = NPY.CORDER
    self.iters = []
    self.index = 0
    try:
        self.size = support.product_check(self.shape)
    except OverflowError:
        raise oefmt(space.w_ValueError, "broadcast dimensions too large.")
    for i in range(len(self.seq)):
        it = self.get_iter(space, i)
        it.contiguous = False
        self.iters.append((it, it.reset()))
    self.done = False
def create_all_slots(w_self, hasoldstylebase):
    """Process __slots__ of the new type being built: create one member
    slot per name and decide whether instances additionally get a
    __dict__ and/or __weakref__ slot.

    Without __slots__ (or with an old-style base) both are created;
    duplicate '__dict__'/'__weakref__' requests raise TypeError."""
    space = w_self.space
    dict_w = w_self.dict_w
    if "__slots__" not in dict_w:
        wantdict = True
        wantweakref = True
    else:
        wantdict = False
        wantweakref = False
        w_slots = dict_w["__slots__"]
        # a single string counts as a one-element list of slot names
        if space.isinstance_w(w_slots, space.w_str) or \
                space.isinstance_w(w_slots, space.w_unicode):
            slot_names_w = [w_slots]
        else:
            slot_names_w = space.unpackiterable(w_slots)
        for w_slot_name in slot_names_w:
            slot_name = space.str_w(w_slot_name)
            if slot_name == "__dict__":
                if wantdict or w_self.hasdict:
                    raise oefmt(space.w_TypeError,
                                "__dict__ slot disallowed: we already got one")
                wantdict = True
            elif slot_name == "__weakref__":
                if wantweakref or w_self.weakrefable:
                    raise oefmt(space.w_TypeError,
                                "__weakref__ slot disallowed: we already got one")
                wantweakref = True
            else:
                create_slot(w_self, slot_name)
    wantdict = wantdict or hasoldstylebase
    if wantdict:
        create_dict_slot(w_self)
    if wantweakref:
        create_weakref_slot(w_self)
    if "__del__" in dict_w:
        w_self.needsdel = True
def _prepare_slice_args(self, space, w_idx):
    """Turn an indexing object into a Chunks description for slicing.

    Accepts a record-field name (str), an int or slice, a scalar
    int/bool ndarray, None (newaxis), or a tuple mixing these; raises
    IndexError/ValueError for anything else."""
    if space.isinstance_w(w_idx, space.w_str):
        # field access on a record array
        idx = space.str_w(w_idx)
        dtype = self.dtype
        if not dtype.is_record():
            raise oefmt(space.w_IndexError, "only integers, slices (`:`), "
                "ellipsis (`...`), numpy.newaxis (`None`) and integer or "
                "boolean arrays are valid indices")
        elif idx not in dtype.fields:
            raise oefmt(space.w_ValueError, "field named %s not found", idx)
        return RecordChunk(idx)
    elif (space.isinstance_w(w_idx, space.w_int) or
          space.isinstance_w(w_idx, space.w_slice)):
        if len(self.get_shape()) == 0:
            raise oefmt(space.w_ValueError, "cannot slice a 0-d array")
        return Chunks([Chunk(*space.decode_index4(w_idx,
                                                  self.get_shape()[0]))])
    elif isinstance(w_idx, W_NDimArray) and w_idx.is_scalar():
        # unwrap a scalar array to its app-level value and re-check it
        w_idx = w_idx.get_scalar_value().item(space)
        if not space.isinstance_w(w_idx, space.w_int) and \
                not space.isinstance_w(w_idx, space.w_bool):
            raise OperationError(space.w_IndexError, space.wrap(
                "arrays used as indices must be of integer (or boolean) type"))
        return Chunks([Chunk(*space.decode_index4(w_idx,
                                                  self.get_shape()[0]))])
    elif space.is_w(w_idx, space.w_None):
        return Chunks([NewAxisChunk()])
    # otherwise: a tuple/sequence of per-dimension indices
    result = []
    i = 0
    for w_item in space.fixedview(w_idx):
        if space.is_w(w_item, space.w_None):
            result.append(NewAxisChunk())
        else:
            result.append(Chunk(*space.decode_index4(w_item,
                                                     self.get_shape()[i])))
            i += 1
    return Chunks(result)
def set_op_axes(self, space, w_op_axes):
    """Validate and record the nditer 'op_axes' argument.

    NOTE: op_axes support is unfinished -- after validation this always
    raises NotImplementedError, so the trailing 'return oa_ndim' is
    currently unreachable."""
    if space.len_w(w_op_axes) != len(self.seq):
        raise oefmt(space.w_ValueError,
                    "op_axes must be a tuple/list matching the number of ops")
    op_axes = space.listview(w_op_axes)
    oa_ndim = -1
    for w_axis in op_axes:
        if not space.is_none(w_axis):
            axis_len = space.len_w(w_axis)
            if oa_ndim == -1:
                # first non-None entry fixes the common length
                oa_ndim = axis_len
            elif axis_len != oa_ndim:
                raise oefmt(space.w_ValueError,
                            "Each entry of op_axes must have the same size")
            self.op_axes.append([space.int_w(x) if not space.is_none(x)
                                 else -1 for x in space.listview(w_axis)])
    if oa_ndim == -1:
        raise oefmt(space.w_ValueError,
                    "If op_axes is provided, at least one list of axes "
                    "must be contained within it")
    raise oefmt(space.w_NotImplementedError, "op_axis not finished yet")
    # Check that values make sense:
    # - in bounds for each operand
    # ValueError: Iterator input op_axes[0][3] (==3) is not a valid axis of op[0], which has 2 dimensions
    # - no repeat axis
    # ValueError: The 'op_axes' provided to the iterator constructor for operand 1 contained duplicate value 0
    return oa_ndim
def semlock_release(self, space):
    """Release the semaphore/lock once.

    Recursive mutexes are released unconditionally.  For bounded
    semaphores, raises ValueError when releasing more times than
    maxvalue allows -- checked via sem_getvalue(), or via a trywait
    probe (only for maxvalue == 1) where sem_getvalue is broken."""
    if self.kind == RECURSIVE_MUTEX:
        sem_post(self.handle)
        return
    if HAVE_BROKEN_SEM_GETVALUE:
        # We will only check properly the maxvalue == 1 case
        if self.maxvalue == 1:
            # make sure that already locked
            try:
                sem_trywait(self.handle)
            except OSError as e:
                if e.errno != errno.EAGAIN:
                    raise
                # it is already locked as expected
            else:
                # it was not locked so undo wait and raise
                sem_post(self.handle)
                raise oefmt(space.w_ValueError,
                            "semaphore or lock released too many times")
    else:
        # This check is not an absolute guarantee that the semaphore does
        # not rise above maxvalue.
        if sem_getvalue(self.handle) >= self.maxvalue:
            raise oefmt(space.w_ValueError,
                        "semaphore or lock released too many times")
    sem_post(self.handle)
def descr_fromhex(space, w_bytearraytype, w_hexstring):
    """bytearray.fromhex(string): build a bytearray from pairs of hex
    digits, allowing spaces between (but not inside) the pairs.

    Raises ValueError (NON_HEX_MSG with the offending index) for a
    non-hex digit or a lone trailing digit."""
    hexstring = space.str_w(w_hexstring)
    hexstring = hexstring.lower()
    data = []
    length = len(hexstring)
    i = -2
    while True:
        i += 2
        # skip any spaces before the next digit pair
        while i < length and hexstring[i] == ' ':
            i += 1
        if i >= length:
            break
        if i + 1 == length:
            # a single digit with no partner
            raise oefmt(space.w_ValueError, NON_HEX_MSG, i)

        top = _hex_digit_to_int(hexstring[i])
        if top == -1:
            raise oefmt(space.w_ValueError, NON_HEX_MSG, i)
        bot = _hex_digit_to_int(hexstring[i+1])
        if bot == -1:
            raise oefmt(space.w_ValueError, NON_HEX_MSG, i + 1)
        data.append(chr(top*16 + bot))

    # in CPython bytearray.fromhex is a staticmethod, so
    # we ignore w_type and always return a bytearray
    return new_bytearray(space, space.w_bytearray, data)
def fmt_c(self, w_value): self.prec = -1 # just because space = self.space if space.isinstance_w(w_value, space.w_str): s = space.str_w(w_value) if len(s) != 1: raise oefmt(space.w_TypeError, "%c requires int or char") self.std_wp(s) elif space.isinstance_w(w_value, space.w_unicode): if not do_unicode: raise NeedUnicodeFormattingError ustr = space.unicode_w(w_value) if len(ustr) != 1: raise oefmt(space.w_TypeError, "%c requires int or unichar") self.std_wp(ustr) else: n = space.int_w(w_value) if do_unicode: try: c = unichr(n) except ValueError: raise oefmt(space.w_OverflowError, "unicode character code out of range") self.std_wp(c) else: try: s = chr(n) except ValueError: raise oefmt(space.w_OverflowError, "character code not in range(256)") self.std_wp(s)
def descr_setstate(self, space, w_data):
    """dtype.__setstate__ for record dtypes: restore byteorder,
    subarray, names/fields, size and alignment from a version-3
    numpy.dtype pickle tuple.  Builtin (field-less) dtypes ignore the
    state entirely."""
    if self.fields is None:  # if builtin dtype
        return space.w_None

    version = space.int_w(space.getitem(w_data, space.wrap(0)))
    if version != 3:
        raise oefmt(space.w_ValueError,
                    "can't handle version %d of numpy.dtype pickle", version)

    endian = space.str_w(space.getitem(w_data, space.wrap(1)))
    if endian == NPY.NATBYTE:
        endian = NPY.NATIVE

    w_subarray = space.getitem(w_data, space.wrap(2))
    w_names = space.getitem(w_data, space.wrap(3))
    w_fields = space.getitem(w_data, space.wrap(4))
    size = space.int_w(space.getitem(w_data, space.wrap(5)))
    alignment = space.int_w(space.getitem(w_data, space.wrap(6)))

    # names and fields must be both present or both absent
    if (w_names == space.w_None) != (w_fields == space.w_None):
        raise oefmt(space.w_ValueError, "inconsistent fields and names")

    self.byteorder = endian
    self.shape = []
    self.subdtype = None
    self.base = self

    if w_subarray != space.w_None:
        if not space.isinstance_w(w_subarray, space.w_tuple) or \
                space.len_w(w_subarray) != 2:
            raise oefmt(space.w_ValueError,
                        "incorrect subarray in __setstate__")
        subdtype, w_shape = space.fixedview(w_subarray)
        assert isinstance(subdtype, W_Dtype)
        if not support.issequence_w(space, w_shape):
            # a bare int is a 1-element shape
            self.shape = [space.int_w(w_shape)]
        else:
            self.shape = [space.int_w(w_s) for w_s in space.fixedview(w_shape)]
        self.subdtype = subdtype
        self.base = subdtype.base

    if w_names != space.w_None:
        self.names = []
        self.fields = {}
        for w_name in space.fixedview(w_names):
            name = space.str_w(w_name)
            value = space.getitem(w_fields, w_name)
            dtype = space.getitem(value, space.wrap(0))
            assert isinstance(dtype, W_Dtype)
            offset = space.int_w(space.getitem(value, space.wrap(1)))
            self.names.append(name)
            self.fields[name] = offset, dtype
        self.itemtype = types.RecordType()

    if self.is_flexible():
        self.elsize = size
        self.alignment = alignment
def execve(space, w_command, w_args, w_env):
    """ execve(path, args, env)

Execute a path with arguments and environment, replacing current process.

    path: path of executable file
    args: iterable of arguments
    env: dictionary of strings mapping to strings
    """
    command = fsencode_w(space, w_command)
    try:
        args_w = space.unpackiterable(w_args)
        if len(args_w) < 1:
            # this ValueError passes through the except-clause below
            # because it does not match w_TypeError
            raise oefmt(space.w_ValueError,
                        "execv() must have at least one argument")
        args = [fsencode_w(space, w_arg) for w_arg in args_w]
    except OperationError as e:
        if not e.match(space, space.w_TypeError):
            raise
        raise oefmt(space.w_TypeError,
                    "execv() arg 2 must be an iterable of strings")
    #
    if w_env is None:    # when called via execv() above
        try:
            os.execv(command, args)
        except OSError as e:
            raise wrap_oserror(space, e)
    else:
        env = _env2interp(space, w_env)
        try:
            os.execve(command, args, env)
        except OSError as e:
            raise wrap_oserror(space, e)
def poll(self, space, w_timeout):
    """poll([timeout]): wait for events on the registered fds and return
    a list of (fd, revents) tuples.

    timeout is None (block forever, passed as -1) or anything
    convertible to an integer number of milliseconds.  Concurrent
    poll() calls on the same object raise RuntimeError."""
    if space.is_w(w_timeout, space.w_None):
        timeout = -1
    else:
        # we want to be compatible with cpython and also accept things
        # that can be casted to integer (I think)
        try:
            # compute the integer
            w_timeout = space.int(w_timeout)
        except OperationError:
            raise oefmt(space.w_TypeError,
                        "timeout must be an integer or None")
        timeout = space.c_int_w(w_timeout)

    if self.running:
        raise oefmt(space.w_RuntimeError, "concurrent poll() invocation")
    self.running = True
    try:
        retval = rpoll.poll(self.fddict, timeout)
    except rpoll.PollError as e:
        # translate the low-level error into the module's error type
        w_errortype = space.fromcache(Cache).w_error
        message = e.get_msg()
        raise OperationError(w_errortype,
                             space.newtuple([space.wrap(e.errno),
                                             space.wrap(message)]))
    finally:
        self.running = False

    retval_w = []
    for fd, revents in retval:
        retval_w.append(space.newtuple([space.wrap(fd),
                                        space.wrap(revents)]))
    return space.newlist(retval_w)
def get_primitive_type(ffi, num):
    """Return (building and caching on first use) the W_CType for the
    primitive opcode 'num'.

    The _UNKNOWN_* placeholder opcodes raise FFIError with a message
    explaining which 'typedef ...' construct could not be resolved."""
    space = ffi.space
    if not (0 <= num < cffi_opcode._NUM_PRIM):
        if num == cffi_opcode._UNKNOWN_PRIM:
            raise oefmt(ffi.w_FFIError, "primitive integer type with an "
                        "unexpected size (or not an integer type at all)")
        elif num == cffi_opcode._UNKNOWN_FLOAT_PRIM:
            raise oefmt(ffi.w_FFIError, "primitive floating-point type with an "
                        "unexpected size (or not a float type at all)")
        elif num == cffi_opcode._UNKNOWN_LONG_DOUBLE:
            raise oefmt(ffi.w_FFIError, "primitive floating-point type is "
                        "'long double', not supported for now with "
                        "the syntax 'typedef double... xxx;'")
        else:
            raise oefmt(space.w_NotImplementedError, "prim=%d", num)
    realize_cache = space.fromcache(RealizeCache)
    w_ctype = realize_cache.all_primitives[num]
    if w_ctype is None:
        # not built yet: create it and remember it for next time
        if num == cffi_opcode.PRIM_VOID:
            w_ctype = newtype.new_void_type(space)
        else:
            assert RealizeCache.NAMES[num]
            w_ctype = newtype.new_primitive_type(space, RealizeCache.NAMES[num])
        realize_cache.all_primitives[num] = w_ctype
    return w_ctype
def convert_array_from_object(self, cdata, w_ob):
    """Initialize the raw array at 'cdata' from w_ob.

    Accepts a list/tuple of items, a str for char-sized arrays, or a
    unicode string for wchar_t arrays; raises IndexError when a string
    initializer is longer than a fixed array length."""
    space = self.space
    if (space.isinstance_w(w_ob, space.w_list) or
        space.isinstance_w(w_ob, space.w_tuple)):
        self._convert_array_from_listview(cdata, w_ob)
    elif (self.can_cast_anything or
          (self.ctitem.is_primitive_integer and
           self.ctitem.size == rffi.sizeof(lltype.Char))):
        if not space.isinstance_w(w_ob, space.w_str):
            raise self._convert_error("str or list or tuple", w_ob)
        s = space.str_w(w_ob)
        n = len(s)
        if self.length >= 0 and n > self.length:
            raise oefmt(space.w_IndexError,
                        "initializer string is too long for '%s' (got %d "
                        "characters)", self.name, n)
        copy_string_to_raw(llstr(s), cdata, 0, n)
        if n != self.length:
            # add a trailing null when there is room for it
            cdata[n] = '\x00'
    elif isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar):
        if not space.isinstance_w(w_ob, space.w_unicode):
            raise self._convert_error("unicode or list or tuple", w_ob)
        s = space.unicode_w(w_ob)
        n = len(s)
        if self.length >= 0 and n > self.length:
            raise oefmt(space.w_IndexError,
                        "initializer unicode string is too long for '%s' "
                        "(got %d characters)", self.name, n)
        unichardata = rffi.cast(rffi.CWCHARP, cdata)
        copy_unicode_to_raw(llunicode(s), unichardata, 0, n)
        if n != self.length:
            unichardata[n] = u'\x00'
    else:
        raise self._convert_error("list or tuple", w_ob)
def load_extension_module(space, path, name):
    """Load the CPython C-extension module 'name' from the shared
    library at 'path' and call its init<lastname>() entry point.

    Does nothing if the extension was already loaded.  Raises
    ImportError when the library cannot be dlopen'ed or the init
    function is missing.  The module's (name, path) package context is
    installed around the init call and restored afterwards."""
    if os.sep not in path:
        path = os.curdir + os.sep + path      # force a '/' in the path
    state = space.fromcache(State)
    if state.find_extension(name, path) is not None:
        return      # already loaded
    old_context = state.package_context
    state.package_context = name, path
    try:
        from rpython.rlib import rdynload
        try:
            ll_libname = rffi.str2charp(path)
            try:
                dll = rdynload.dlopen(ll_libname)
            finally:
                lltype.free(ll_libname, flavor="raw")
        # use 'except ... as e' for consistency with the rest of the file
        # (the old 'except X, e' form is Python-2-only syntax)
        except rdynload.DLOpenError as e:
            raise oefmt(space.w_ImportError,
                        "unable to load extension module '%s': %s",
                        path, e.msg)
        try:
            initptr = rdynload.dlsym(dll, "init%s" % (name.split(".")[-1],))
        except KeyError:
            raise oefmt(space.w_ImportError,
                        "function init%s not found in library %s",
                        name, path)
        initfunc = rffi.cast(initfunctype, initptr)
        generic_cpy_call(space, initfunc)
        state.check_and_raise_exception()
    finally:
        # restore the previously-saved package context; without this the
        # outer 'try:' is unterminated and 'old_context' is never used
        state.package_context = old_context
def new_enum_type(space, name, w_enumerators, w_enumvalues, w_basectype):
    """Create a W_CTypeEnumSigned/Unsigned with the given enumerator
    names and values over the primitive base type w_basectype.

    Raises ValueError for mismatched name/value tuple lengths, and
    TypeError for a non-primitive-integer base type; every value is
    range-checked against the base type before the enum is built."""
    enumerators_w = space.fixedview(w_enumerators)
    enumvalues_w = space.fixedview(w_enumvalues)
    if len(enumerators_w) != len(enumvalues_w):
        raise oefmt(space.w_ValueError, "tuple args must have the same size")
    enumerators = [space.str_w(w) for w in enumerators_w]
    #
    if (not isinstance(w_basectype, ctypeprim.W_CTypePrimitiveSigned) and
        not isinstance(w_basectype, ctypeprim.W_CTypePrimitiveUnsigned)):
        raise oefmt(space.w_TypeError,
                    "expected a primitive signed or unsigned base type")
    #
    lvalue = lltype.malloc(rffi.CCHARP.TO, w_basectype.size, flavor='raw')
    try:
        for w in enumvalues_w:
            # detects out-of-range or badly typed values
            w_basectype.convert_from_object(lvalue, w)
    finally:
        lltype.free(lvalue, flavor='raw')
    #
    size = w_basectype.size
    align = w_basectype.align
    if isinstance(w_basectype, ctypeprim.W_CTypePrimitiveSigned):
        enumvalues = [space.int_w(w) for w in enumvalues_w]
        ctype = ctypeenum.W_CTypeEnumSigned(space, name, size, align,
                                            enumerators, enumvalues)
    else:
        enumvalues = [space.uint_w(w) for w in enumvalues_w]
        ctype = ctypeenum.W_CTypeEnumUnsigned(space, name, size, align,
                                              enumerators, enumvalues)
    return ctype
def utime(space, w_path, w_tuple):
    """ utime(path, (atime, mtime))
utime(path, None)

Set the access and modified time of the file to the given values.
If the second form is used, set the access and modified times to the
current time.
    """
    if space.is_w(w_tuple, space.w_None):
        try:
            dispatch_filename(rposix.utime, 1)(space, w_path, None)
            return
        except OSError as e:
            raise wrap_oserror2(space, e, w_path)
    try:
        msg = "utime() arg 2 must be a tuple (atime, mtime) or None"
        args_w = space.fixedview(w_tuple)
        if len(args_w) != 2:
            raise oefmt(space.w_TypeError, msg)
        # allow_conversion=False: only real floats/ints, no __float__ hooks
        actime = space.float_w(args_w[0], allow_conversion=False)
        modtime = space.float_w(args_w[1], allow_conversion=False)
        dispatch_filename(rposix.utime, 2)(space, w_path, (actime, modtime))
    except OSError as e:
        raise wrap_oserror2(space, e, w_path)
    except OperationError as e:
        # rewrap any TypeError (e.g. from fixedview/float_w) with 'msg'
        if not e.match(space, space.w_TypeError):
            raise
        raise oefmt(space.w_TypeError, msg)
def _new_array_type(space, w_ctptr, length):
    """Build (or fetch from the per-pointer weak cache) the array ctype
    'item[length]' for pointer ctype w_ctptr; length == -1 means '[]'.

    Raises TypeError for a non-pointer ctype, ValueError for an item of
    unknown size, and OverflowError when length * itemsize overflows."""
    _setup_wref(rweakref.has_weakref_support())
    if not isinstance(w_ctptr, ctypeptr.W_CTypePointer):
        raise oefmt(space.w_TypeError, "first arg must be a pointer ctype")
    arrays = w_ctptr._array_types
    if arrays is None:
        # first array type derived from this pointer: create the cache
        arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray)
        w_ctptr._array_types = arrays
    else:
        ctype = arrays.get(length)
        if ctype is not None:
            return ctype
    #
    ctitem = w_ctptr.ctitem
    if ctitem.size < 0:
        raise oefmt(space.w_ValueError, "array item of unknown size: '%s'",
                    ctitem.name)
    if length < 0:
        assert length == -1
        arraysize = -1
        extra = '[]'
    else:
        try:
            arraysize = ovfcheck(length * ctitem.size)
        except OverflowError:
            raise oefmt(space.w_OverflowError,
                        "array size would overflow a ssize_t")
        extra = '[%d]' % length
    #
    ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra)
    arrays.set(length, ctype)
    return ctype
def int(self, space):
    """Conversion to int is not defined for this type: always raise
    TypeError."""
    raise oefmt(space.w_TypeError, "can't convert complex to int")
def descr_del___abstractmethods__(space, w_type):
    """Delete __abstractmethods__ from the type's dict and clear its
    abstract flag; AttributeError when the attribute was not set."""
    w_type = _check(space, w_type)
    if not w_type.deldictvalue(space, "__abstractmethods__"):
        raise oefmt(space.w_AttributeError, "__abstractmethods__")
    w_type.set_abstract(False)
def descr_set__bases__(space, w_type, w_value):
    """Assign a new tuple of bases to a heap type.

    Validates the value (tuple, non-empty, no inheritance cycle, compatible
    instance layout), then swaps the bases and recomputes the MROs of the
    type and all its subclasses, rolling everything back if that fails.
    """
    # this assumes all app-level type objects are W_TypeObject
    w_type = _check(space, w_type)
    if not w_type.is_heaptype():
        raise oefmt(space.w_TypeError, "can't set %N.__bases__", w_type)
    if not space.isinstance_w(w_value, space.w_tuple):
        raise oefmt(space.w_TypeError,
                    "can only assign tuple to %N.__bases__, not %T",
                    w_type, w_value)
    newbases_w = space.fixedview(w_value)
    if len(newbases_w) == 0:
        raise oefmt(space.w_TypeError,
                    "can only assign non-empty tuple to %N.__bases__, not ()",
                    w_type)

    # reject cycles: w_type must not appear in any new base's default MRO
    for w_newbase in newbases_w:
        if isinstance(w_newbase, W_TypeObject):
            if w_type in w_newbase.compute_default_mro():
                raise oefmt(space.w_TypeError,
                            "a __bases__ item causes an inheritance cycle")

    # the instance layout implied by the new bases must match the old one
    w_oldbestbase = check_and_find_best_base(space, w_type.bases_w)
    w_newbestbase = check_and_find_best_base(space, newbases_w)
    oldlayout = w_oldbestbase.get_full_instance_layout()
    newlayout = w_newbestbase.get_full_instance_layout()

    if oldlayout != newlayout:
        raise oefmt(space.w_TypeError,
                    "__bases__ assignment: '%N' object layout differs from "
                    "'%N'", w_newbestbase, w_oldbestbase)

    # invalidate the version_tag of all the current subclasses
    w_type.mutated(None)

    # now we can go ahead and change 'w_type.bases_w'
    saved_bases_w = w_type.bases_w
    temp = []
    try:
        for w_oldbase in saved_bases_w:
            if isinstance(w_oldbase, W_TypeObject):
                w_oldbase.remove_subclass(w_type)
        w_type.bases_w = newbases_w
        for w_newbase in newbases_w:
            if isinstance(w_newbase, W_TypeObject):
                w_newbase.add_subclass(w_type)
        # try to recompute all MROs
        mro_subclasses(space, w_type, temp)
    except:
        # roll back: restore the saved MROs and the old bases
        for cls, old_mro in temp:
            cls.mro_w = old_mro
        w_type.bases_w = saved_bases_w
        raise
    if (space.config.objspace.std.withtypeversion and
            w_type.version_tag() is not None and
            not is_mro_purely_of_types(w_type.mro_w)):
        # Disable method cache if the hierarchy isn't pure.
        w_type._version_tag = None
        for w_subclass in w_type.get_subclasses():
            if isinstance(w_subclass, W_TypeObject):
                w_subclass._version_tag = None
    assert w_type.w_same_layout_as is get_parent_layout(w_type)  # invariant
def descr_init(self, space, w_buffer, encoding=None, w_errors=None,
               w_newline=None, line_buffering=0):
    """Initialize the text wrapper around the binary stream 'w_buffer'.

    'encoding'/'w_errors' select the codec and error policy; 'w_newline'
    (None, '', '\\n', '\\r' or '\\r\\n') controls universal-newline reading
    and newline translation on output.  Raises ValueError for an illegal
    newline value.  The object stays in STATE_ZERO until the very end so a
    failed __init__ leaves it unusable rather than half-initialized.
    """
    self.state = STATE_ZERO
    self.w_buffer = w_buffer
    self.w_encoding = _determine_encoding(space, encoding)

    if space.is_none(w_errors):
        w_errors = space.newtext("strict")
    self.w_errors = w_errors

    if space.is_none(w_newline):
        newline = None
    else:
        newline = space.utf8_w(w_newline)
    if newline and newline not in ('\n', '\r\n', '\r'):
        raise oefmt(space.w_ValueError,
                    "illegal newline value: %R", w_newline)

    self.line_buffering = line_buffering

    self.readuniversal = not newline  # null or empty
    self.readtranslate = newline is None
    self.readnl = newline

    # writetranslate is False only for newline='' (pass-through on write)
    self.writetranslate = (newline != '')
    if not self.readuniversal:
        self.writenl = self.readnl
        if self.writenl == '\n':
            self.writenl = None
    elif _WINDOWS:
        self.writenl = "\r\n"
    else:
        self.writenl = None

    # build the decoder object
    if space.is_true(space.call_method(w_buffer, "readable")):
        w_codec = interp_codecs.lookup_codec(space,
                                             space.text_w(self.w_encoding))
        self.w_decoder = space.call_method(w_codec,
                                           "incrementaldecoder", w_errors)
        if self.readuniversal:
            # wrap the codec's decoder in the newline-translating decoder
            self.w_decoder = space.call_function(
                space.gettypeobject(W_IncrementalNewlineDecoder.typedef),
                self.w_decoder, space.newbool(self.readtranslate))

    # build the encoder object
    if space.is_true(space.call_method(w_buffer, "writable")):
        w_codec = interp_codecs.lookup_codec(space,
                                             space.text_w(self.w_encoding))
        self.w_encoder = space.call_method(w_codec,
                                           "incrementalencoder", w_errors)

    self.seekable = space.is_true(space.call_method(w_buffer, "seekable"))
    self.telling = self.seekable

    self.encoding_start_of_stream = False
    if self.seekable and self.w_encoder:
        # Decide whether we are at the start of the stream, which matters
        # for codecs that write a BOM.
        self.encoding_start_of_stream = True
        w_cookie = space.call_method(self.w_buffer, "tell")
        if not space.eq_w(w_cookie, space.newint(0)):
            self.encoding_start_of_stream = False
            space.call_method(self.w_encoder, "setstate", space.newint(0))

    self.state = STATE_OK
def seek_w(self, space, w_pos, whence=0):
    """Seek on the text stream.

    whence 0 takes an opaque cookie previously returned by tell();
    whence 1 and 2 only accept a zero offset (sync-to-current and
    seek-to-end respectively).  Returns the new position.
    """
    self._check_attached(space)

    if not self.seekable:
        raise oefmt(space.w_IOError, "underlying stream is not seekable")

    if whence == 1:
        # seek relative to current position
        if not space.eq_w(w_pos, space.newint(0)):
            raise oefmt(space.w_IOError,
                        "can't do nonzero cur-relative seeks")
        # Seeking to the current position should attempt to sync the
        # underlying buffer with the current position.
        w_pos = space.call_method(self, "tell")
    elif whence == 2:
        # seek relative to end of file
        if not space.eq_w(w_pos, space.newint(0)):
            raise oefmt(space.w_IOError,
                        "can't do nonzero end-relative seeks")
        space.call_method(self, "flush")
        self.decoded.reset()
        self.snapshot = None
        if self.w_decoder:
            space.call_method(self.w_decoder, "reset")
        return space.call_method(self.w_buffer, "seek",
                                 w_pos, space.newint(whence))
    elif whence != 0:
        raise oefmt(space.w_ValueError,
                    "invalid whence (%d, should be 0, 1 or 2)", whence)

    if space.is_true(space.lt(w_pos, space.newint(0))):
        raise oefmt(space.w_ValueError,
                    "negative seek position %R", w_pos)

    space.call_method(self, "flush")

    # The strategy of seek() is to go back to the safe start point and
    # replay the effect of read(chars_to_skip) from there.
    cookie = PositionCookie(space.bigint_w(w_pos))

    # Seek back to the safe start point
    space.call_method(self.w_buffer, "seek", space.newint(cookie.start_pos))

    self.decoded.reset()
    self.snapshot = None

    # Restore the decoder to its state from the safe start point.
    if self.w_decoder:
        self._decoder_setstate(space, cookie)

    if cookie.chars_to_skip:
        # Just like _read_chunk, feed the decoder and save a snapshot.
        w_chunk = space.call_method(self.w_buffer, "read",
                                    space.newint(cookie.bytes_to_feed))
        if not space.isinstance_w(w_chunk, space.w_bytes):
            msg = "underlying read() should have returned " \
                  "a bytes object, not '%T'"
            raise oefmt(space.w_TypeError, msg, w_chunk)

        self.snapshot = PositionSnapshot(cookie.dec_flags,
                                         space.bytes_w(w_chunk))

        w_decoded = space.call_method(self.w_decoder, "decode",
                                      w_chunk,
                                      space.newbool(bool(cookie.need_eof)))
        w_decoded = check_decoded(space, w_decoded)

        # Skip chars_to_skip of the decoded characters
        if space.len_w(w_decoded) < cookie.chars_to_skip:
            raise oefmt(space.w_IOError,
                        "can't restore logical file position")
        self.decoded.set(space, w_decoded)
        self.decoded.pos = w_decoded._index_to_byte(cookie.chars_to_skip)
    else:
        self.snapshot = PositionSnapshot(cookie.dec_flags, "")

    # Finally, reset the encoder (merely useful for proper BOM handling)
    if self.w_encoder:
        self._encoder_setstate(space, cookie)

    return w_pos
def decode_w(self, space, w_input, final=False):
    """Decode w_input through the wrapped decoder and do universal-newline
    handling: record which newline kinds were seen (SEEN_LF/CR/CRLF in
    self.seennl) and, when self.translate, rewrite '\\r' and '\\r\\n' to
    '\\n'.

    A trailing '\\r' is withheld (self.pendingcr) until the next call
    unless 'final' is true, so a '\\r\\n' split across chunks is still
    recognized as a single newline.
    """
    if self.w_decoder is None:
        raise oefmt(space.w_ValueError,
                    "IncrementalNewlineDecoder.__init__ not called")

    # decode input (with the eventual \r from a previous pass)
    if not space.is_w(self.w_decoder, space.w_None):
        w_output = space.call_method(self.w_decoder, "decode",
                                     w_input, space.newbool(bool(final)))
    else:
        w_output = w_input

    if not space.isinstance_w(w_output, space.w_unicode):
        raise oefmt(space.w_TypeError,
                    "decoder should return a string result")

    output, output_len = space.utf8_len_w(w_output)
    # NOTE(review): this overwrites the codepoint count returned by
    # utf8_len_w with the byte length of the utf8 buffer; the scanning
    # below indexes 'output' bytewise and the final length is recomputed
    # by check_utf8, so this looks intentional -- confirm.
    output_len = len(output)
    if self.pendingcr and (final or output_len):
        # re-prepend the '\r' held back by the previous call
        output = '\r' + output
        self.pendingcr = False
        output_len += 1

    # retain last \r even when not translating data:
    # then readline() is sure to get \r\n in one pass
    if not final and output_len > 0:
        last = len(output) - 1
        assert last >= 0
        if output[last] == '\r':
            output = output[:last]
            self.pendingcr = True
            output_len -= 1

    if output_len == 0:
        return space.newutf8("", 0)

    # Record which newlines are read and do newline translation if
    # desired, all in one pass.
    seennl = self.seennl

    if output.find('\r') < 0:
        # If no \r, quick scan for a possible "\n" character.
        # (there's nothing else to be done, even when in translation mode)
        if output.find('\n') >= 0:
            seennl |= SEEN_LF
        # Finished: we have scanned for newlines, and none of them
        # need translating.
    elif not self.translate:
        # only record what we saw; no rewriting
        i = 0
        while i < len(output):
            if seennl == SEEN_ALL:
                break        # every kind already seen: nothing left to learn
            c = output[i]
            i += 1
            if c == '\n':
                seennl |= SEEN_LF
            elif c == '\r':
                if i < len(output) and output[i] == '\n':
                    seennl |= SEEN_CRLF
                    i += 1
                else:
                    seennl |= SEEN_CR
    elif output.find('\r') >= 0:
        # Translate!
        builder = StringBuilder(len(output))
        i = 0
        while i < output_len:
            c = output[i]
            i += 1
            if c == '\n':
                seennl |= SEEN_LF
            elif c == '\r':
                if i < len(output) and output[i] == '\n':
                    seennl |= SEEN_CRLF
                    i += 1
                else:
                    seennl |= SEEN_CR
                # both \r and \r\n collapse to a single \n
                builder.append('\n')
                continue
            builder.append(c)
        output = builder.build()

    self.seennl |= seennl
    lgt = check_utf8(output, True)
    return space.newutf8(output, lgt)
def _check_attached(self, space):
    """Fail if detach() was called on this wrapper; otherwise run the
    usual initialization check."""
    if self.state != STATE_DETACHED:
        self._check_init(space)
        return
    raise oefmt(space.w_ValueError,
                "underlying buffer has been detached")
def _check_init(self, space):
    """Fail unless __init__ completed successfully on this object."""
    if self.state != STATE_ZERO:
        return
    raise oefmt(space.w_ValueError,
                "I/O operation on uninitialized object")
def tell_w(self, space):
    """Return the current position as an opaque cookie.

    The cookie packs the byte position of the last "safe start point"
    plus the decoder state needed to replay from there (dec_flags,
    bytes_to_feed, need_eof, chars_to_skip); seek_w() consumes it.
    """
    self._check_closed(space)
    if not self.seekable:
        raise oefmt(space.w_IOError, "underlying stream is not seekable")
    if not self.telling:
        raise oefmt(space.w_IOError,
                    "telling position disabled by next() call")
    self._writeflush(space)
    space.call_method(self, "flush")

    w_pos = space.call_method(self.w_buffer, "tell")

    if self.w_decoder is None or self.snapshot is None:
        assert not self.decoded.text
        return w_pos

    cookie = PositionCookie(space.bigint_w(w_pos))

    # Skip backward to the snapshot point (see _read_chunk)
    cookie.dec_flags = self.snapshot.flags
    input = self.snapshot.input
    cookie.start_pos -= len(input)

    # How many decoded characters have been used up since the snapshot?
    if not self.decoded.pos:
        # We haven't moved from the snapshot point.
        return space.newlong_from_rbigint(cookie.pack())

    chars_to_skip = codepoints_in_utf8(self.decoded.text,
                                       end=self.decoded.pos)

    # Starting from the snapshot position, we will walk the decoder
    # forward until it gives us enough decoded characters.
    w_saved_state = space.call_method(self.w_decoder, "getstate")

    try:
        # Note our initial start point
        self._decoder_setstate(space, cookie)

        # Feed the decoder one byte at a time.  As we go, note the nearest
        # "safe start point" before the current location (a point where
        # the decoder has nothing buffered, so seek() can safely start
        # from there and advance to this location).
        chars_decoded = 0
        i = 0
        while i < len(input):
            w_decoded = space.call_method(self.w_decoder, "decode",
                                          space.newbytes(input[i]))
            check_decoded(space, w_decoded)
            chars_decoded += space.len_w(w_decoded)

            cookie.bytes_to_feed += 1

            w_state = space.call_method(self.w_decoder, "getstate")
            w_dec_buffer, w_flags = space.unpackiterable(w_state, 2)
            dec_buffer_len = space.len_w(w_dec_buffer)

            if dec_buffer_len == 0 and chars_decoded <= chars_to_skip:
                # Decoder buffer is empty, so this is a safe start point.
                cookie.start_pos += cookie.bytes_to_feed
                chars_to_skip -= chars_decoded
                assert chars_to_skip >= 0
                cookie.dec_flags = space.int_w(w_flags)
                cookie.bytes_to_feed = 0
                chars_decoded = 0
            if chars_decoded >= chars_to_skip:
                break
            i += 1
        else:
            # We didn't get enough decoded data; signal EOF to get more.
            w_decoded = space.call_method(self.w_decoder, "decode",
                                          space.newbytes(""),
                                          space.newint(1))  # final=1
            check_decoded(space, w_decoded)
            chars_decoded += space.len_w(w_decoded)
            cookie.need_eof = 1

            if chars_decoded < chars_to_skip:
                raise oefmt(space.w_IOError,
                            "can't reconstruct logical file position")
    finally:
        space.call_method(self.w_decoder, "setstate", w_saved_state)

    # The returned cookie corresponds to the last safe start point.
    cookie.chars_to_skip = chars_to_skip
    return space.newlong_from_rbigint(cookie.pack())
def check_decoded(space, w_decoded):
    """Ensure the decoder produced a unicode object; return it unchanged."""
    if space.isinstance_w(w_decoded, space.w_unicode):
        return w_decoded
    msg = "decoder should return a string result, not '%T'"
    raise oefmt(space.w_TypeError, msg, w_decoded)
def parsestr(space, encoding, s):
    """Parses a string or unicode literal, and return usually a wrapped value.
    If we get an f-string, then instead return an unparsed but unquoted
    W_FString instance.

    If encoding=None, the source string is ascii only.
    In other cases, the source string is in utf-8 encoding.

    When a bytes string is returned, it will be encoded with the
    original encoding.

    Yes, it's very inefficient.
    Yes, CPython has very similar code.
    """
    # we use ps as "pointer to s"
    # q is the virtual last char index of the string
    ps = 0
    quote = s[ps]
    rawmode = False
    unicode_literal = True
    saw_u = False
    saw_f = False

    # string decoration handling: first prefix letter (b/u/r/f)
    if quote == 'b' or quote == 'B':
        ps += 1
        quote = s[ps]
        unicode_literal = False
    elif quote == 'u' or quote == 'U':
        ps += 1
        quote = s[ps]
        saw_u = True
    elif quote == 'r' or quote == 'R':
        ps += 1
        quote = s[ps]
        rawmode = True
    elif quote == 'f' or quote == 'F':
        ps += 1
        quote = s[ps]
        saw_f = True

    # optional second prefix letter ('u' cannot be combined)
    if not saw_u:
        if quote == 'r' or quote == 'R':
            ps += 1
            quote = s[ps]
            rawmode = True
        elif quote == 'b' or quote == 'B':
            ps += 1
            quote = s[ps]
            unicode_literal = False
        elif quote == 'f' or quote == 'F':
            ps += 1
            quote = s[ps]
            saw_f = True

    if quote != "'" and quote != '"':
        raise_app_valueerror(space,
                             'Internal error: parser passed unquoted literal')
    ps += 1
    q = len(s) - 1
    if s[q] != quote:
        raise_app_valueerror(space,
                             'Internal error: parser passed unmatched '
                             'quotes in literal')
    if q - ps >= 4 and s[ps] == quote and s[ps + 1] == quote:
        # triple quotes
        ps += 2
        if s[q - 1] != quote or s[q - 2] != quote:
            raise_app_valueerror(space,
                                 'Internal error: parser passed '
                                 'unmatched triple quotes in literal')
        q -= 2

    if unicode_literal and not rawmode:  # XXX Py_UnicodeFlag is ignored for now
        assert 0 <= ps <= q
        if saw_f:
            return W_FString(s[ps:q], rawmode)
        if encoding is None:
            substr = s[ps:q]
        else:
            substr = decode_unicode_utf8(space, s, ps, q)
        v = unicodehelper.decode_unicode_escape(space, substr)
        return space.newunicode(v)

    assert 0 <= ps <= q
    substr = s[ps:q]
    if not unicode_literal:
        # Disallow non-ascii characters (but not escapes)
        for c in substr:
            if ord(c) > 0x80:
                raise oefmt(
                    space.w_SyntaxError,
                    "bytes can only contain ASCII literal characters.")

    if rawmode or '\\' not in substr:
        # no escape processing needed
        if not unicode_literal:
            return space.newbytes(substr)
        elif saw_f:
            return W_FString(substr, rawmode)
        else:
            v = unicodehelper.decode_utf8(space, substr)
            return space.newunicode(v)

    v = PyString_DecodeEscape(space, substr, 'strict', encoding)
    return space.newbytes(v)
def PyString_DecodeEscape(space, s, errors, recode_encoding):
    """
    Unescape a backslash-escaped string. If recode_encoding is non-zero,
    the string is UTF-8 encoded and should be re-encoded in the
    specified encoding.
    """
    builder = StringBuilder(len(s))
    ps = 0
    end = len(s)
    while ps < end:
        if s[ps] != '\\':
            # note that the C code has a label here.
            # the logic is the same.
            if recode_encoding and ord(s[ps]) & 0x80:
                # non-ascii byte: re-encode a whole utf-8 sequence;
                # decode_utf8_recode advances ps past it
                w, ps = decode_utf8_recode(space, s, ps, end, recode_encoding)
                # Append bytes to output buffer.
                builder.append(w)
            else:
                builder.append(s[ps])
                ps += 1
            continue

        ps += 1
        if ps == end:
            raise_app_valueerror(space, 'Trailing \\ in string')
        prevps = ps
        ch = s[ps]
        ps += 1
        # XXX This assumes ASCII!
        if ch == '\n':
            pass                      # escaped newline: line continuation
        elif ch == '\\':
            builder.append('\\')
        elif ch == "'":
            builder.append("'")
        elif ch == '"':
            builder.append('"')
        elif ch == 'b':
            builder.append("\010")
        elif ch == 'f':
            builder.append('\014')    # FF
        elif ch == 't':
            builder.append('\t')
        elif ch == 'n':
            builder.append('\n')
        elif ch == 'r':
            builder.append('\r')
        elif ch == 'v':
            builder.append('\013')    # VT
        elif ch == 'a':
            builder.append('\007')    # BEL, not classic C
        elif ch in '01234567':
            # Look for up to two more octal digits
            span = ps
            span += (span < end) and (s[span] in '01234567')
            span += (span < end) and (s[span] in '01234567')
            octal = s[prevps:span]
            # emulate a strange wrap-around behavior of CPython:
            # \400 is the same as \000 because 0400 == 256
            num = int(octal, 8) & 0xFF
            builder.append(chr(num))
            ps = span
        elif ch == 'x':
            if ps + 2 <= end and isxdigit(s[ps]) and isxdigit(s[ps + 1]):
                hexa = s[ps:ps + 2]
                num = int(hexa, 16)
                builder.append(chr(num))
                ps += 2
            else:
                # malformed \x escape: honor the 'errors' policy
                if errors == 'strict':
                    raise_app_valueerror(
                        space, "invalid \\x escape at position %d" % (ps - 2))
                elif errors == 'replace':
                    builder.append('?')
                elif errors == 'ignore':
                    pass
                else:
                    raise oefmt(space.w_ValueError,
                                "decoding error; "
                                "unknown error handling code: %s", errors)
                # skip at most one stray hex digit
                if ps + 1 <= end and isxdigit(s[ps]):
                    ps += 1
        else:
            # this was not an escape, so the backslash
            # has to be added, and we start over in
            # non-escape mode.
            builder.append('\\')
            ps -= 1
            assert ps >= 0
            continue
        # an arbitrary number of unescaped UTF-8 bytes may follow.
    buf = builder.build()
    return buf
def readline_w(self, space, w_limit=None):
    """Read and return one line, i.e. up to and including the first '\\n',
    EOF, or at most 'limit' bytes if a non-negative limit is given.

    Uses peek() (if the object has one) to find the newline so that read()
    can be called with the exact number of bytes; falls back to reading
    one byte at a time otherwise.  Raises IOError if peek()/read() do not
    return bytes objects.
    """
    # For backwards compatibility, a (slowish) readline().
    limit = convert_size(space, w_limit)
    has_peek = space.findattr(self, space.newtext("peek"))

    builder = StringBuilder()
    size = 0

    while limit < 0 or size < limit:
        nreadahead = 1

        if has_peek:
            try:
                w_readahead = space.call_method(self, "peek",
                                                space.newint(1))
            except OperationError as e:
                if trap_eintr(space, e):
                    continue        # interrupted by a signal: retry
                raise
            if not space.isinstance_w(w_readahead, space.w_bytes):
                raise oefmt(
                    space.w_IOError,
                    "peek() should have returned a bytes object, "
                    "not '%T'", w_readahead)
            length = space.len_w(w_readahead)
            if length > 0:
                # scan the peeked data for '\n' (bounded by limit if >= 0)
                n = 0
                buf = space.bytes_w(w_readahead)
                if limit >= 0:
                    while True:
                        if n >= length or n >= limit:
                            break
                        n += 1
                        if buf[n - 1] == '\n':
                            break
                else:
                    while True:
                        if n >= length:
                            break
                        n += 1
                        if buf[n - 1] == '\n':
                            break
                nreadahead = n

        try:
            w_read = space.call_method(self, "read",
                                       space.newint(nreadahead))
        except OperationError as e:
            if trap_eintr(space, e):
                continue            # interrupted by a signal: retry
            raise
        if not space.isinstance_w(w_read, space.w_bytes):
            # BUGFIX: this message used to blame peek(); the failing call
            # here is read().
            raise oefmt(
                space.w_IOError,
                "read() should have returned a bytes object, not "
                "'%T'", w_read)
        read = space.bytes_w(w_read)
        if not read:
            break                   # EOF

        size += len(read)
        builder.append(read)

        if read[-1] == '\n':
            break

    return space.newbytes(builder.build())
def output_slice(space, rwbuffer, target_pos, data):
    """Copy 'data' into 'rwbuffer' starting at 'target_pos'.

    Raises RuntimeError if the buffer is no longer big enough to hold it
    (it may have been resized concurrently with the operation).
    """
    room_left = rwbuffer.getlength() - target_pos
    if len(data) > room_left:
        raise oefmt(space.w_RuntimeError,
                    "target buffer has shrunk during operation")
    rwbuffer.setslice(target_pos, data)
def flush_w(self, space):
    """Flushing is a no-op here, but still refuses closed files."""
    if not self._CLOSED():
        return
    raise oefmt(space.w_ValueError, "I/O operation on closed file")
def getstate_w(self, space):
    """Pickling of this object is not supported: always TypeError."""
    errmsg = "cannot serialize '%T' object"
    raise oefmt(space.w_TypeError, errmsg, self)
def descr_len(self, space):
    """Return the current length of the builder's contents.

    Raises ValueError once build() has been called and the builder
    discarded.
    """
    current = self.builder
    if current is not None:
        return space.newint(current.getlength())
    raise oefmt(space.w_ValueError, "no length of built builder")
def writebuf_w(self, space):
    """Unicode objects are immutable: never usable as a writable buffer."""
    errmsg = "cannot use unicode as modifiable buffer"
    raise oefmt(space.w_TypeError, errmsg)
def _check_done(self, space):
    """Raise ValueError if build() already consumed this builder."""
    if self.builder is not None:
        return
    raise oefmt(space.w_ValueError, "Can't operate on a built builder")
def descr_append_slice(self, space, s, start, end):
    """Append s[start:end] to the builder after validating the bounds."""
    self._check_done(space)
    if 0 <= start <= end <= len(s):
        self.builder.append_slice(s, start, end)
        return
    raise oefmt(space.w_ValueError, "bad start/stop")
def cdlopen_close(self):
    """Closing is only meaningful for libraries opened via ffi.dlopen()."""
    errmsg = "library '%s' was not created with ffi.dlopen()"
    raise oefmt(self.ffi.w_FFIError, errmsg, self.libname)
def _build_attr(self, attr):
    """Realize the library attribute 'attr' from the cffi context.

    Looks 'attr' up among this library's globals; if absent, searches the
    included ffi/lib pairs.  Depending on the opcode found, builds a
    wrapped function, a global-variable accessor, an integer constant, a
    non-integer constant, or (dlopen mode) a function pointer.  The result
    is cached in self.dict_w and returned; None means "not found at all".
    Raises FFIError for size mismatches and opaque constant types.
    """
    index = parse_c_type.search_in_globals(self.ctx, attr)
    if index < 0:
        # not a global of this library: search the included ffis/libs
        for ffi1, lib1 in self.ffi.included_ffis_libs:
            if lib1 is not None:
                try:
                    w_result = lib1._get_attr_elidable(attr)
                    break           # found, break out of this loop
                except KeyError:
                    w_result = lib1._build_attr(attr)
                    if w_result is not None:
                        break       # found, break out of this loop
            else:
                w_result = ffi1.fetch_int_constant(attr)
                if w_result is not None:
                    break           # found, break out of this loop
        else:
            return None             # not found at all
    else:
        space = self.space
        g = self.ctx.c_globals[index]
        op = getop(g.c_type_op)
        if (op == cffi_opcode.OP_CPYTHON_BLTN_V or
                op == cffi_opcode.OP_CPYTHON_BLTN_N or
                op == cffi_opcode.OP_CPYTHON_BLTN_O):
            # A function
            w_result = self._build_cpython_func(g, attr)
        #
        elif op == cffi_opcode.OP_GLOBAL_VAR:
            # A global variable of the exact type specified here
            # (nowadays, only used by the ABI mode or backend
            # compatibility; see OP_GLOBAL_F for the API mode
            w_ct = realize_c_type.realize_c_type(
                self.ffi, self.ctx.c_types, getarg(g.c_type_op))
            g_size = rffi.cast(lltype.Signed, g.c_size_or_direct_fn)
            if g_size != w_ct.size and g_size != 0 and w_ct.size > 0:
                raise oefmt(self.ffi.w_FFIError,
                            "global variable '%s' should be %d bytes "
                            "according to the cdef, but is actually %d",
                            attr, w_ct.size, g_size)
            ptr = rffi.cast(rffi.CCHARP, g.c_address)
            if not ptr:   # for dlopen() style
                ptr = self.cdlopen_fetch(attr)
            w_result = cglob.W_GlobSupport(space, attr, w_ct, ptr=ptr)
        #
        elif op == cffi_opcode.OP_GLOBAL_VAR_F:
            # A global variable fetched through an accessor function
            w_ct = realize_c_type.realize_c_type(
                self.ffi, self.ctx.c_types, getarg(g.c_type_op))
            w_result = cglob.W_GlobSupport(space, attr, w_ct,
                                           fetch_addr=g.c_address)
        #
        elif (op == cffi_opcode.OP_CONSTANT_INT or
                op == cffi_opcode.OP_ENUM):
            # A constant integer whose value, in an "unsigned long long",
            # is obtained by calling the function at g->address
            w_result = realize_c_type.realize_global_int(self.ffi, g,
                                                         index)
        #
        elif (op == cffi_opcode.OP_CONSTANT or
                op == cffi_opcode.OP_DLOPEN_CONST):
            # A constant which is not of integer type
            w_ct = realize_c_type.realize_c_type(
                self.ffi, self.ctx.c_types, getarg(g.c_type_op))
            fetch_funcptr = rffi.cast(
                realize_c_type.FUNCPTR_FETCH_CHARP,
                g.c_address)
            if w_ct.size <= 0:
                # BUGFIX: a second, unreachable 'raise' that followed this
                # one (dead code) was removed.
                raise oefmt(self.ffi.w_FFIError,
                            "constant '%s' is of type '%s', "
                            "whose size is not known", attr, w_ct.name)
            if not fetch_funcptr:   # for dlopen() style
                assert op == cffi_opcode.OP_DLOPEN_CONST
                ptr = self.cdlopen_fetch(attr)
            else:
                assert op == cffi_opcode.OP_CONSTANT
                ptr = lltype.malloc(rffi.CCHARP.TO, w_ct.size,
                                    flavor='raw')
                self.ffi._finalizer.free_mems.append(ptr)
                fetch_funcptr(ptr)
            w_result = w_ct.convert_to_object(ptr)
        #
        elif op == cffi_opcode.OP_DLOPEN_FUNC:
            # For dlopen(): the function of the given 'name'.  We use
            # dlsym() to get the address of something in the dynamic
            # library, which we interpret as being exactly a function of
            # the specified type.
            ptr = self.cdlopen_fetch(attr)
            w_ct = realize_c_type.realize_c_type_or_func(
                self.ffi, self.ctx.c_types, getarg(g.c_type_op))
            # must have returned a function type:
            assert isinstance(w_ct, realize_c_type.W_RawFuncType)
            w_ctfnptr = w_ct.unwrap_as_fnptr(self.ffi)
            w_result = W_CData(self.space, ptr, w_ctfnptr)
        #
        else:
            raise oefmt(space.w_NotImplementedError,
                        "in lib_build_attr: op=%d", op)

    assert w_result is not None
    self.dict_w[attr] = w_result
    return w_result
def externpy_not_found(ffi, name):
    """Raise FFIError: no 'extern "Python"' function named 'name' was
    declared in the cdef."""
    raise oefmt(
        ffi.w_FFIError,
        "ffi.def_extern('%s'): no 'extern \"Python\"' function with this name",
        name)
def descr_delattr(self, w_attr):
    """Deleting C attributes is never allowed.

    The lookup is still performed first so that unknown names raise the
    usual AttributeError instead.
    """
    self._get_attr(w_attr)     # for the possible AttributeError
    space = self.space
    raise oefmt(space.w_AttributeError, "C attribute cannot be deleted")
def descr_init(self, space, w_buffer, encoding=None, w_errors=None,
               w_newline=None, line_buffering=0, write_through=0):
    """Initialize the text wrapper around the binary stream 'w_buffer'.

    'encoding'/'w_errors' select the codec (which must be a text encoding)
    and its error policy; 'w_newline' (None, '', '\\n', '\\r' or '\\r\\n')
    controls universal-newline reading and newline translation on output.
    Raises ValueError for an illegal newline value and LookupError for a
    non-text codec.  The object stays in STATE_ZERO until the very end so
    a failed __init__ leaves it unusable rather than half-initialized.
    """
    self.state = STATE_ZERO
    self.w_buffer = w_buffer
    self.w_encoding = _determine_encoding(space, encoding, w_buffer)

    if space.is_none(w_errors):
        w_errors = space.newtext("strict")
    self.w_errors = w_errors

    if space.is_none(w_newline):
        newline = None
    else:
        newline = space.unicode_w(w_newline)
    if newline and newline not in (u'\n', u'\r\n', u'\r'):
        raise oefmt(space.w_ValueError,
                    "illegal newline value: %R", w_newline)

    self.line_buffering = line_buffering
    self.write_through = write_through

    self.readuniversal = not newline  # null or empty
    self.readtranslate = newline is None
    self.readnl = newline

    # writetranslate is False only for newline=u'' (pass-through on write)
    self.writetranslate = (newline != u'')
    if not self.readuniversal:
        self.writenl = self.readnl
        if self.writenl == u'\n':
            self.writenl = None
    elif _WINDOWS:
        self.writenl = u"\r\n"
    else:
        self.writenl = None

    # the codec must explicitly mark itself as a text encoding
    w_codec = interp_codecs.lookup_codec(space,
                                         space.text_w(self.w_encoding))
    if not space.is_true(space.getattr(w_codec,
                                       space.newtext('_is_text_encoding'))):
        msg = ("%R is not a text encoding; "
               "use codecs.open() to handle arbitrary codecs")
        raise oefmt(space.w_LookupError, msg, self.w_encoding)

    # build the decoder object
    if space.is_true(space.call_method(w_buffer, "readable")):
        self.w_decoder = space.call_method(w_codec,
                                           "incrementaldecoder", w_errors)
        if self.readuniversal:
            # wrap the codec's decoder in the newline-translating decoder
            self.w_decoder = space.call_function(
                space.gettypeobject(W_IncrementalNewlineDecoder.typedef),
                self.w_decoder, space.newbool(self.readtranslate))

    # build the encoder object
    if space.is_true(space.call_method(w_buffer, "writable")):
        self.w_encoder = space.call_method(w_codec,
                                           "incrementalencoder", w_errors)

    self.seekable = space.is_true(space.call_method(w_buffer, "seekable"))
    self.telling = self.seekable

    self.has_read1 = space.findattr(w_buffer, space.newtext("read1"))

    self.encoding_start_of_stream = False
    if self.seekable and self.w_encoder:
        # Decide whether we are at the start of the stream, which matters
        # for codecs that write a BOM.
        self.encoding_start_of_stream = True
        w_cookie = space.call_method(self.w_buffer, "tell")
        if not space.eq_w(w_cookie, space.newint(0)):
            self.encoding_start_of_stream = False
            space.call_method(self.w_encoder, "setstate", space.newint(0))

    self.state = STATE_OK
def bf_getwritebuffer(space, w_buf, segment, ref):
    """Old-style buffer API: expose w_buf's writable memory as segment 0.

    Stores the raw address in ref[0] and returns the buffer length;
    any segment other than 0 is a SystemError.
    """
    if segment != 0:
        raise oefmt(space.w_SystemError, "accessing non-existent segment")
    writable = space.writebuf_w(w_buf)
    ref[0] = writable.get_raw_address()
    return len(writable)
def newmemoryview(space, w_obj, itemsize, format, w_shape=None,
                  w_strides=None):
    '''
    newmemoryview(buf, itemsize, format, shape=None, strides=None)

    Reinterpret an existing memoryview with a new itemsize/format and,
    optionally, explicit shape and strides.  With no shape, the view is
    taken as one-dimensional with nbytes/itemsize items; with no strides,
    a C-order layout is assumed.  Raises ValueError on any size mismatch.
    '''
    if not space.isinstance_w(w_obj, space.w_memoryview):
        raise oefmt(space.w_ValueError, "memoryview expected")
    # minimal error checking
    lgt = space.len_w(w_obj)
    old_size = w_obj.getitemsize()
    nbytes = lgt * old_size
    if w_shape:
        tot = 1
        shape = []
        for w_v in space.listview(w_shape):
            v = space.int_w(w_v)
            shape.append(v)
            tot *= v
        if tot * itemsize != nbytes:
            raise oefmt(
                space.w_ValueError,
                "shape/itemsize %s/%d does not match obj len/itemsize %d/%d",
                str(shape), itemsize, lgt, old_size)
    else:
        if itemsize == 0:
            raise oefmt(space.w_ValueError,
                        "cannot guess shape when itemsize==0")
        if nbytes % itemsize != 0:
            raise oefmt(space.w_ValueError,
                        "itemsize %d does not match obj len/itemsize %d/%d",
                        itemsize, lgt, old_size)
        # integer division (Python-2/RPython semantics)
        shape = [nbytes / itemsize, ]
    ndim = len(shape)
    if w_strides:
        strides = []
        for w_v in space.listview(w_strides):
            v = space.int_w(w_v)
            strides.append(v)
        if not w_shape and len(strides) != 1:
            raise oefmt(space.w_ValueError,
                        "strides must have one value if shape not provided")
        if len(strides) != ndim:
            raise oefmt(space.w_ValueError,
                        "shape %s does not match strides %s",
                        str(shape), str(strides))
    else:
        # start from the right, c-order layout
        strides = [itemsize] * ndim
        for v in range(ndim - 2, -1, -1):
            strides[v] = strides[v + 1] * shape[v + 1]
    # check that the strides are not too big
    for i in range(ndim):
        if strides[i] * shape[i] > nbytes:
            # BUGFIX: pass str(...) like every other message in this
            # function; oefmt's %s expects a string, not a list.
            raise oefmt(space.w_ValueError,
                        "shape %s and strides %s exceed object size %d",
                        str(shape), str(strides), nbytes)
    view = space.buffer_w(w_obj, 0)
    return space.newmemoryview(
        FormatBufferViewND(view, itemsize, format, ndim, shape, strides))
def getfunctype(self):
    """Return self.ctype, checked to actually be a function ctype."""
    candidate = self.ctype
    if isinstance(candidate, W_CTypeFunc):
        return candidate
    raise oefmt(self.space.w_TypeError, "expected a function ctype")
def _precheck_for_new(space, w_type):
    """Ensure w_type really is a type object before using it in __new__."""
    if isinstance(w_type, W_TypeObject):
        return w_type
    raise oefmt(space.w_TypeError, "X is not a type object (%T)", w_type)
def descr_buffer(self, space, w_flags):
    """Dispatch to the app-level __buffer__, which subclasses must override.

    Calling this on the base W_Bufferable itself is an error.
    """
    if type(self) is not W_Bufferable:
        return space.call_method(self, '__buffer__', w_flags)
    raise oefmt(space.w_ValueError, "override __buffer__ in a subclass")