def fix_ipython_startup(fn):
    """Patch *fn*'s code constants so IPython startup does not print (Bool_t)1.

    Replaces the constant 'TPython::Exec( "" )' inside fn's code object with
    the semicolon-terminated variant (ROOT suppresses the printed result when
    the statement ends with ';').  co_consts is an immutable tuple, so its
    refcount is temporarily lowered to 1 via the C API so PyTuple_SetItem
    will accept it.
    """
    BADSTR = 'TPython::Exec( "" )'
    GOODSTR = 'TPython::Exec( "" );'

    if sys.version_info[0] < 3:
        consts = fn.im_func.func_code.co_consts
    else:
        consts = fn.__code__.co_consts

    if BADSTR not in consts:
        return

    idx = consts.index(BADSTR)

    # getrefcount() reports one extra reference (its own argument) and the
    # local name holds another; record the count, then drop the local.
    orig_refcount = sys.getrefcount(consts)
    del consts

    PyTuple_SetItem = ctypes.pythonapi.PyTuple_SetItem
    PyTuple_SetItem.argtypes = (ctypes.py_object,
                                ctypes.c_size_t,
                                ctypes.py_object)

    if sys.version_info[0] < 3:
        consts = ctypes.py_object(fn.im_func.func_code.co_consts)
    else:
        # BUG FIX: was fn.im_func.__code__ -- plain Python 3 functions have
        # no im_func attribute, so this branch always raised AttributeError.
        consts = ctypes.py_object(fn.__code__.co_consts)

    # PyTuple_SetItem requires the tuple's refcount to be exactly 1.
    for _ in range(orig_refcount - 2):
        ctypes.pythonapi.Py_DecRef(consts)
    try:
        # PyTuple_SetItem steals a reference to the new item, so balance it.
        # BUG FIX: GOODSTR must be wrapped in py_object -- a bare str would
        # be marshalled as a C string pointer, not a PyObject*.
        ctypes.pythonapi.Py_IncRef(ctypes.py_object(GOODSTR))
        PyTuple_SetItem(consts, idx, ctypes.py_object(GOODSTR))
    finally:
        # Restore the original reference count of co_consts.
        for _ in range(orig_refcount - 2):
            ctypes.pythonapi.Py_IncRef(consts)
def sample(inimage):
    """Run the native `_sample.so` routine over *inimage*.

    Allocates a float32 OpenCV matrix matching the input's dimensions,
    hands both images to the C extension as PyObject pointers, and returns
    the filled output matrix.
    """
    result = cv.CreateMat(inimage.height, inimage.width, cv.CV_32FC1)
    native = CDLL(join(DN, '_sample.so'))
    native.sample(
        py_object(inimage),
        py_object(result),
    )
    return result
def on_chain_updated(self, chain_head, committed_batches=None, uncommitted_batches=None):
    """Forward a chain-head change to the native BlockPublisher.

    :param chain_head: the new head of block_chain, can be None if no
        block publishing is desired.
    :param committed_batches: the set of batches that were committed as
        part of the new chain.
    :param uncommitted_batches: the list of transactions if any that are
        now de-committed when the new chain was selected.
    :return: None
    """
    wrapped = (
        ctypes.py_object(chain_head),
        ctypes.py_object(committed_batches),
        ctypes.py_object(uncommitted_batches),
    )
    try:
        self._py_call('on_chain_updated', *wrapped)
    # pylint: disable=broad-except
    except Exception:
        LOGGER.exception(
            "Unhandled exception in BlockPublisher.on_chain_updated")
def datetime_data(dtype):
    """Return (unit, numerator, denominator, events) from a datetime dtype.

    Decodes the C-level DATETIMEMETA struct attached (via a PyCObject) to a
    NumPy datetime ('M') or timedelta ('m') dtype's metadata.

    NOTE(review): PyCObject only exists on Python 2; this helper cannot work
    on Python 3 as-is.
    """
    try:
        import ctypes
    except ImportError:
        # BUG FIX: `raise Exc, "msg"` is Python-2-only syntax; the call form
        # is valid on both Python 2.6+ and Python 3.
        raise RuntimeError("Cannot access date-time internals without ctypes installed")

    if dtype.kind not in ['m', 'M']:
        raise ValueError("Not a date-time dtype")

    obj = dtype.metadata[METADATA_DTSTR]

    class DATETIMEMETA(ctypes.Structure):
        _fields_ = [('base', ctypes.c_int),
                    ('num', ctypes.c_int),
                    ('den', ctypes.c_int),
                    ('events', ctypes.c_int)]

    func = ctypes.pythonapi.PyCObject_AsVoidPtr
    func.argtypes = [ctypes.py_object]
    func.restype = ctypes.c_void_p

    # Unwrap the PyCObject into a DATETIMEMETA*.
    result = func(ctypes.py_object(obj))
    result = ctypes.cast(ctypes.c_void_p(result), ctypes.POINTER(DATETIMEMETA))

    struct = result[0]
    base = struct.base

    # FIXME: This needs to be kept consistent with enum in ndarrayobject.h
    from numpy.core.multiarray import DATETIMEUNITS
    obj = ctypes.py_object(DATETIMEUNITS)
    result = func(obj)
    # DATETIMEUNITS wraps a char* table indexed by the base enum value.
    _unitnum2name = ctypes.cast(ctypes.c_void_p(result),
                                ctypes.POINTER(ctypes.c_char_p))

    return (_unitnum2name[base], struct.num, struct.den, struct.events)
def _async_raise(tid, excobj): ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(excobj)) while ret > 1: ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) sleep(0.1) ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(excobj))
def ptr_record(self, ip, timeout=1.0, af=AF_INET):
    """Submit a reverse (PTR) DNS lookup for *ip* and wait for the result.

    :param ip: textual IPv4/IPv6 address (must encode as ASCII)
    :param timeout: request timeout in seconds
    :param af: AF_INET or AF_INET6
    :raises ValueError: if the query is not in the 'new' state, or the
        address does not parse for the given family
    :raises TypeError: for an unsupported address family
    """
    # One-shot object: only usable from the pristine 'new' state.
    if self.__state != 'new':
        raise ValueError('bad state')
    self.__state = 'request'
    self.__query_common(timeout)
    # Remember the current coroutine (presumably a greenlet, given the
    # getcurrent()/switch() API -- confirm) to resume from the C callback.
    self.__branch = getcurrent()
    ip = ip.encode('ascii')
    if af == AF_INET:
        addr = _In_Addr()
        # dns_pton returns <= 0 on parse failure.
        if 0 >= _lib.dns_pton(af, ip, ctypes.addressof(addr)):
            raise ValueError('Not a valid AF_INET address: %s' % ip)
        # The local `callback` wrapper keeps the C callback alive during
        # the submit call.
        callback = _QueryPtr.Callback(self._ptr_callback)
        self.__query = _lib.dns_submit_a4ptr(
            _resolverservice.context,
            ctypes.addressof(addr),
            callback,
            ctypes.py_object(self.__branch))
    elif af == AF_INET6:
        addr = _In6_Addr()
        if 0 >= _lib.dns_pton(af, ip, ctypes.addressof(addr)):
            raise ValueError('Not a valid AF_INET6 address: %s' % ip)
        callback = _QueryPtr.Callback(self._ptr_callback)
        self.__query = _lib.dns_submit_a6ptr(
            _resolverservice.context,
            ctypes.addressof(addr),
            callback,
            ctypes.py_object(self.__branch))
    else:
        raise TypeError('bad address family')
    self.__af = af
    # Suspend until the resolver service delivers the answer.
    return _resolverservice.switch('request')
def handle_write(self, s):
    """Issue an overlapped WriteFile of *s* at the current file offset.

    Extracts a (pointer, length) view of the buffer via PyArg_ParseTuple,
    starts an asynchronous write, blocks the current tasklet on the IOCP
    channel until completion, and verifies the full buffer was written.
    """
    # UNTESTED AS OF YET.
    # Without access to the C structure associated with the buffer, we
    # have no other way of determining what the address of the given data
    # which we can use to write from is.
    writeBufferPtr = c_char_p()
    bytesToWrite = c_int()
    # "s#" extracts text buffers, "t#" legacy read-only binary buffers.
    fmt = self.binary and "s#" or "t#"
    # BUG FIX: the original passed the undefined names bPtr/bLen here,
    # which raised NameError before any I/O happened.
    ret = pythonapi.PyArg_ParseTuple(
        py_object((s,)), c_char_p(fmt),
        byref(writeBufferPtr), byref(bytesToWrite))
    if ret == 0:
        # This should be a Python error.
        raise WinError()
    bytesWritten = DWORD()
    ov = OVERLAPPED()
    ov.Offset = self.offset
    ov.channel = stackless.channel()
    self.ensure_iocp_association()
    ret = WriteFile(self.handle, writeBufferPtr, bytesToWrite,
                    byref(bytesWritten), byref(ov))
    if ret == 0:
        # Error.
        if windll.kernel32.GetLastError() != ERROR_IO_PENDING:
            # This should raise.
            pythonapi.PyErr_SetExcFromWindowsErrWithFilename(
                py_object(IOError), 0, c_char_p(self.filename))
    # Windows is processing our IO request and will get back to us.
    iocpMgr.RegisterChannelObject(self, ov.channel)
    ov.channel.receive()
    # BUG FIX: compare the extracted length's .value -- comparing the c_int
    # object itself to an int is always unequal.
    if bytesToWrite.value != bytesWritten.value:
        # This should raise.  Same check as done in the actual file
        # object code.
        raise WinError()
def __init__(
        self,
        block_store,
        block_cache,
        block_validator,
        state_database,
        chain_head_lock,
        state_pruning_block_depth=1000,
        data_dir=None,
        observers=None):
    """Construct the native chain controller behind this wrapper.

    :param block_store: block store, handed to the native side as a PyObject
    :param block_cache: block cache, handed over as a PyObject
    :param block_validator: validator object, handed over as a PyObject
    :param state_database: native handle; its .pointer is passed through
    :param chain_head_lock: native lock handle shared with the publisher
    :param state_pruning_block_depth: depth parameter forwarded as a C long
    :param data_dir: data directory ('' when not supplied)
    :param observers: chain observers (defaults to an empty list)
    """
    super(ChainController, self).__init__('chain_controller_drop')
    if data_dir is None:
        data_dir = ''
    if observers is None:
        observers = []
    # The native call fills self.pointer with the new controller handle.
    _pylibexec(
        'chain_controller_new',
        ctypes.py_object(block_store),
        ctypes.py_object(block_cache),
        ctypes.py_object(block_validator),
        state_database.pointer,
        chain_head_lock.pointer,
        ctypes.py_object(observers),
        ctypes.c_long(state_pruning_block_depth),
        ctypes.c_char_p(data_dir.encode()),
        ctypes.byref(self.pointer))
def proxy_builtin(cls):
    """Expose the writable dict behind a built-in class's mappingproxy.

    Overlays a ctypes structure on the proxy object and smuggles the real
    dict out through PyDict_SetItem, which handles refcounting correctly.
    """
    key = cls.__name__
    proxy = getattr(cls, '__dict__', key)
    raw = _PyObjectPointer.from_address(id(proxy))
    holder = {}
    ctypes.pythonapi.PyDict_SetItem(
        ctypes.py_object(holder),
        ctypes.py_object(key),
        raw.dict)
    return holder[key]
def greymap(img_in, img_out, mapping):
    """Apply a 256-entry greyscale lookup table to img_in, writing img_out.

    :param img_in: source image, passed to the C extension as a PyObject
    :param img_out: destination image, filled in place by the C extension
    :param mapping: sequence of exactly 256 byte values
    :raises ValueError: if mapping does not contain exactly 256 entries
    """
    if len(mapping) != 256:
        # ValueError is a subclass of Exception, so existing handlers still
        # match; the original message was also ungrammatical.
        raise ValueError("Greyscale mapping must provide a mapping of 256 bytes")
    array_type = ctypes.c_ubyte * 256
    cmodules.cgreymap_mod._wrap_greymap(
        ctypes.py_object(img_in),
        ctypes.py_object(img_out),
        array_type(*mapping))
def g(i):
    """Map selector *i* to a sample ctypes value.

    1 -> py_object wrapping the int 123, 2 -> an empty (uninitialized)
    py_object, anything else -> None.
    """
    if i == 1:
        return py_object(123)
    if i == 2:
        return py_object()
    return None
def reveal_dict(proxy):
    """Return the real dict object wrapped by a dictproxy.

    :raises TypeError: if *proxy* is not a dictproxy
    """
    if not isinstance(proxy, DictProxyType):
        raise TypeError('dictproxy expected')
    # Overlay the ctypes view on the proxy and pull the wrapped dict out
    # via PyDict_SetItem so it is increfed properly.
    shadow = _DictProxy.from_address(id(proxy))
    holder = {}
    ctypes.pythonapi.PyDict_SetItem(
        ctypes.py_object(holder), ctypes.py_object(None), shadow.dict)
    return holder[None]
def f():
    """Demonstrate pushing locals() changes back into fast locals.

    On CPython < 3.13, writing through locals() inside a function does not
    affect the underlying fast-local variables; PyFrame_LocalsToFast copies
    the frame's locals dict back into the fast-local slots so the writes
    become visible.  (NOTE: this private C API changed in CPython 3.13 with
    PEP 667 -- this demo targets older interpreters.)
    """
    frame = inspect.currentframe()
    x = 1
    locals()["x"] = 10
    ctypes.pythonapi.PyFrame_LocalsToFast(ctypes.py_object(frame), 0)
    # BUG FIX: Python-2 print statements converted to the print() function
    # (same output on both interpreters).
    print("x =", x)
    locals()["x"] = 100
    locals()["y"] = 20
    ctypes.pythonapi.PyFrame_LocalsToFast(ctypes.py_object(frame), 0)
    print("x =", x, ", y =", y)
def intdim(inmat):
    """Invoke the native `_intdim.so` routine on *inmat*.

    Allocates three float32 output matrices matching the input's shape,
    passes everything to the C extension by PyObject pointer, and returns
    the three filled outputs as a tuple.
    """
    rows, cols = inmat.rows, inmat.cols
    outputs = [cv.CreateMat(rows, cols, cv.CV_32FC1) for _ in range(3)]
    native = CDLL(join(DN, '_intdim.so'), mode=RTLD_GLOBAL)
    native.intdim(
        py_object(inmat),
        py_object(outputs[0]),
        py_object(outputs[1]),
        py_object(outputs[2]),
    )
    return outputs[0], outputs[1], outputs[2]
def test_specialize_prebuilt(self):
    # Two prebuilt py_object wrappers with differing payload types
    # (int vs str) to force specialization.
    five = py_object(5)
    hello = py_object("hello")

    def fn(i):
        return [five, hello][i]

    # interpret() runs fn under the RPython test interpreter; each prebuilt
    # wrapper must keep its own payload value.
    res = interpret(fn, [0])
    assert res.c_data[0]._obj.value == 5
    res = interpret(fn, [1])
    assert res.c_data[0]._obj.value == "hello"
def intdim(inmat):
    """Run the native `_intdim.so` estimator over *inmat*.

    Uses kernelsize 61, sd = 10, at the moment.  Returns three float32
    matrices (same shape as the input) filled by the C routine.
    """
    first = cv.CreateMat(inmat.rows, inmat.cols, cv.CV_32FC1)
    second = cv.CreateMat(inmat.rows, inmat.cols, cv.CV_32FC1)
    third = cv.CreateMat(inmat.rows, inmat.cols, cv.CV_32FC1)
    CDLL(join(DN, '_intdim.so'), mode=RTLD_GLOBAL).intdim(
        py_object(inmat),
        py_object(first),
        py_object(second),
        py_object(third),
    )
    return first, second, third
def test_annotate_prebuilt(self):
    # Two prebuilt py_object wrappers with differing payload types.
    five = py_object(5)
    hello = py_object("hello")

    def fn(i):
        return [five, hello][i]

    # Annotating fn over int input must infer py_object as the result type.
    a = RPythonAnnotator()
    s = a.build_types(fn, [int])
    assert s.knowntype == py_object
    # Optionally show the flow graph when requested on the command line.
    if conftest.option.view:
        a.translator.view()
def get_not_implemented():
    """Fetch the NotImplemented singleton via the C-API data symbol.

    Casts the exported _Py_NotImplementedStruct address to a py_object and
    routes it through PyDict_SetItem so the reference count is bumped
    correctly before handing the object back.
    """
    key = "_Py_NotImplmented"
    scratch = {}
    singleton = ctypes.cast(
        ctypes.pythonapi._Py_NotImplementedStruct, ctypes.py_object)
    ctypes.pythonapi.PyDict_SetItem(
        ctypes.py_object(scratch),
        ctypes.py_object(key),
        singleton)
    return scratch[key]
def get_class_dict(cls):
    """Return a mutable dict for *cls*, unwrapping a dictproxy if needed.

    :raises TypeError: if the object has no __dict__ at all
    """
    mapping = getattr(cls, '__dict__', None)
    if hasattr(mapping, 'pop'):
        # Already a plain, writable dict.
        return mapping
    if mapping is None:
        raise TypeError('given class does not have a dictionary')
    # Reveal dict behind DictProxy
    shadow = _DictProxy.from_address(id(mapping))
    scratch = {}
    ctypes.pythonapi.PyDict_SetItem(
        ctypes.py_object(scratch), ctypes.py_object(None), shadow.dict)
    return scratch[None]
def patchable_builtin(klass):
    """Return a writable dict for *klass*, piercing a dictproxy if present."""
    name = klass.__name__
    target = getattr(klass, "__dict__", name)
    if not isinstance(target, DictProxyType):
        # Plain dict already -- nothing to pierce.
        return target
    # Overlay a ctypes view on the proxy and extract the real dict through
    # PyDict_SetItem, which takes care of reference counting.
    revealed = SlotsProxy.from_address(id(target))
    scratch = {}
    ctypes.pythonapi.PyDict_SetItem(
        ctypes.py_object(scratch),
        ctypes.py_object(name),
        revealed.dict)
    return scratch[name]
def colorsplit(inimage):
    """Split *inimage* into opponent-colour planes via `_colorsplit.so`.

    Returns four float32 matrices the size of the input, in the order
    (luminance, saturation, redgreen, blueyellow).
    """
    height, width = inimage.height, inimage.width
    redgreen = cv.CreateMat(height, width, cv.CV_32FC1)
    blueyellow = cv.CreateMat(height, width, cv.CV_32FC1)
    luminance = cv.CreateMat(height, width, cv.CV_32FC1)
    saturation = cv.CreateMat(height, width, cv.CV_32FC1)
    native = CDLL(join(DN, '_colorsplit.so'))
    native.colorsplit(
        py_object(inimage),
        py_object(luminance),
        py_object(saturation),
        py_object(redgreen),
        py_object(blueyellow),
    )
    return luminance, saturation, redgreen, blueyellow
def patchable_builtin(klass):
    """Expose the real, writable dict behind *klass*'s __dict__ proxy."""
    label = klass.__name__
    proxied = getattr(klass, '__dict__', label)
    # View the proxy's memory through the SlotsProxy struct, then launder
    # the wrapped dict through PyDict_SetItem for correct refcounting.
    slots_view = SlotsProxy.from_address(id(proxied))
    out = {}
    ctypes.pythonapi.PyDict_SetItem(
        ctypes.py_object(out),
        ctypes.py_object(label),
        slots_view.dict,
    )
    return out[label]
def get_old_data(self):
    """Load historical WBS workflow rows from the database via `lib`.

    Populates (inferred from names -- verify against callers):
      * self.rows_lib_new2 -- workflow rows for the ids picked in the
        'WBS拷贝器' dialog
      * self.rows_lib      -- historical unit rows plus finished
        (status = 8) workflow rows
      * self.rows_lib_new  -- in-progress rows (1 <= status < 8, != 4)

    Sets self.b_new to True only when the user selected at least one id.
    """
    # cur_his=self.conn.cursor(cursor_factory=extras.DictCursor)
    cur_his = self.conn.cursor()
    self.b_new = False
    d = ask_list('WBS拷贝器', 1)
    # BUG FIX: the original bound the joined ids to the name `str`
    # (shadowing the builtin) and left it unassigned when the dialog was
    # cancelled, so the first query below concatenated the `str` type
    # object and raised TypeError.  Always bind the name; an empty id
    # list simply matches no rows.
    if not d:
        self.b_new = False
        id_list = ''
    else:
        self.b_new = True
        id_list = "','".join(d)
    # NOTE(review): these queries are assembled by string interpolation.
    # The ids come from an internal picker today, but parameterized
    # queries would be safer if that ever changes.
    cur_his.execute(
        "select elevator_type,req_configure_finish,req_delivery_date,"
        "contract_id,project_id,project_name,lift_no,project_catalog,"
        "is_urgent, name,instance_id,action_id,flow_ser,start_date,"
        "finish_date,is_active, "
        "to_char((select load from s_unit_parameter where "
        "wbs_no=instance_id),'9999') as load,"
        "to_char((select speed from s_unit_parameter where "
        "wbs_no=instance_id),'9D99') as speed,total_flow,action_name "
        "from v_task_out1 WHERE instance_id in ('" + id_list + "') "
        "AND (workflow_id = 'WF0002' OR workflow_id='WF0006') "
        "order by instance_id, flow_ser asc;")
    rows_his = cur_his.fetchall()
    self.rows_lib_new2 = lib.combine_new_list(ctypes.py_object(rows_his))

    cur_his.execute(
        "select *, "
        "to_char((select load from s_unit_parameter where "
        "wbs_no=v_history_units_data.wbs_no),'9999') as load,"
        "to_char((select speed from s_unit_parameter where "
        "wbs_no=v_history_units_data.wbs_no),'9D99') as speed "
        "from v_history_units_data order by wbs_no asc;")
    rows_his = cur_his.fetchall()
    self.rows_lib = lib.combine_list(ctypes.py_object(rows_his))

    # Finished workflows (status 8) are appended to the history rows.
    cur_his.execute(
        "select elevator_type,req_configure_finish,req_delivery_date,"
        "contract_id,project_id,project_name,lift_no,project_catalog, "
        "is_urgent, name,instance_id,action_id,flow_ser,start_date,"
        "finish_date,is_active, "
        "to_char((select load from s_unit_parameter where "
        "wbs_no=instance_id),'9999') as load,"
        "to_char((select speed from s_unit_parameter where "
        "wbs_no=instance_id),'9D99') as speed,total_flow,action_name "
        "from v_task_out1 WHERE status =8 "
        "AND (workflow_id = 'WF0002' or workflow_id='WF0006') "
        "order by instance_id, flow_ser asc;")
    rows_his = cur_his.fetchall()
    rows_temp = lib.combine_new_list(ctypes.py_object(rows_his))
    self.rows_lib.extend(rows_temp)

    # In-progress workflows: 1 <= status < 8, excluding status 4.
    cur_his.execute(
        "select elevator_type,req_configure_finish,req_delivery_date,"
        "contract_id,project_id,project_name,lift_no,project_catalog,"
        "is_urgent, name,instance_id,action_id,flow_ser,start_date,"
        "finish_date,is_active, "
        "to_char((select load from s_unit_parameter where "
        "wbs_no=instance_id),'9999') as load,"
        "to_char((select speed from s_unit_parameter where "
        "wbs_no=instance_id),'9D99') as speed,total_flow,action_name "
        "from v_task_out1 WHERE status>=1 AND status<8 and status!=4 "
        "AND (workflow_id = 'WF0002' OR workflow_id='WF0006') "
        "order by instance_id, flow_ser asc;")
    rows_his = cur_his.fetchall()
    self.rows_lib_new = lib.combine_new_list(ctypes.py_object(rows_his))
def patchable_builtin(klass):
    """Return the writable dict that backs *klass*.__dict__."""
    title = klass.__name__
    mapping_proxy = klass.__dict__
    # This code casts the proxy's backing `dict` slot into a python object;
    # `from_address()` returns the ctypes overlay view.
    view = SlotsProxy.from_address(id(mapping_proxy))
    sink = {}
    ctypes.pythonapi.PyDict_SetItem(
        ctypes.py_object(sink),
        ctypes.py_object(title),
        view.dict,
    )
    return sink[title]
def class_dict(klass):
    """proxy for class __dict__ through CPython. not quite pythonic."""
    label = klass.__name__
    proxy = getattr(klass, "__dict__")
    ptr = SlotsPointer.from_address(id(proxy))
    # Routing the unwrapped dict through PyDict_SetItem increfs it properly
    # before we pull it back out of the scratch dict.
    bucket = {}
    ctypes.pythonapi.PyDict_SetItem(
        ctypes.py_object(bucket), ctypes.py_object(label), ptr.dict)
    return bucket[label]
def __init__(self, block_manager, transaction_executor, block_status_store,
             permission_verifier, view_factory):
    """Construct the native block validator behind this wrapper.

    Plain Python arguments are handed to the native library as PyObject
    pointers; the other arguments contribute their native .pointer handles.
    The new validator handle is written into self.pointer.
    """
    super(BlockValidator, self).__init__("block_validator_drop")
    # _to_exception converts a non-zero native status into a Python error.
    _to_exception(PY_LIBRARY.call(
        "block_validator_new",
        block_manager.pointer,
        ctypes.py_object(transaction_executor),
        block_status_store.pointer,
        ctypes.py_object(permission_verifier),
        view_factory.pointer,
        ctypes.byref(self.pointer)))
def _async_raise(tid, excobj): res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(excobj)) if res == 0: raise ValueError("nonexistent thread id") elif res > 1: ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0) raise SystemError("PyThreadState_SetAsyncExc failed")
def interrupt(self):
    """Interrupts the execution of the executor if possible.

    Makes a best-effort attempt to interrupt the execution of the executor.
    An attempt is made to interrupt any blocking system calls, however if
    the execution is inside native code this may not be succesful.
    Long-running native code-computations will also not be interrupted.

    Once the thread is succesfully interrupted, execution finishes as per
    normal by emitting the ::statement-complete and ::complete signals,
    except that the state of the interrupted statement will be
    Statement.INTERRUPTED, and subsequence statements will have a state of
    Statement.COMPILE_SUCCESS.

    Calling interrupt() more than once will have no effect.
    """
    # See note in __run_thread() as to why we need to lock and why we need
    # to protect against sending the KeyboardInterrupt exception more than
    # once.
    self.lock.acquire()
    try:
        if not self.complete and not self.interrupted:
            self.interrupted = True
            _PyThreadState_SetAsyncExc(
                self.tid, ctypes.py_object(KeyboardInterrupt))
            # Also poke the thread out of blocking system calls if we can.
            if _pthread_kill is not None:
                _pthread_kill(self.tid, signal.SIGUSR1)
    finally:
        # BUG FIX: release in a finally block so an exception from the
        # C-API calls cannot leave the lock held forever.
        self.lock.release()
def register_event_callback(self, callback, *args):
    """ Registers a python callback to receive general GEIS events. """
    # Keep the curried (self, callback, args) tuple referenced on self:
    # the C side only stores the raw pointer, so without this reference
    # the tuple could be garbage collected while still registered.
    self._event_curry = ctypes.py_object((self, callback, args))
    _geis_register_event_callback(self._instance, self._cb_wrapper,
                                  self._event_curry)
def start(self, output):
    """
    Starts the encoder object writing to the specified output
    """
    self.event.clear()
    self.stopped = False
    self.exception = None
    self._open_output(output)
    # Stash a pointer back to this encoder so the C callback can find it.
    self.output_port[0].userdata = ct.cast(
        ct.pointer(ct.py_object(self)),
        ct.c_void_p)
    mmal_check(
        mmal.mmal_port_enable(self.output_port, _encoder_callback),
        prefix="Failed to enable encoder output port")
    # Hand every buffer in the pool to the output port so MMAL can fill
    # them with encoded data.
    for q in range(mmal.mmal_queue_length(self.pool[0].queue)):
        buf = mmal.mmal_queue_get(self.pool[0].queue)
        if not buf:
            raise PiCameraRuntimeError(
                "Unable to get a required buffer from pool queue")
        mmal_check(
            mmal.mmal_port_send_buffer(self.output_port, buf),
            prefix="Unable to send a buffer to encoder output port",
        )
    # Record whether capture was already active before we turn it on --
    # presumably so a later stop() knows whether this encoder owns the
    # capture state (confirm against stop()).
    b = mmal.MMAL_BOOL_T()
    mmal_check(
        mmal.mmal_port_parameter_get_boolean(
            self.camera_port, mmal.MMAL_PARAMETER_CAPTURE, b),
        prefix="Failed to query capture status",
    )
    self.started_capture = not bool(b)
    if self.started_capture:
        mmal_check(
            mmal.mmal_port_parameter_set_boolean(
                self.camera_port, mmal.MMAL_PARAMETER_CAPTURE,
                mmal.MMAL_TRUE),
            prefix="Failed to start capture",
        )
def nearest(self, geom):
    """Return the stored geometry nearest to *geom*, or None if the tree
    is empty."""
    if self._n_geoms == 0:
        return None

    envelope = geom.envelope

    def callback(item1, item2, distance, userdata):
        # GEOS C callback: item1/item2 are PyObject* handles to the stored
        # geometries; write their distance into the double that *distance*
        # points at.  Return 1 on success, 0 to signal an error to GEOS.
        try:
            geom1 = ctypes.cast(item1, ctypes.py_object).value
            geom2 = ctypes.cast(item2, ctypes.py_object).value
            dist = ctypes.cast(distance, ctypes.POINTER(ctypes.c_double))
            lgeos.GEOSDistance(geom1._geom, geom2._geom, dist)
            return 1
        except:
            # Deliberately broad: exceptions must not cross the C boundary.
            return 0

    item = lgeos.GEOSSTRtree_nearest_generic(
        self._tree_handle, ctypes.py_object(geom), envelope._geom,
        lgeos.GEOSDistanceCallback(callback), None)
    # The returned item is the PyObject* we inserted; unwrap it.
    result = ctypes.cast(item, ctypes.py_object).value

    return result
def get_landmarks(self) -> List[Point]:
    """Fetch this raw sample's landmark points from the native library.

    The C call fills a flat float list (DIMENSIONS_NUMBER values per
    landmark) which is then regrouped into Point objects.
    """
    flat = list()
    exception = make_exception()

    self._dll_handle.RawSample_getLandmarks(
        self._impl,
        py_object(flat),
        assign_floats_vector_func,
        exception)

    check_exception(exception, self._dll_handle)

    return [
        Point(flat[i * DIMENSIONS_NUMBER + 0],
              flat[i * DIMENSIONS_NUMBER + 1],
              flat[i * DIMENSIONS_NUMBER + 2])
        for i in range(int(len(flat) / DIMENSIONS_NUMBER))
    ]
def raise_on_thread(t, exctype):
    '''Raises an exception in the threads with id tid'''
    target = None
    for candidate_id, candidate in threading._active.items():
        if candidate is t:
            target = candidate_id
            break
    if target is None:
        raise Exception("Could not find thread")

    if not inspect.isclass(exctype):
        raise TypeError("Only types can be raised (not instances)")

    touched = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        ctypes.c_long(target), ctypes.py_object(exctype))
    if touched == 0:
        raise ValueError("invalid thread id")
    if touched != 1:
        # "if it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect"
        ctypes.pythonapi.PyThreadState_SetAsyncExc(target, 0)
        raise SystemError("PyThreadState_SetAsyncExc failed")
def async_raise(tid, exctype=Exception):
    """
    Raise an Exception in the Thread with id `tid`. Perform cleanup if
    needed.

    Based on Killable Threads By Tomer Filiba from
    http://tomerfiliba.com/recipes/Thread2/
    license: public domain.
    """
    assert isinstance(tid, int), 'Invalid thread id: must an integer'

    tid = ctypes.c_long(tid)
    # BUG FIX: the original wrapped the hard-coded Exception class instead
    # of the caller-supplied exctype, silently ignoring the requested
    # exception type.
    exception = ctypes.py_object(exctype)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, exception)
    if res == 0:
        raise ValueError('Invalid thread id.')
    elif res != 1:
        # if it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
        raise SystemError('PyThreadState_SetAsyncExc failed.')
def run(self):
    """Thread main: initialize the virtual serial-port (VSPC) library,
    then poll handler status until asked to stop.

    Publishes each handler's status over MQTT roughly once per second; on
    exit the ports are cleaned up and the VSPC library is closed.
    """
    # The C event callback receives this object back through a void*
    # userdata pointer.
    cUserdata = ctypes.cast(
        ctypes.pointer(ctypes.py_object(self)), ctypes.c_void_p)
    key = """"""
    try:
        import license
        key = license.key
    except Exception as ex:
        logging.warning("Failed to loading license key!!")
    ret = vspc.FtVspcApiInit(self._vspc_event_cb, cUserdata, key)
    logging.info("FtVspcApiInit: {0}".format(ret))
    self._cUserdata = cUserdata
    if not ret:
        logging.fatal("Failed to Initialize VSPC Library: {0}".format(
            vspc.GetLastErrorMessage()))
        # BUG FIX: os.exit does not exist (the call raised AttributeError);
        # os._exit terminates the process as the following `return`
        # suggests was intended.
        os._exit(0)
        return
    ret = vspc.FtVspcGetInfo()
    logging.info("FtVspcGetInfo: {0}".format(ret))
    # self.add_by_num(4, Handler(name))
    while not self._thread_stop:
        time.sleep(1)
        for handler in self._handlers:
            try:
                info = json.dumps(handler.as_dict())
                self._mqtt_stream_pub.vspc_status(
                    handler.get_port_key(), info)
            except Exception as ex:
                logging.exception(ex)
        # Heartbeat handling is not implemented yet.
        if self._enable_heartbeat and time.time() > self._heartbeat_timeout:
            pass
    self.clean_all()
    vspc.FtVspcApiClose()
    logging.warning("Close VSPC Library!!!")
def _interrupt_pool(self) -> None: """Interrupt all tasks in the backing `pool`, if any.""" if self._pool is None: return # Terminate the pool self._pool.terminate() if self.scheduler == "threads": # `ThreadPool.terminate()` doesn't stop running tasks, only # prevents new tasks from running. In CPython we can attempt to # raise an exception in all threads. This exception will be raised # the next time the task does something with the Python api. # However, if the task is currently blocked in a c extension, it # will not immediately be interrupted. There isn't a good way # around this unfortunately. import platform if platform.python_implementation() != "CPython": self.logger.warning( "Interrupting a running threadpool is only supported in CPython, " "all currently running tasks will continue to completion" ) return self.logger.info("Attempting to interrupt and cancel all running tasks...") import sys import ctypes # signature of this method changed in python 3.7 if sys.version_info >= (3, 7): id_type = ctypes.c_ulong else: id_type = ctypes.c_long for t in self._pool._pool: # type: ignore ctypes.pythonapi.PyThreadState_SetAsyncExc( id_type(t.ident), ctypes.py_object(KeyboardInterrupt) )
def _async_raise(thread_obj, exception): found = False target_tid = 0 for tid, tobj in threading._active.items(): if tobj is thread_obj: found = True target_tid = tid break if not found: raise ValueError("Invalid thread object") ret = ctypes.pythonapi.PyThreadState_SetAsyncExc( target_tid, ctypes.py_object(exception)) if ret == 0: raise ValueError("Invalid thread ID") elif ret > 1: # Por que devemos notificar mais de um thread? # Porque nós temos um buraco no interpretador no nível da linguagem C. # Então é melhor limpar a bagunça. ctypes.pythonapi.PyThreadState_SetAsyncExc(target_tid, 0) raise SystemError("PyThreadState_SetAsyncExc failed")
def terminate_thread(thread):
    # From http://code.activestate.com/recipes/496960-thread2-killable-threads/
    """Terminates a python thread from another thread.

    :param thread: a thread
    """
    if not thread:
        return
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
    # available on Python 2.6+ and all Python 3 versions.
    if not thread.is_alive():
        return

    exc = ctypes.py_object(SystemExit)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        ctypes.c_long(thread.ident), exc)
    if res == 0:
        raise ValueError("nonexistent thread id")
    elif res > 1:
        # """if it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect"""
        ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
        raise SystemError("PyThreadState_SetAsyncExc failed")
def _dev_read_thread(hid_device): """Binds a device to the thread's run loop, then starts the run loop. Args: hid_device: The MacOsHidDevice object The HID manager requires a run loop to handle Report reads. This thread function serves that purpose. """ # Schedule device events with run loop hid_device.run_loop_ref = cf.CFRunLoopGetCurrent() if not hid_device.run_loop_ref: logger.error("Failed to get current run loop") return iokit.IOHIDDeviceScheduleWithRunLoop(hid_device.handle, hid_device.run_loop_ref, K_CF_RUNLOOP_DEFAULT_MODE) iokit.IOHIDDeviceRegisterRemovalCallback(hid_device.handle, REMOVAL_CALLBACK, ctypes.py_object(hid_device)) # Run the run loop run_loop_run_result = cf.CFRunLoopRunInMode( K_CF_RUNLOOP_DEFAULT_MODE, 4, True # Timeout in seconds ) # Return after source handled # log any unexpected run loop exit if run_loop_run_result != K_CF_RUN_LOOP_RUN_HANDLED_SOURCE: logger.error("Unexpected run loop exit code: %d", run_loop_run_result) # Unschedule from run loop iokit.IOHIDDeviceUnscheduleFromRunLoop(hid_device.handle, hid_device.run_loop_ref, K_CF_RUNLOOP_DEFAULT_MODE)
def eval(self, src):
    """ Execute code in Julia, then pull some results back to Python. """
    if src is None:
        return None
    ans = self._call(src)
    if not ans:
        return None
    # Ask Julia to convert the answer to a PyCall.PyObject wrapper.
    res = self.api.jl_call2(
        void_p(self.api.convert), void_p(self.api.PyObject), void_p(ans))
    if res is None:
        self.check_exception("convert(PyCall.PyObject, %s)" % src)
        self._debug('Need to return None.')
    if res == 0:
        self._debug('How can res == 0?')
        return None
    # Unbox the wrapper's `o` field back to the raw PyObject*.
    boxed_obj = self.api.jl_get_field(void_p(res), b'o')
    pyobj = self.api.jl_unbox_voidpointer(void_p(boxed_obj))
    # make sure we incref it before returning it,
    # as this is a borrowed reference
    ctypes.pythonapi.Py_IncRef(ctypes.py_object(pyobj))
    return pyobj
def _add_fd_to_loop(self, fd, cb, fd_events, userdata=None):
    """Register *fd* with the pomp event loop.

    :param fd: file descriptor to watch
    :param cb: C-compatible callback invoked on fd events (required)
    :param fd_events: event mask forwarded to pomp_loop_add
    :param userdata: optional Python object forwarded to the callback
    :raises RuntimeError: if pomp_loop_add reports an error
    """
    if cb is None:
        self.logger.info(
            f"Cannot add fd '{fd}' to pomp loop without a valid callback function"
        )
        return None
    # Keep both the Python object and its ctypes pointer wrapper alive on
    # self: the C side only stores a raw pointer.
    self.fd_userdata[fd] = userdata
    userdata = ctypes.cast(
        ctypes.pointer(ctypes.py_object(userdata)), ctypes.c_void_p)
    self.c_fd_userdata[fd] = userdata
    # The callback wrapper must also outlive the registration.
    self.pomp_fd_callbacks[fd] = od.pomp_fd_event_cb_t(cb)
    res = od.pomp_loop_add(
        self.pomp_loop,
        ctypes.c_int32(fd),
        od.uint32_t(int(fd_events)),
        self.pomp_fd_callbacks[fd],
        userdata,
    )
    if res != 0:
        # pomp returns a negated errno value on failure.
        raise RuntimeError(
            f"Cannot add fd '{fd}' to pomp loop: {os.strerror(-res)} ({res})"
        )
def ctype_async_raise(thread_obj, exception):
    """Inject *exception* into the thread represented by *thread_obj*.

    ref: http://docs.python.org/c-api/init.html#PyThreadState_SetAsyncExc
    """
    for candidate_tid, candidate in threading._active.items():
        if candidate is thread_obj:
            target_tid = candidate_tid
            break
    else:
        raise ValueError("Invalid thread object")

    ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        target_tid, ctypes.py_object(exception))
    if ret == 0:
        # No thread state matched the id we just looked up.
        raise Mexcept.DebuggerQuit
    if ret > 1:
        # Huh? Why would we notify more than one thread?  Because we punch
        # a hole into the C-level interpreter state, so it is better to
        # clean up the mess with exc=NULL.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(target_tid, 0)
        raise SystemError("PyThreadState_SetAsyncExc failed")
def _open_url(self):
    """ Opening rtsp streaming url """
    if self.resource_name.startswith("replay/"):
        # Replays must name the media to stream.
        if self.media_name is None:
            self.logger.error(
                "Error media_name should be provided in video stream "
                "replay mode")
            return False
    # Pass ourselves as userdata so the demuxer callbacks can find this
    # object again; the new demuxer handle lands in self.pdraw_demuxer.
    res = od.pdraw_demuxer_new_from_url(
        self.pdraw,
        self.url,
        self.demuxer_cbs,
        ctypes.cast(
            ctypes.pointer(ctypes.py_object(self)), ctypes.c_void_p),
        ctypes.byref(self.pdraw_demuxer))
    if res != 0:
        self.logger.error(
            f"Error while opening pdraw url: {self.url} ({res})")
        return False
    else:
        self.logger.info(f"Opening pdraw url OK: {self.url}")
    return True
def from_dlpack(dltensor):
    """Produce an array from a DLPack tensor without memory copy.

    Retrieves the underlying DLPack tensor's pointer to create an array
    from the data. Removes the original DLPack tensor's destructor as now
    the array is responsible for destruction.

    Parameters
    ----------
    dltensor : DLPack tensor

    Returns
    -------
    arr: tvm.nd.NDArray
        The array view of the tensor data.
    """
    dltensor = ctypes.py_object(dltensor)
    # NOTE(review): assumes PyCapsule_GetName/GetPointer restype/argtypes
    # are configured elsewhere in this module; with raw pythonapi defaults
    # the returned pointers would be truncated to int on 64-bit builds --
    # verify.
    name = ctypes.pythonapi.PyCapsule_GetName(dltensor)
    ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, name)
    handle = TVMArrayHandle()
    check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))
    # Clear the capsule's destructor: ownership has moved to the new array.
    ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, None)
    return _make_array(handle, False)
def caget_one(pv, timeout=5, datatype=None, format=FORMAT_RAW, count=0): '''Retrieves a value from a single PV in the requested format. Blocks until the request is complete, raises an exception if any problems occur.''' # Start by converting the timeout into an absolute timeout. This allows # us to do repeated timeouts without actually extending the timeout # deadline. timeout = cothread.AbsTimeout(timeout) # Retrieve the requested channel and ensure it's connected. channel = _channel_cache[pv] channel.Wait(timeout) # A count of zero will be treated by EPICS in a version dependent manner, # either returning the entire waveform (equivalent to count=-1) or a data # dependent waveform length. if count < 0: # Treat negative count request as request for fixed underlying channel # size. count = cadef.ca_element_count(channel) elif count > 0: # Need to ensure we don't ask for more than the channel can provide as # otherwise may get API error. count = min(count, cadef.ca_element_count(channel)) # Assemble the callback context. Note that we need to explicitly # increment the reference count so that the context survives until the # callback routine gets to see it. dbrcode, dbr_to_value = dbr.type_to_dbr(channel, datatype, format) done = cothread.Event() context = (pv, dbr_to_value, done) ctypes.pythonapi.Py_IncRef(context) # Perform the actual put as a non-blocking operation: we wait to be # informed of completion, or time out. cadef.ca_array_get_callback(dbrcode, count, channel, _caget_event_handler, ctypes.py_object(context)) _flush_io() return ca_timeout(done, timeout, pv)
def delete(self):
    """Stop an in-progress training run.

    Validates the token, looks up the training thread named after the
    model uid, injects SystemExit into it, and marks the model as failed
    in the database.

    Returns a (json, http-status) tuple in the API's standard envelope.
    """
    try:
        parser = reqparse.RequestParser()
        parser.add_argument('token', type=str, required=True)
        parser.add_argument('modelUid', type=str, required=True)
        args = parser.parse_args()
        if not tokenValidator(args['token']):
            return {"status": "error", "msg": "token error", "data": {}}, 401
        args.pop('token')
        logging.info(f'[API_stopTraining] args:{args}')
        mid = args['modelUid']
        _, _, _, _, _, status, _, _ = getModelInfo(mid)[0]
        if status != 'train':
            return {"status": "error", "msg": f"model {mid} is not training", "data": {}}, 400
        res = 0
        tid = None
        for t in threading.enumerate():
            if t.name == mid:
                tid = ctypes.c_long(t.ident)
                res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
                    tid, ctypes.py_object(SystemExit))
        if res == 0:
            logging.info(f'[API_stopTraining] model {mid} not found in thread')
            return {"status": "error", "msg": f"model {mid} not found in thread", "data": {}}, 400
        elif res != 1:
            # Undo the over-broad delivery before reporting failure.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
            logging.info(f'[API_stopTraining] something went wrong, can\'t stop training')
            return {"status": "error", "msg": f"something went wrong, can't stop training", "data": {}}, 400
        # Bind db before the try so the finally clause can always close it.
        db = sql()
        try:
            # SECURITY FIX: the model uid comes from the request; use a
            # parameterized query instead of interpolating it into the SQL
            # string (assumes the MySQL-style '%s' paramstyle -- verify
            # against the `sql` wrapper's driver).
            db.cursor.execute(
                "UPDATE `models` SET `status`='fail',`failReason`='user stopped' "
                "WHERE `mid`=%s;",
                (mid,))
            db.conn.commit()
        except Exception as e:
            db.conn.rollback()
            raise Exception(e)
        finally:
            db.conn.close()
        return {"status": "success", "msg": "", "data": {}}, 200
    except Exception as e:
        logging.error(f"[API_stopTraining]{traceback.format_exc()}")
        return {"status": "error", "msg": str(traceback.format_exc()), "data": {}}, 400
def capture(self, data: Union[RawImage, bytes]) -> List[RawSample]:
    """Detect samples in an encoded image.

    Accepts either a RawImage (its underlying bytes are used) or raw
    encoded bytes, forwards them to the native capturer, and wraps each
    returned native pointer in a RawSample.

    :raises Error: for any other input type
    """
    if isinstance(data, RawImage):
        payload = data.data
    elif isinstance(data, bytes):
        payload = data
    else:
        raise Error(0xfe6034d4, "Wrong type of data")

    handles = list()
    exception = make_exception()

    self._dll_handle.Capturer_capture_encoded_image(
        self._impl,
        c_char_p(payload),
        c_uint32(len(payload)),
        py_object(handles),
        assign_pointers_vector_func,
        exception)

    check_exception(exception, self._dll_handle)

    return [RawSample(self._dll_handle, c_void_p(h)) for h in handles]
def caput_one(pv, value, datatype=None, wait=False, timeout=5, callback=None): '''Writes a value to a single pv, waiting for callback on completion if requested.''' # Connect to the channel and wait for connection to complete. timeout = cothread.AbsTimeout(timeout) channel = _channel_cache[pv] channel.Wait(timeout) # Note: the unused value returned below needs to be retained so that # dbr_array, a pointer to C memory, has the right lifetime: it has to # survive until ca_array_put[_callback] has been called. dbrtype, count, dbr_array, value = \ dbr.value_to_dbr(channel, datatype, value) if wait or callback is not None: # Assemble the callback context and give it an extra reference count # to keep it alive until the callback handler sees it. if wait: done = cothread.Event() else: done = None context = (pv, done, callback) ctypes.pythonapi.Py_IncRef(context) # caput with callback requested: need to wait for response from # server before returning. cadef.ca_array_put_callback(dbrtype, count, channel, dbr_array, _caput_event_handler, ctypes.py_object(context)) _flush_io() if wait: ca_timeout(done, timeout, pv) else: # Asynchronous caput, just do it now. cadef.ca_array_put(dbrtype, count, channel, dbr_array) _flush_io() # Return a success code for compatibility with throw=False code. return ca_nothing(pv)
def run(self):
    '''
        run - The thread main. Will attempt to stop and join the attached
        thread by repeatedly raising the configured exception inside it,
        then close the captured stderr stream.
    '''
    # Try to silence default exception printing by swapping in our own
    # stderr on the target thread (Python 2 internal attribute; on
    # Python 3 this just sets an unused attribute).
    self.otherThread._Thread__stderr = self._stderr
    if hasattr(self.otherThread, '_Thread__stop'):
        # If py2, call this first to start thread termination cleanly.
        # Python3 does not need such ( nor does it provide.. )
        self.otherThread._Thread__stop()
    while self.otherThread.is_alive():
        # We loop raising exception incase it's caught hopefully this
        # breaks us far out.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(self.otherThread.ident),
            ctypes.py_object(self.exception))
        self.otherThread.join(self.repeatEvery)
    try:
        self._stderr.close()
    except Exception:
        # Best effort only -- the stream may already be closed.  A bare
        # ``except:`` here would also swallow KeyboardInterrupt/SystemExit.
        pass
def stop(self, exception=ScatterExit, block=True, timeout=None):
    """Ask this thread to terminate by raising *exception* inside it.

    Returns True on success (after joining when *block* is set), False if
    ctypes is unavailable or the async-exception call reports failure.
    """
    try:
        import ctypes
    except ImportError:
        return False

    thread_id = ctypes.c_long(self.ident)
    rc = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        thread_id, ctypes.py_object(exception))

    # Invalid thread id (?).
    if rc == 0:
        return False
    # Error code; revert and fail.
    if rc != 1:
        ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, None)
        return False

    if not block:
        return True
    return self.join(timeout)
def async_raise(self, exc_type):
    """Raise *exc_type* asynchronously inside this thread.

    :raises AssertionError: if the thread was never started.
    :raises ValueError: if the thread id is invalid while the thread is
        still alive.
    :raises RuntimeError: if more than one thread state was modified (the
        effect is reverted before raising).
    """
    # Should only be called on a started thread so raise otherwise
    assert self.ident is not None, 'Only started threads have thread identifier'

    # If the thread has died we don't want to raise an exception so log.
    if not self.is_alive():
        _LOG.debug('Not raising %s because thread %s (%s) is not alive',
                   exc_type, self.name, self.ident)
        return

    result = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        ctypes.c_long(self.ident), ctypes.py_object(exc_type))
    if result == 0 and self.is_alive():
        # Don't raise an error unnecessarily if the thread is dead.
        raise ValueError('Thread ID was invalid.', self.ident)
    elif result > 1:
        # Something bad happened, call with a NULL exception to undo.
        # (Wrap the id in c_long for consistency with the first call.)
        ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(self.ident), None)
        # BUG FIX: the original passed the %s template and its arguments
        # as separate RuntimeError args, so the message was never
        # interpolated; format it explicitly.
        raise RuntimeError(
            'Error: PyThreadState_SetAsyncExc %s %s (%s) %s'
            % (exc_type, self.name, self.ident, result))
def start(self, output):
    """
    Starts the encoder object writing to the specified output
    """
    self.event.clear()
    self.stopped = False
    self.exception = None
    self._open_output(output)
    # Stash a pointer to this Python object in the port userdata so the
    # C-level _encoder_callback can recover it.
    self.output_port[0].userdata = ct.cast(
        ct.pointer(ct.py_object(self)),
        ct.c_void_p)
    mmal_check(
        mmal.mmal_port_enable(self.output_port, _encoder_callback),
        prefix="Failed to enable encoder output port")
    # Hand every buffer in the pool to the output port so the encoder has
    # buffers to write into.
    for q in range(mmal.mmal_queue_length(self.pool[0].queue)):
        buf = mmal.mmal_queue_get(self.pool[0].queue)
        if not buf:
            raise PiCameraRuntimeError(
                "Unable to get a required buffer from pool queue")
        mmal_check(
            mmal.mmal_port_send_buffer(self.output_port, buf),
            prefix="Unable to send a buffer to encoder output port")
    b = mmal.MMAL_BOOL_T()
    mmal_check(
        mmal.mmal_port_parameter_get_boolean(
            self.camera_port, mmal.MMAL_PARAMETER_CAPTURE, b),
        prefix="Failed to query capture status")
    # Remember whether capture was started by us, so the matching stop
    # can undo only what this call did.
    self.started_capture = not bool(b)
    if self.started_capture:
        mmal_check(
            mmal.mmal_port_parameter_set_boolean(
                self.camera_port, mmal.MMAL_PARAMETER_CAPTURE, mmal.MMAL_TRUE),
            prefix="Failed to start capture")
async def __create_subscription(self, events, datatype, format, count, connect_timeout): """Creates the channel subscription with the specified parameters: event mask, datatype and format, array count. Waits for the channel to become connected.""" # Need to first wait for the channel to connect before we can do # anything else. This will either succeed, or wait forever, raising # if close() is called await self.__wait_for_channel(connect_timeout) self.state = self.OPEN # Treat a negative count as a request for the complete data if count < 0: count = cadef.ca_element_count(self.channel) # Connect to the channel to be kept informed of connection updates. self.channel._add_subscription(self) # Convert the datatype request into the subscription datatype. dbrcode, self.dbr_to_value = dbr.type_to_dbr(self.channel, datatype, format) # Finally create the subscription with all the requested properties # and hang onto the returned event id as our implicit ctypes # parameter. event_id = ctypes.c_void_p() cadef.ca_create_subscription( dbrcode, count, self.channel, events, self.__on_event, ctypes.py_object(self), ctypes.byref(event_id), ) _flush_io() self._as_parameter_ = event_id.value
def extract():
    """Copies the variables of the caller up to iPython. Useful for debugging.

    .. code-block:: python

        def f():
            x = 'hello world'
            extract()

        f() # raises an error

        print(x) # prints 'hello world'
    """
    import inspect
    import ctypes

    stack = inspect.stack()
    caller_frame = stack[1].frame
    # The outermost (most recent) interactive IPython input frame.
    ipython_frame = [
        entry for entry in stack
        if entry.filename.startswith('<ipython-input')
    ][-1].frame

    def _public(namespace):
        # Drop dunder names; everything else is copied across.
        return {name: val for name, val in namespace.items()
                if name[:2] != '__'}

    ipython_frame.f_locals.update(_public(caller_frame.f_globals))
    ipython_frame.f_locals.update(_public(caller_frame.f_locals))

    # Magic call to make the updates to f_locals 'stick'.
    # More info: http://pydev.blogspot.co.uk/2014/02/changing-locals-of-frame-frameflocals.html
    ctypes.pythonapi.PyFrame_LocalsToFast(
        ctypes.py_object(ipython_frame), ctypes.c_int(0))

    raise RuntimeError('Copied {}\'s variables to {}'.format(
        caller_frame.f_code.co_name, ipython_frame.f_code.co_name))
def _do_plot_on_matplotlib_figure(fig):
    # Plot this shape-plot's data onto the supplied matplotlib figure and
    # return the resulting 3-D axis object.
    # NOTE: ``self``, ``args`` and ``kwargs`` are free variables from an
    # enclosing scope (this is a nested helper).
    import ctypes
    get_plotshape_data = nrn_dll_sym('get_plotshape_data')
    get_plotshape_data.restype = ctypes.py_object
    # Returns (variable, varobj, lo, hi, secs) for this plot's data handle.
    variable, varobj, lo, hi, secs = get_plotshape_data(ctypes.py_object(self._data))
    if varobj is not None:
        variable = varobj
    # Enable picking within 2 points unless the caller specified otherwise.
    kwargs.setdefault('picker', 2)
    result = _get_pyplot_axis3d(fig)
    _lines = result._do_plot(lo, hi, secs, variable, *args, **kwargs)
    result._mouseover_text = ''
    def _onpick(event):
        # Show the picked line's label as mouseover text; clear otherwise.
        if event.artist in _lines:
            result._mouseover_text = _lines[event.artist]
        else:
            result._mouseover_text = ''
        return True
    result.auto_aspect()
    fig.canvas.mpl_connect('pick_event', _onpick)
    def format_coord(*args):
        # Replace the toolbar coordinate readout with the mouseover text.
        return result._mouseover_text
    result.format_coord = format_coord
    return result
def shutdown_keeper(self):
    """Shut the keeper down and forcibly terminate any leaked threads."""
    self.keeper.shutdown()  # HACK: Lifecycle doesn't invoke this as expected
    self.keeper.lifecycle.terminate("unit test completed")
    self.thread.join()

    # HACK: Lifecycle leaks threads; this needs to be fixed in pymaker
    import ctypes
    while threading.active_count() > 1:
        this_thread = threading.current_thread()
        for leaked in threading.enumerate():
            if leaked is this_thread:
                continue
            print(f"Attempting to kill thread {leaked}")
            # Creates a C pointer to a Python "SystemExit" exception
            sysexit = ctypes.py_object(SystemExit)
            ctypes.pythonapi.PyThreadState_SetAsyncExc(
                ctypes.c_long(leaked.ident), sysexit)
        time.sleep(1)

    # Ensure we don't leak threads, which would break wait_for_other_threads() later on
    assert threading.active_count() == 1
    assert self.get_dai_vat_balance() == Wad(0)
def _async_raise(tid, exctype): """ 功能: - 关闭子线程 - tid: 子线程id - exctype: 退出线程方法 """ try: tid = ctypes.c_long(tid) if not inspect.isclass(exctype): exctype = type(exctype) res = ctypes.pythonapi.PyThreadState_SetAsyncExc( tid, ctypes.py_object(exctype)) if res == 0: raise ValueError("invalid thread id") elif res != 1: # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") except Exception as e: print(e)
def terminate_thread(self, thread):
    """
    Terminates a python thread from another thread, then commands the
    robot to stop.

    :param thread: a threading.Thread instance
    """
    try:
        # BUG FIX: ``Thread.isAlive`` was removed in Python 3.9; use
        # ``is_alive()``.
        if not thread.is_alive():
            return

        exc = ctypes.py_object(SystemExit)
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(thread.ident), exc)
        if res == 0:
            raise ValueError("nonexistent thread id")
        elif res > 1:
            # """if it returns a number greater than one, you're in trouble,
            # and you should call it again with exc=NULL to revert the effect"""
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
            raise SystemError("PyThreadState_SetAsyncExc failed")
    except Exception:
        # BUG FIX: the original read ``except e:`` -- ``e`` is undefined,
        # so any exception here would have raised NameError instead of
        # being swallowed.  Errors are deliberately ignored (best effort).
        pass
    # Always publish a zero-velocity command after attempting termination.
    self.cmd_vel_pub.publish(Twist())
def test_async(n_inner=200, n_repeat=1000):
    """
    n_inner should be larger than check interval by at around 20.
    It returns a list of for loop count.
    The first one could be anything below check interval
    The other ones should be similar. Anything bigger is bad.
    """
    # NOTE(review): Python 2 only -- uses print statements, the ``thread``
    # module and sys.getcheckinterval() (all gone in modern Python 3).
    check_interval = sys.getcheckinterval()
    print 'current check interval', check_interval
    result = []
    for i in range(n_repeat):
        j = -99
        # Schedule KeyboardInterrupt in our own thread; it is delivered at
        # the interpreter's next bytecode check inside the loop below.
        pythonapi.PyThreadState_SetAsyncExc(c_long(thread.get_ident()),
            py_object(KeyboardInterrupt))
        try:
            for j in range(n_inner):
                pass
        except KeyboardInterrupt:
            # Record how far the loop got before the interrupt landed.
            result.append(j)
    for r in result:
        if r > check_interval:
            # NOTE(review): the '%i' template is never interpolated (the
            # ``%`` operator is missing), so it prints literally.
            print ' WARNING found: %i > check interval', r
    return result
def stop_thread(cls, thread_name) -> None:
    """
    Stops a function that was started with TM.run(...)
    the exception will be caught in kill_a_thread()
    """
    for idx, entry in enumerate(cls.running_threads):
        if entry.th_name != thread_name:
            continue

        exc = ctypes.py_object(SystemExit)
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(cls.running_threads[idx].th.ident), exc)
        if res == 0:
            raise ValueError("nonexistent thread id")
        if res > 1:
            # Undo the multi-threadstate modification before failing.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(
                cls.running_threads[idx].th.ident, None)
            raise SystemError("PyThreadState_SetAsyncExc failed")

        logger.info('TASK ' + str(cls.running_threads[idx].th_name) + ' KILLED!')
        # un-register task number
        cls.task_options[cls.running_threads[idx].task_option_id] \
            .used_number.remove(cls.running_threads[idx].given_number)
        cls.running_threads.remove(cls.running_threads[idx])
        break