def get_window_modal_handlers(window):
    """Use ctypes to return the list of modal handlers registered on *window*.

    idname is 'UI' for UI handlers and 'UNKNOWN' for unrecognised ones.

    :rtype: list[(Structures.wmEventHandler, str, int, int, int)]
    """
    if not window:
        return []
    addr = window.as_pointer()
    # Reinterpret the raw Blender pointer as a wmWindow struct.
    win = cast(c_void_p(addr), POINTER(wmWindow)).contents
    handlers = []
    ptr = cast(win.modalhandlers.first, POINTER(wmEventHandler))
    while ptr:
        # http://docs.python.jp/3/library/ctypes.html#surprises
        # Be careful of the ctypes pitfalls described there
        # (temporary objects from attribute access).
        handler = ptr.contents
        area = handler.op_area  # None when the underlying pointer is NULL
        region = handler.op_region  # None when the underlying pointer is NULL
        idname = 'UNKNOWN'
        if handler.ui_handle:
            idname = 'UI'
        if handler.op:
            # Operator handlers report their operator type idname instead.
            op = handler.op.contents
            ot = op.type.contents
            if ot.idname:
                idname = ot.idname.decode()
        handlers.append((handler, idname, area, region, handler.op_region_type))
        ptr = handler.next
    return handlers
def run(self, element, context):
    """Enumerate every OpenMAX IL component via OMX_ComponentNameEnum.

    Logs each component name with its index.  Returns 0 when the
    enumeration terminates normally (OMX_ErrorNoMore), otherwise the raw
    integer error code of the failing call.
    """
    log_api ("%s" % element.tag)
    # Buffer that receives each component name from the IL core.
    cname = (c_ubyte * OMX_MAX_STRINGNAME_SIZE)()
    cnamelen = OMX_U32()
    # NOTE(review): immediately rebound to a plain int -- the OMX_U32
    # instance above is discarded; the int is what gets passed below.
    cnamelen = OMX_MAX_STRINGNAME_SIZE
    index = OMX_U32()
    index = 0  # same pattern as cnamelen: plain int is used
    err = OMX_ERRORTYPE()
    log_line ()
    while True:
        omxerror = OMX_ComponentNameEnum(cast(cname, POINTER(c_char)), cnamelen, index)
        # Mask to 32 bits: OMX error codes are unsigned 32-bit values.
        interror = int(omxerror & 0xffffffff)
        err = get_string_from_il_enum(interror, "OMX_Error")
        # Stop on the end-of-list marker or on any real error.
        if (err == "OMX_ErrorNoMore") or (err != "OMX_ErrorNone"):
            break
        log_line ("Component at index #%d : %s" \
                  % (index, cast(cname, c_char_p).value), 1)
        index = index + 1
    if (err == "OMX_ErrorNoMore"):
        # Exhausting the list is the success case.
        log_result(element.tag, "OMX_ErrorNone")
        return 0
    else:
        log_result(element.tag, err)
        return interror
def Run(self, unused_args):
    """Does the segfaulting."""
    if flags.FLAGS.debug:
        logging.warning("Segfault action requested :(")
        # Dereference address 1 through ctypes -- an invalid address, so
        # this crashes the process on purpose.  (Python 2 print statement.)
        print ctypes.cast(1, ctypes.POINTER(ctypes.c_void_p)).contents
    else:
        logging.warning("Segfault requested but not running in debug mode.")
def store_sensor_data(self, C=len(variables.CHANNELS)):
    """Collects the new data at every monitor interval.

    Pulls pending samples from the EDK handle and stores them in
    ``self.sensor_data`` as an (N, C) numpy array, where N is the number
    of samples taken since the last update and C the channel count.
    Leaves ``self.sensor_data`` untouched when no samples are pending.
    """
    # Updates the data array
    self.edk.EE_DataUpdateHandle(0, self.hData)
    self.edk.EE_DataGetNumberOfSample(self.hData, self.nSamplesTaken)
    N = self.nSamplesTaken[0]
    if N != 0:  # Only if we have collected > 0 samples
        # C-style array the EDK writes one channel's samples into.
        # (The original also did a `ctypes.cast(arr, ...)` here whose
        # result was discarded -- a no-op, removed.)
        arr = (ctypes.c_double * N)()
        # Create a numpy array and copy the C-style array data into it.
        data = np.zeros((N, C))
        for sample in range(N):
            for channel in range(C):
                # NOTE(review): EE_DataGet appears to fill `arr` with all N
                # samples of one channel, so calling it inside the sample
                # loop repeats the same fetch N times.  Call order is kept
                # unchanged to avoid altering hardware-facing behavior, but
                # this looks hoistable to the channel loop -- confirm.
                self.edk.EE_DataGet(self.hData, variables.CHANNELS[channel],
                                    byref(arr), N)
                data[sample, channel] = arr[sample]
        # Save data into an internal array
        self.sensor_data = data
def thumbnail_to_buffer(self):
    """ Convert the thumbnail data as an RGB buffer.

    Returns:
        bytearray: RGB data of the thumbnail.
    """
    self.unpack_thumb()
    status = ctypes.c_int(0)
    # libraw reports errors through this int*; casting addressof(status)
    # is equivalent to byref(status).
    processed_image = self.libraw.libraw_dcraw_make_mem_thumb(
        self.data,
        ctypes.cast(
            ctypes.addressof(status),
            ctypes.POINTER(ctypes.c_int),
        ),
    )
    raise_if_error(status.value)
    # View the returned struct's payload as a fixed-size byte array of the
    # advertised data_size, then copy it into Python-owned memory.
    data_pointer = ctypes.cast(
        processed_image.contents.data,
        ctypes.POINTER(ctypes.c_byte * processed_image.contents.data_size)
    )
    data = bytearray(data_pointer.contents)
    # Free the libraw-allocated buffer now that we hold our own copy.
    self.libraw.libraw_dcraw_clear_mem(processed_image)
    return data
def run(self):
    """
    All the real work happens herein; we can be called in one of three
    situations, determined by environment variables.  First couple are
    during the agent Install process, where the domain.h and namespace
    files need to be created.  The third case is the real mccoy, where
    an agent is actually being started by pmcd/dbpmda and makes use of
    libpcp_pmda to talk PCP protocol.
    """
    if ('PCP_PYTHON_DOMAIN' in os.environ):
        self.domain_write()
    elif ('PCP_PYTHON_PMNS' in os.environ):
        self.pmns_write(os.environ['PCP_PYTHON_PMNS'])
    else:
        # Normal agent startup: refresh metadata, marshal the indom and
        # metric tables into contiguous C buffers, then dispatch.
        self.pmns_refresh()
        cpmda.pmid_oneline_refresh(self._metric_oneline)
        cpmda.pmid_longtext_refresh(self._metric_helptext)
        cpmda.indom_oneline_refresh(self._indom_oneline)
        cpmda.indom_longtext_refresh(self._indom_helptext)
        numindoms = len(self._indomtable)
        ibuf = create_string_buffer(numindoms * sizeof(pmdaIndom))
        # View the raw buffer as a pmdaIndom array and copy entries in.
        indoms = cast(ibuf, POINTER(pmdaIndom))
        for i in xrange(numindoms):  # xrange: this module targets Python 2
            indoms[i] = self._indomtable[i]
        nummetrics = len(self._metrictable)
        mbuf = create_string_buffer(nummetrics * sizeof(pmdaMetric))
        metrics = cast(mbuf, POINTER(pmdaMetric))
        for i in xrange(nummetrics):
            metrics[i] = self._metrictable[i]
        # Hand the packed tables to the C dispatch entry point.
        cpmda.pmda_dispatch(ibuf.raw, numindoms, mbuf.raw, nummetrics)
def _get_properties(properties, length):
    """
    Convenience Function to get the material properties as a dict and values
    in a python format.
    """
    result = {}
    #read all properties
    for p in [properties[i] for i in range(length)]:
        #the name
        p = p.contents
        # Key is (name-after-dot, semantic), e.g. "$clr.diffuse" -> "diffuse".
        key = (str(p.mKey.data.decode("utf-8")).split('.')[1], p.mSemantic)
        #the data
        from ctypes import POINTER, cast, c_int, c_float, sizeof
        if p.mType == 1:
            # float array: reinterpret the raw buffer by element count
            arr = cast(p.mData, POINTER(c_float * int(p.mDataLength/sizeof(c_float)) )).contents
            value = [x for x in arr]
        elif p.mType == 3: #string can't be an array
            value = cast(p.mData, POINTER(structs.MaterialPropertyString)).contents.data.decode("utf-8")
        elif p.mType == 4:
            # int array
            arr = cast(p.mData, POINTER(c_int * int(p.mDataLength/sizeof(c_int)) )).contents
            value = [x for x in arr]
        else:
            # unrecognised type: keep the raw bytes
            value = p.mData[:p.mDataLength]
        # unwrap single-element sequences to a scalar
        if len(value) == 1:
            [value] = value
        result[key] = value
    return PropertyGetter(result)
def vlan_rtattr_handler(id, data, meta):
    """Decode a single IFLA_VLAN_* rtattr into a (name, value) pair.

    Unhandled attribute ids fall through and return None implicitly.
    """
    if id == if_link.IFLA_VLAN_ID:
        vid = ctypes.cast(data, ctypes.POINTER(ctypes.c_short)).contents
        return 'vlan_id', vid.value
    if id == if_link.IFLA_VLAN_FLAGS:
        flags_struct = ctypes.cast(
            data, ctypes.POINTER(if_link.IflaVlanFlags)).contents
        return 'vlan_flags', flags_struct.flags
def getValue(self):
    """Read the widget's current value, converting the raw gphoto2
    void* slot according to the widget type.

    Returns None for WINDOW/SECTION widgets (they carry no value);
    raises for BUTTON and unknown types.
    """
    if self.type in (GPWidget.WINDOW,GPWidget.SECTION):
        return None
    value = ctypes.c_void_p()
    rc = self.api.checkedGP.gp_widget_get_value(self.c, PTR(value))
    if self.type in (GPWidget.MENU, GPWidget.RADIO, GPWidget.TEXT):
        # The void* actually holds a char*.
        value = ctypes.cast(value, ctypes.c_char_p)
        value = value.value
    elif self.type == GPWidget.RANGE:
        if value.value is None:
            value = None
        else:
            # Reinterpret the pointer-sized slot's bytes as a C float.
            value = ctypes.cast(ctypes.pointer(value), ctypes.POINTER(ctypes.c_float))
            value = value.contents.value
    elif self.type in (GPWidget.TOGGLE,GPWidget.DATE):
        if value.value is None:
            # NULL means "unset"; report as 0 for toggle/date widgets.
            value = 0
        else:
            value = value.value
    elif self.type == GPWidget.BUTTON:
        raise Exception('Getting a value for a GPWidget.BUTTON should return a CameraWidgetCallback but we haven\'t built that yet')
    else:
        raise Exception('Unknown widget type %r'%self.type)
    # NOTE(review): rc is only validated after the conversions above ran.
    self.api.check(rc)
    return value
def backward_entry(num_ograds, num_igrads, ptrs, reqs, is_train, _):
    """Entry point for backward.

    Wraps the raw NDArray handles, calls the user-defined backward, and
    writes the returned gradients into the input-gradient arrays
    according to each gradient request (0=null, 1=write, 2=inplace,
    3=add).  Returns True on success, False after logging any exception.
    """
    # pylint: disable=W0613
    try:
        output_grads = [NDArray(ctypes.cast(i, NDArrayHandle), writable=False) \
                        for i in ptrs[:num_ograds]]
        input_grads = [NDArray(ctypes.cast(i, NDArrayHandle), writable=True) \
                       for i in ptrs[num_ograds:num_ograds+num_igrads]]
        reqs = [reqs[i] for i in range(num_igrads)]
        rets = self.backward(*output_grads)
        if isinstance(rets, NDArray):
            rets = (rets,)
        # Fixed: was self.__class__.name, which raised AttributeError when
        # the assertion actually fired.
        assert len(rets) == len(input_grads), \
            "%s.backward must return exactly the same number " \
            "of NDArrays as the number of NDArrays arguments to forward." \
            "Expecting %d got %d"%(self.__class__.__name__, len(input_grads),
                                   len(rets))
        for igrad, ret, req in zip(input_grads, rets, reqs):
            assert isinstance(ret, NDArray), \
                "autograd.Function.backward must return NDArrays, not %s"%type(ret)
            if req == 0:  # null
                # Fixed: was `return`, which silently skipped every
                # remaining gradient after the first null request.
                continue
            elif req == 1 or req == 2:  # write or inplace
                igrad[:] = ret
            elif req == 3:  # add
                # Fixed: was `req == 'add'`, which can never match the
                # integer req codes delivered by the C callback.
                igrad[:] += ret
    except Exception:  # pylint: disable=broad-except
        print('Error in Function.backward: %s' % traceback.format_exc())
        return False
    return True
def default_conv(n_messages, messages, p_response, app_data):
    """PAM conversation callback (Python 2).

    Allocates the response array, echoes info/error messages, and answers
    prompts by reading from stdin (echo on) or getpass (echo off).
    Always returns 0 (PAM_SUCCESS).
    """
    # PAM requires the response array to be heap-allocated by us; the PAM
    # stack takes ownership and frees it.
    addr = CALLOC(n_messages, sizeof(PamResponse))
    p_response[0] = cast(addr, POINTER(PamResponse))
    # Without a TTY we cannot prompt -- return the zeroed response set.
    if not os.isatty(sys.stdin.fileno()):
        return 0
    for i in range(n_messages):
        msg = messages[i].contents
        style = msg.msg_style
        msg_string = cast(msg.msg, c_char_p).value
        if style == PAM_TEXT_INFO or style == PAM_ERROR_MSG:
            # back from POINTER(c_char) to c_char_p
            print msg_string
        elif style == PAM_PROMPT_ECHO_ON:
            # trailing comma keeps the cursor on the prompt line
            print msg_string,
            sys.stdout.flush()
            # strdup: the response string must also be heap-allocated
            pw_copy = STRDUP(sys.stdin.readline())
            p_response.contents[i].resp = pw_copy
            p_response.contents[i].resp_retcode = 0
        elif style == PAM_PROMPT_ECHO_OFF:
            pw_copy = STRDUP(str(getpass.getpass(msg_string)))
            p_response.contents[i].resp = pw_copy
            p_response.contents[i].resp_retcode = 0
        else:
            # Unknown message style: dump it for debugging and carry on.
            print repr(messages[i].contents)
    return 0
def get_modal_handlers(context):
    """Return [(wmEventHandler, idname, area, region, region_type), ...]
    for every modal handler registered on the context's window.

    idname is the operator idname when an operator is attached, 'UI' for
    UI handlers, and 'UNKNOWN' otherwise.
    """
    window = context.window
    if not window:
        return []
    win_struct = cast(window.as_pointer(), POINTER(wmWindow)).contents
    result = []
    node = cast(win_struct.modalhandlers.first, POINTER(wmEventHandler))
    while node:
        entry = node.contents
        # op_area / op_region are None when the underlying pointer is NULL.
        name = 'UNKNOWN'
        if entry.ui_handle:
            name = 'UI'
        if entry.op:
            op_type = entry.op.contents.type.contents
            if op_type.idname:
                name = op_type.idname.decode()
        result.append(
            (entry, name, entry.op_area, entry.op_region,
             entry.op_region_type))
        node = entry.next
    return result
def get_characterset_name(self, attribute, override_value): """Retrieve and store the IANA character set name for the attribute.""" # if override value specified, use it if override_value: return override_value # get character set id c_charset_id = oci.ub2() # not using pythonic OCIAttrGet on purpose here. status = oci.OCIAttrGet(self.handle, oci.OCI_HTYPE_ENV, byref(c_charset_id), None, attribute, self.error_handle) self.check_for_error(status, "Environment_GetCharacterSetName(): get charset id") # get character set name c_charset_name_array = ctypes.create_string_buffer(oci.OCI_NLS_MAXBUFSZ) c_charset_name_pointer = ctypes.cast(c_charset_name_array, oci.OCINlsCharSetIdToName.argtypes[1]) status = oci.OCINlsCharSetIdToName(self.handle, c_charset_name_pointer, oci.OCI_NLS_MAXBUFSZ, c_charset_id) self.check_for_error(status, "Environment_GetCharacterSetName(): get Oracle charset name") # get IANA character set name c_iana_charset_name_array = ctypes.create_string_buffer(oci.OCI_NLS_MAXBUFSZ) c_iana_charset_name_pointer = ctypes.cast(c_iana_charset_name_array, oci.OCINlsNameMap.argtypes[1]) status = oci.OCINlsNameMap(self.handle, c_iana_charset_name_pointer, oci.OCI_NLS_MAXBUFSZ, c_charset_name_pointer, oci.OCI_NLS_CS_ORA_TO_IANA) self.check_for_error(status, "Environment_GetCharacterSetName(): translate NLS charset") return c_iana_charset_name_array.value
def _get_properties(properties, length):
    """
    Convenience Function to get the material properties as a dict and values
    in a python format.
    """
    result = {}
    #read all properties
    for p in [properties[i] for i in range(length)]:
        #the name
        p = p.contents
        key = str(p.mKey.data)
        #the data
        from ctypes import POINTER, cast, c_int, c_float, sizeof
        if p.mType == 1:
            # float array: reinterpret the raw buffer by element count
            arr = cast(p.mData, POINTER(c_float * int(p.mDataLength/sizeof(c_float)) )).contents
            value = numpy.array([x for x in arr])
        elif p.mType == 3: #string can't be an array
            value = cast(p.mData, POINTER(structs.String)).contents.data
        elif p.mType == 4:
            # int array
            arr = cast(p.mData, POINTER(c_int * int(p.mDataLength/sizeof(c_int)) )).contents
            value = numpy.array([x for x in arr])
        else:
            # unrecognised type: keep the raw bytes
            value = p.mData[:p.mDataLength]
        result[key] = value
    return result
def get_pointee_address(obj):
    """
    Returns the address of the struct pointed by the obj, or null if invalid.

    :param obj: a pointer.
    """
    import ctypes
    # check for homebrew POINTER
    if hasattr(obj, '_sub_addr_'):
        # print 'obj._sub_addr_', hex(obj._sub_addr_)
        return obj._sub_addr_
    elif isinstance(obj, int) or isinstance(obj, long):  # `long`: Python 2
        # basictype pointers are created as int.
        return obj
    elif not bool(obj):
        return 0
    # NOTE(review): is_function_type / is_pointer_type are not stdlib ctypes
    # -- presumably provided by this project's patched ctypes; verify.
    elif ctypes.is_function_type(type(obj)):
        return ctypes.cast(obj, ctypes.c_void_p).value
    elif ctypes.is_pointer_type(type(obj)):
        return ctypes.cast(obj, ctypes.c_void_p).value
        # check for null pointers
        # if bool(obj):
        # NOTE(review): everything below is unreachable after the return
        # above -- looks like leftovers from an earlier revision.
        if not hasattr(obj, 'contents'):
            return 0
        # print '** NOT MY HAYSTACK POINTER'
        return ctypes.addressof(obj.contents)
    else:
        return 0
def force_cast(RESTYPE, value):
    """Cast a value to a result type, trying to use the same rules as C.

    (Python 2 code: uses `unicode` and `long`.)
    """
    if not isinstance(RESTYPE, lltype.LowLevelType):
        raise TypeError("rffi.cast() first arg should be a TYPE")
    # Unwrap address-like wrappers to a plain lltype value first.
    if isinstance(value, llmemory.AddressAsInt):
        value = value.adr
    if isinstance(value, llmemory.fakeaddress):
        value = value.ptr or 0
    TYPE1 = lltype.typeOf(value)
    cvalue = lltype2ctypes(value)
    cresulttype = get_ctypes_type(RESTYPE)
    if isinstance(TYPE1, lltype.Ptr):
        if isinstance(RESTYPE, lltype.Ptr):
            # shortcut: ptr->ptr cast
            cptr = ctypes.cast(cvalue, cresulttype)
            return ctypes2lltype(RESTYPE, cptr)
        # first cast the input pointer to an integer
        cvalue = ctypes.cast(cvalue, ctypes.c_void_p).value
        if cvalue is None:
            cvalue = 0
    elif isinstance(cvalue, (str, unicode)):
        cvalue = ord(cvalue)     # character -> integer
    if not isinstance(cvalue, (int, long, float)):
        raise NotImplementedError("casting %r to %r" % (TYPE1, RESTYPE))
    if isinstance(RESTYPE, lltype.Ptr):
        # upgrade to a more recent ctypes (e.g. 1.0.2) if you get
        # an OverflowError on the following line.
        cvalue = ctypes.cast(ctypes.c_void_p(cvalue), cresulttype)
    else:
        cvalue = cresulttype(cvalue).value   # mask high bits off if needed
    return ctypes2lltype(RESTYPE, cvalue)
def py_next_item(p_id, p_mins, p_maxs, p_dimension, p_data, p_length):
    """This function must fill pointers to individual entries that will
    be added to the index.

    The C API will actually call this function to fill out the pointers.
    If this function returns anything other than 0, it is assumed that
    the stream of data is done.

    Note: stream_iter, mins, maxs, dimension and no_data come from the
    enclosing scope (this is a closure handed to the C library).
    """
    try:
        # .next(): Python 2 iterator protocol
        p_id[0], coordinates, obj = stream_iter.next()
    except StopIteration:
        # we're done
        return -1
    # set the id
    if self.interleaved:
        coordinates = Index.deinterleave(coordinates)
    # this code assumes the coords ar not interleaved.
    # xmin, xmax, ymin, ymax, zmin, zmax
    for i in range(dimension):
        mins[i] = coordinates[i*2]
        maxs[i] = coordinates[(i*2)+1]
    p_mins[0] = ctypes.cast(mins, ctypes.POINTER(ctypes.c_double))
    p_maxs[0] = ctypes.cast(maxs, ctypes.POINTER(ctypes.c_double))
    # set the dimension
    p_dimension[0] = dimension
    if obj is None:
        # no payload: hand back the shared sentinel buffer, length 0
        p_data[0] = no_data
        p_length[0] = 0
    else:
        p_length[0], data, _ = self._serialize(obj)
        p_data[0] = ctypes.cast(data, ctypes.POINTER(ctypes.c_ubyte))
    return 0
def test_SDL_CalculateGammaRamp(self):
    """Check SDL_CalculateGammaRamp for invalid args and for the ramps
    produced by gamma 0 (all zero), 1 (identity) and 0.5."""
    # TODO: more tests
    # Invalid argument types/values must raise TypeError.
    self.assertRaises(TypeError, pixels.SDL_CalculateGammaRamp, None)
    self.assertRaises(TypeError, pixels.SDL_CalculateGammaRamp, "Test")
    self.assertRaises(TypeError, pixels.SDL_CalculateGammaRamp, 7)
    self.assertRaises(TypeError, pixels.SDL_CalculateGammaRamp, -0.00002)
    # gamma 0: every ramp entry is 0
    vals = (Uint16 * 256)()
    pixels.SDL_CalculateGammaRamp(0, cast(vals, POINTER(Uint16)))
    self.assertEqual(len(vals), 256)
    for x in vals:
        self.assertEqual(x, 0)
    # gamma 1: identity ramp, entries step by 257 (0x0101)
    vals = (Uint16 * 256)()
    pixels.SDL_CalculateGammaRamp(1, cast(vals, POINTER(Uint16)))
    self.assertEqual(len(vals), 256)
    p = 0
    for x in vals:
        self.assertEqual(x, p)
        p += 257
    # gamma 0.5: quadratic-ish ramp with growing step
    vals = (Uint16 * 256)()
    pixels.SDL_CalculateGammaRamp(0.5, cast(vals, POINTER(Uint16)))
    self.assertEqual(len(vals), 256)
    p, step = 0, 1
    for x in vals:
        if p == 33124:
            # dubious rounding correction - is this really correct?
            p = 33123
        self.assertEqual(x, p)
        p = x + step
        step += 2
def _handler_keymaps(context, handlers):
    """Collect the active user keymaps referenced by the keymap handlers
    in *handlers*.

    Handlers flagged WM_HANDLER_DO_FREE (about to be freed) are skipped.
    Raises ValueError when a handler's keymap name is missing from the
    user key configuration.
    """
    if not handlers.first:
        return []
    wm = context.window_manager
    keymaps = []
    handler_ptr = ct.cast(ct.c_void_p(handlers.first),
                          ct.POINTER(structures.wmEventHandler))
    while handler_ptr:
        handler = handler_ptr.contents
        # Skip handlers that are marked to be freed.
        if not (ord(handler.flag) & structures.WM_HANDLER_DO_FREE):
            # (The original tested `if handler.keymap:` twice, nested --
            # the duplicate check was redundant and has been removed.)
            if handler.keymap:
                km = ct.cast(ct.c_void_p(handler.keymap),
                             ct.POINTER(wmKeyMap)).contents
                name = km.idname.decode()
                keymap = wm.keyconfigs.user.keymaps.get(name)
                if keymap:
                    keymaps.append(keymap.active())
                else:
                    # Mismatch: the handler references a keymap name that
                    # the user keyconfig does not know about.
                    raise ValueError()
        handler_ptr = handler.next
    return keymaps
def attach_uretprobe(self, name="", sym="", addr=None, fn_name="",
                     pid=-1, cpu=0, group_fd=-1):
    """attach_uretprobe(name="", sym="", addr=None, fn_name=""
                        pid=-1, cpu=0, group_fd=-1)

    Run the bpf function denoted by fn_name every time the symbol sym
    in the library or binary 'name' finishes execution. See
    attach_uprobe for meaning of additional parameters.
    """
    name = str(name)
    (path, addr) = BPF._check_path_symbol(name, sym, addr)
    self._check_probe_quota(1)
    fn = self.load_func(fn_name, BPF.KPROBE)
    # Event name must be tracefs-safe; the 'r_' prefix marks a return probe.
    ev_name = "r_%s_0x%x" % (self._probe_repl.sub("_", path), addr)
    desc = "r:uprobes/%s %s:0x%x" % (ev_name, path, addr)
    # id(self) is passed as the opaque callback cookie so the C reader
    # callback can route events back to this BPF instance.
    res = lib.bpf_attach_uprobe(fn.fd, ev_name.encode("ascii"),
                                desc.encode("ascii"), pid, cpu, group_fd,
                                self._reader_cb_impl,
                                ct.cast(id(self), ct.py_object))
    res = ct.cast(res, ct.c_void_p)
    if not res:
        raise Exception("Failed to attach BPF to uprobe")
    # Track the probe so it can be detached/cleaned up later.
    self._add_uprobe(ev_name, res)
    return self
def get_transport_addr(pdu):
    """Retrieves the IP source address from the PDU's reference to an
    opaque transport data struct.

    Only works when assuming the opaque structure is sockaddr_in or
    sockaddr_in6, which holds as long as only IPv4/IPv6-based netsnmp
    transports are in use.  Returns None when the family is unknown or
    the transport data is too short.
    """
    if pdu.transport_data_length <= 1:
        return
    # Peek the leading unsigned short of the opaque blob: for sockaddr_in
    # and sockaddr_in6 that is the address-family field.
    family = cast(pdu.transport_data, POINTER(c_ushort)).contents.value
    if family == AF_INET:
        addr_size, offset = IPADDR_SIZE, IPADDR_OFFSET
    elif family == AF_INET6:
        addr_size, offset = IP6ADDR_SIZE, IP6ADDR_OFFSET
    else:
        return
    # View the whole transport blob as a char buffer and slice out the
    # address bytes at the family-specific offset.
    blob = cast(pdu.transport_data,
                POINTER(c_char * pdu.transport_data_length)).contents
    return inet_ntop(family, blob[offset:offset + addr_size])
def attach_tracepoint(self, tp="", fn_name="", pid=-1, cpu=0, group_fd=-1):
    """attach_tracepoint(tp="", fn_name="", pid=-1, cpu=0, group_fd=-1)

    Run the bpf function denoted by fn_name every time the kernel tracepoint
    specified by 'tp' is hit. The optional parameters pid, cpu, and group_fd
    can be used to filter the probe. The tracepoint specification is simply
    the tracepoint category and the tracepoint name, separated by a colon.
    For example: sched:sched_switch, syscalls:sys_enter_bind, etc.

    To obtain a list of kernel tracepoints, use the tplist tool or cat the
    file /sys/kernel/debug/tracing/available_events.

    Example: BPF(text).attach_tracepoint("sched:sched_switch", "on_switch")
    """
    fn = self.load_func(fn_name, BPF.TRACEPOINT)
    (tp_category, tp_name) = tp.split(':')
    # id(self) is passed as the opaque callback cookie so the C reader
    # callback can route events back to this BPF instance.
    res = lib.bpf_attach_tracepoint(fn.fd, tp_category.encode("ascii"),
                                    tp_name.encode("ascii"), pid, cpu,
                                    group_fd, self._reader_cb_impl,
                                    ct.cast(id(self), ct.py_object))
    res = ct.cast(res, ct.c_void_p)
    if not res:
        raise Exception("Failed to attach BPF to tracepoint")
    # Keep the handle so the tracepoint can be detached later.
    self.open_tracepoints[tp] = res
    return self
def wl_ioctl(cmd, buff=''):
    """Issue a card-specific Apple80211 ioctl on interface en0.

    *cmd* is the card-specific request value; *buff* is the optional
    request payload (a 4-byte scratch buffer is used when empty).
    Returns the buffer contents as a string; raises on ioctl failure.
    """
    req = apple80211req()
    req.ifname = "en0\0"
    req.req_type = APPLE80211_IOC_CARD_SPECIFIC
    req.req_val = cmd
    if len(buff) != 0:
        # TODO: create_string_buffer appends a trailing '\0'.
        buff = ctypes.create_string_buffer(buff)
        req.req_data = ctypes.cast(buff, ctypes.c_void_p)
        # -1: exclude the trailing NUL added by create_string_buffer
        req.req_len = len(buff) - 1
    else:
        buff = ctypes.create_string_buffer(4)
        req.req_data = ctypes.cast(buff, ctypes.c_void_p)
        req.req_len = 4
    libSystem = ctypes.cdll.LoadLibrary("/usr/lib/libSystem.B.dylib")
    s = socket.socket()
    try:
        if libSystem.ioctl(s.fileno(), SIOCSA80211, ctypes.byref(req)) != 0:
            # Fetch errno via libSystem's __error() and raise with the
            # human-readable message.
            libSystem.__error.restype = ctypes.POINTER(ctypes.c_int)
            libSystem.strerror.restype = ctypes.c_char_p
            errno = libSystem.__error().contents.value
            raise Exception("ioctl error: %s" % libSystem.strerror(errno))
    finally:
        # Fixed: the socket was previously leaked when the ioctl raised,
        # since s.close() only ran on the success path.
        s.close()
    return ''.join(x for x in buff)
def test_iscsi_login_opts_setup(self):
    """Verify ISCSI_LOGIN_OPTIONS packs CHAP credentials and sets the
    matching InformationSpecified bits."""
    fake_username = '******'
    fake_password = '******'
    auth_type = constants.ISCSI_CHAP_AUTH_TYPE
    login_opts = iscsi_struct.ISCSI_LOGIN_OPTIONS(Username=fake_username,
                                                  Password=fake_password,
                                                  AuthType=auth_type)
    # Credentials are stored as PUCHAR pointers with explicit lengths.
    self.assertIsInstance(login_opts.Username, iscsi_struct.PUCHAR)
    self.assertIsInstance(login_opts.Password, iscsi_struct.PUCHAR)
    self.assertEqual(len(fake_username), login_opts.UsernameLength)
    self.assertEqual(len(fake_password), login_opts.PasswordLength)
    # Read the raw buffers back through ctypes to verify the stored bytes.
    username_struct_contents = ctypes.cast(
        login_opts.Username,
        ctypes.POINTER(ctypes.c_char * len(fake_username))).contents.value
    pwd_struct_contents = ctypes.cast(
        login_opts.Password,
        ctypes.POINTER(ctypes.c_char * len(fake_password))).contents.value
    self.assertEqual(six.b(fake_username), username_struct_contents)
    self.assertEqual(six.b(fake_password), pwd_struct_contents)
    # Supplying username, password and auth type must set all three
    # "specified" bits in the bitmap.
    expected_info_bitmap = (iscsi_struct.ISCSI_LOGIN_OPTIONS_USERNAME |
                            iscsi_struct.ISCSI_LOGIN_OPTIONS_PASSWORD |
                            iscsi_struct.ISCSI_LOGIN_OPTIONS_AUTH_TYPE)
    self.assertEqual(expected_info_bitmap,
                     login_opts.InformationSpecified)
def animationStackInfo( self ): ''' Returns the current animation stack and size ''' # Read in size (needed to read stack array) size = cast( control.kiibohd.Pixel_AnimationStack_HostSize, POINTER( c_uint16 ) )[0] # Update struct to use read size in order to determine size of stack class AnimationStack( Structure ): ''' C-Struct for AnimationStack See Macro/PixelMap/pixel.h ''' def __repr__(self): val = "(size={}, stack={})".format( self.size, self.stack, ) return val # Dynamic size for stack AnimationStack._fields_ = [ ( "size", c_uint16 ), ( "stack", POINTER( AnimationStackElement * size ) ), ] # Cast animation stack stack = cast( control.kiibohd.Pixel_AnimationStack, POINTER( AnimationStack ) )[0] return stack
def datetime_data(dtype):
    """Return (unit, numerator, denominator, events) from a datetime dtype

    (Python 2 code: uses the old `raise Type, msg` syntax and PyCObject.)
    """
    try:
        import ctypes
    except ImportError:
        raise RuntimeError, "Cannot access date-time internals without ctypes installed"

    if dtype.kind not in ['m','M']:
        raise ValueError, "Not a date-time dtype"

    obj = dtype.metadata[METADATA_DTSTR]
    class DATETIMEMETA(ctypes.Structure):
        # Mirrors the C-side datetime metadata struct layout.
        _fields_ = [('base', ctypes.c_int),
                    ('num', ctypes.c_int),
                    ('den', ctypes.c_int),
                    ('events', ctypes.c_int)]

    # Unwrap the PyCObject to the raw metadata pointer.
    func = ctypes.pythonapi.PyCObject_AsVoidPtr
    func.argtypes = [ctypes.py_object]
    func.restype = ctypes.c_void_p

    result = func(ctypes.py_object(obj))
    result = ctypes.cast(ctypes.c_void_p(result), ctypes.POINTER(DATETIMEMETA))

    struct = result[0]
    base = struct.base

    # FIXME: This needs to be kept consistent with enum in ndarrayobject.h
    from numpy.core.multiarray import DATETIMEUNITS
    obj = ctypes.py_object(DATETIMEUNITS)
    result = func(obj)
    # DATETIMEUNITS wraps a C array of unit-name strings indexed by `base`.
    _unitnum2name = ctypes.cast(ctypes.c_void_p(result), ctypes.POINTER(ctypes.c_char_p))

    return (_unitnum2name[base], struct.num, struct.den, struct.events)
def ConversationFunction(num_msg, msg, resp, _app_data_ptr):
    """Conversation function that will be provided to PAM modules.

    The function replies with a password for each message with
    PAM_PROMPT_ECHO_OFF style and just ignores the others.

    Note: `password` comes from the enclosing scope.
    """
    if num_msg > MAX_MSG_COUNT:
        logging.warning("Too many messages passed to conv function: [%d]",
                        num_msg)
        return PAM_BUF_ERR
    # The response array must be heap-allocated by us; on success the PAM
    # stack takes ownership and frees it.
    response = cf.calloc(num_msg, c.sizeof(PamResponse))
    if not response:
        logging.warning("calloc failed in conv function")
        return PAM_BUF_ERR
    resp[0] = c.cast(response, c.POINTER(PamResponse))
    for i in range(num_msg):
        if msg[i].contents.msg_style != PAM_PROMPT_ECHO_OFF:
            continue
        resp.contents[i].resp = cf.strndup(password, len(password))
        if not resp.contents[i].resp:
            logging.warning("strndup failed in conv function")
            # Roll back: free every password copy made so far, then the
            # response array itself, before reporting the error.
            for j in range(i):
                cf.free(c.cast(resp.contents[j].resp, c.c_void_p))
            cf.free(response)
            return PAM_BUF_ERR
        resp.contents[i].resp_retcode = 0
    return PAM_SUCCESS
def attach_kretprobe(self, event="", fn_name="", event_re="",
                     pid=-1, cpu=0, group_fd=-1):
    """Attach the BPF function *fn_name* to the return of kernel
    function *event* (or of every function matching *event_re*).

    When *event_re* is given, attachment is best-effort per matching
    function.  Returns self for chaining; raises when a direct (non-glob)
    attach fails.
    """
    # allow the caller to glob multiple functions together
    if event_re:
        for line in self._get_kprobe_functions(event_re):
            try:
                self.attach_kretprobe(event=line, fn_name=fn_name, pid=pid,
                                      cpu=cpu, group_fd=group_fd)
            except Exception:
                # Best-effort: some globbed functions may not be probeable.
                # (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                pass
        return
    event = str(event)
    self._check_probe_quota(1)
    fn = self.load_func(fn_name, BPF.KPROBE)
    # Event name must be tracefs-safe; 'r_' marks a return probe.
    ev_name = "r_" + event.replace("+", "_").replace(".", "_")
    desc = "r:kprobes/%s %s" % (ev_name, event)
    # id(self) is the opaque callback cookie routing events back here.
    res = lib.bpf_attach_kprobe(fn.fd, ev_name.encode("ascii"),
                                desc.encode("ascii"), pid, cpu, group_fd,
                                self._reader_cb_impl,
                                ct.cast(id(self), ct.py_object))
    res = ct.cast(res, ct.c_void_p)
    if not res:
        raise Exception("Failed to attach BPF to kprobe")
    self._add_kprobe(ev_name, res)
    return self
def wrapped_setter(p, user_data):
    """Bridge a C callback: unpack the typed payload pointed to by *p*
    and dispatch to the Python setter, forwarding the optional user
    data when one was supplied."""
    value_ptr = ctypes.cast(p, ctypes.POINTER(ctype))
    payload = ctypes.cast(user_data, ctypes.py_object)
    if payload.value is None:
        setter(value_ptr[0])
    else:
        setter(value_ptr[0], payload.value)
def attach_uprobe(self, name="", sym="", addr=None, fn_name="",
                  pid=-1, cpu=0, group_fd=-1):
    """attach_uprobe(name="", sym="", addr=None, fn_name=""
                     pid=-1, cpu=0, group_fd=-1)

    Run the bpf function denoted by fn_name every time the symbol sym in
    the library or binary 'name' is encountered. The real address addr may
    be supplied in place of sym. Optional parameters pid, cpu, and group_fd
    can be used to filter the probe.

    Libraries can be given in the name argument without the lib prefix, or
    with the full path (/usr/lib/...). Binaries can be given only with the
    full path (/bin/sh).

    Example: BPF(text).attach_uprobe("c", "malloc")
             BPF(text).attach_uprobe("/usr/bin/python", "main")
    """
    name = str(name)
    (path, addr) = BPF._check_path_symbol(name, sym, addr)
    self._check_probe_quota(1)
    fn = self.load_func(fn_name, BPF.KPROBE)
    # Event name must be tracefs-safe; 'p_' marks an entry probe.
    ev_name = "p_%s_0x%x" % (self._probe_repl.sub("_", path), addr)
    desc = "p:uprobes/%s %s:0x%x" % (ev_name, path, addr)
    # id(self) is passed as the opaque callback cookie so the C reader
    # callback can route events back to this BPF instance.
    res = lib.bpf_attach_uprobe(fn.fd, ev_name.encode("ascii"),
                                desc.encode("ascii"), pid, cpu, group_fd,
                                self._reader_cb_impl,
                                ct.cast(id(self), ct.py_object))
    res = ct.cast(res, ct.c_void_p)
    if not res:
        raise Exception("Failed to attach BPF to uprobe")
    # Track the probe so it can be detached/cleaned up later.
    self._add_uprobe(ev_name, res)
    return self
def get_vlist(self, vset, vlist_idx):
    """ Return the pmValue at *vlist_idx* within the value list of *vset* """
    values = cast(vset.contents.vlist, POINTER(pmValue))
    return values[vlist_idx]
def callback(mode, first, count, primcount):
    """Emulate glMultiDrawArrays: issue one glDrawArrays call per
    primitive, reading starts/counts from the raw C arrays."""
    firsts = ctypes.cast(first, ctypes.POINTER(ctypes.c_int))
    counts = ctypes.cast(count, ctypes.POINTER(ctypes.c_uint))
    for idx in range(primcount):
        glDrawArrays(mode, firsts[idx], counts[idx])
del tprev if 'scipy.special' not in sys.modules and __name__ == '__main__': tprev = timer() print_("Importing scipy.special...", end=' ', file=sys.stderr) try: SetConsoleCtrlHandler_body_new = b'\xC2\x08\x00' if ctypes.sizeof( ctypes.c_void_p) == 4 else b'\xC3' try: SetConsoleCtrlHandler_body = ( lambda kernel32: (lambda pSetConsoleCtrlHandler: kernel32.VirtualProtect( pSetConsoleCtrlHandler, ctypes.c_size_t(1), 0x40, ctypes.byref(ctypes.c_uint32(0))) and (ctypes.c_char * 3).from_address(pSetConsoleCtrlHandler.value)) (ctypes.cast(kernel32.SetConsoleCtrlHandler, ctypes.c_void_p)))( ctypes.windll.kernel32) except: SetConsoleCtrlHandler_body = None if SetConsoleCtrlHandler_body: SetConsoleCtrlHandler_body_old = SetConsoleCtrlHandler_body[ 0:len(SetConsoleCtrlHandler_body_new)] SetConsoleCtrlHandler_body[0:len(SetConsoleCtrlHandler_body_new )] = SetConsoleCtrlHandler_body_new try: import scipy.special finally: if SetConsoleCtrlHandler_body: SetConsoleCtrlHandler_body[ 0:len(SetConsoleCtrlHandler_body_new )] = SetConsoleCtrlHandler_body_old
def pqos_init_mock(cfg_ref):
    "Mock pqos_init() that checks the verbosity level passed in the config."
    cfg = ctypes.cast(cfg_ref, ctypes.POINTER(CPqosConfig)).contents
    self.assertEqual(cfg.verbose, expected_verbose)
    return 0
def gf_yo(x_yi, x_dllo=False):
    """Recover the live Python object whose id() is *x_yi*.

    When *x_dllo* is truthy the id is also handed to gp_dllo afterwards.
    The referenced object must still be alive, otherwise this is
    undefined behaviour (the cast dereferences a raw address).
    """
    recovered = ctypes.cast(x_yi, ctypes.py_object).value
    if x_dllo:
        gp_dllo(x_yi)
    return recovered
def _checkError(error):
    """Raise Error if the leveldb call reported one, freeing the C string.

    No-op when *error* is a NULL pointer (no error occurred).
    """
    if not bool(error):
        return
    message = ctypes.string_at(error)
    # The error string was allocated by leveldb; release it before raising.
    _ldb.leveldb_free(ctypes.cast(error, ctypes.c_void_p))
    raise Error(message)
def get_global(self, name, typ):
    """Return a ctypes pointer of type *typ* to the JIT'd global *name*."""
    raw_addr = self.engine.get_global_value_address(name)
    return ct.cast(ct.c_void_p(raw_addr), typ)
def _to_ctypes(cls, value):
    """Convert a CTypesData *value* into this class's ctypes
    representation; reject anything else with TypeError."""
    if isinstance(value, CTypesData):
        return ctypes.cast(value._convert_to_address(cls), cls._ctype)
    raise TypeError("unexpected %s object" % type(value).__name__)
def worker(camId):
    """Per-camera capture loop.

    Configures the Aravis camera named in CAM_CONFIG[camId], prints its
    capabilities, then loops: pops frames from the stream, optionally
    overlays the current tuning values, shows the frame in an OpenCV
    window and writes it to CACHE_PATH.  Gain/exposure/trigger settings
    are adjusted live through single-key bindings.

    NOTE(review): block structure below was reconstructed from collapsed
    source -- verify indentation against the original file.
    """
    CAM_NAME = CAM_CONFIG[camId]['name']
    WINDOW_NAME = CAM_CONFIG[camId]['window']
    PIXEL_CONFIG = Aravis.PIXEL_FORMAT_MONO_8
    if (CAM_CONFIG[camId]['pixel_format']=="BAYERRG8"):
        PIXEL_CONFIG = Aravis.PIXEL_FORMAT_BAYER_RG_8
    try:
        cam = Aravis.Camera.new(CAM_NAME)
        print ("Camera found")
    except:
        print ("Camera Not Found")
        exit ()
    cam.set_pixel_format (PIXEL_CONFIG)
    #cam.get_device().set_string_feature_value("TriggerSource", "Line3")
    #cam.get_device().set_string_feature_value("GainAuto", "Off")
    #cam.set_acquisition_mode(Aravis.AcquisitionMode.CONTINUOUS)
    #cam.set_trigger('On')
    stream = cam.create_stream (None, None)
    #cam.get_device().set_string_feature_value("TriggerActivation", 'FallingEdge')
    #cam.set_exposure_time(1000)
    #cam.set_gain_auto(Aravis.Auto(2)) #auto gain
    payload = cam.get_payload()
    [x,y,width,height] = cam.get_region()
    print(cam.get_device().get_string_feature_value("TriggerMode"))
    print(cam.get_device().get_available_enumeration_feature_values_as_strings("TriggerSource"))
    print(cam.get_device().get_available_enumeration_feature_values_as_strings("TriggerActivation"))
    print ("Camera vendor : %s" %(cam.get_vendor_name ()))
    print ("Camera model : %s" %(cam.get_model_name ()))
    print ("Camera id : %s" %(cam.get_device_id ()))
    print ("ROI : %dx%d at %d,%d" %(width, height, x, y))
    print ("Payload : %d" %(payload))
    print ("Pixel format : %s" %(cam.get_pixel_format_as_string ()))
    print ("Trigger Source : %s" %(cam.get_trigger_source()))
    print ("Trigger Activation : %s" %(cam.get_device().get_string_feature_value("TriggerActivation")))
    print ("Acquisition Mode : %s" %(cam.get_acquisition_mode()))
    print ("Pixel Formats : %s" %(cam.get_available_pixel_formats_as_display_names()))
    cv2.namedWindow(WINDOW_NAME, flags=0)
    cam.start_acquisition()
    lastTime = time.time()

    # Helpers: write a camera feature, then read it back for display.
    def changeCamStringValue(feature, value):
        cam.get_device().set_string_feature_value(feature, value)
        return cam.get_device().get_string_feature_value(feature)
    def changeCamFloatValue(feature, value):
        cam.get_device().set_float_feature_value(feature, value)
        return cam.get_device().get_float_feature_value(feature)
    def changeCamIntegerValue(feature, value):
        cam.get_device().set_integer_feature_value(feature, value)
        return cam.get_device().get_integer_feature_value(feature)

    lastSnapshot = None
    # Initial readback of the tunable features shown in the overlay.
    GAIN_AUTO = cam.get_device().get_string_feature_value("GainAuto")
    EXPOSURE_AUTO = cam.get_device().get_string_feature_value("ExposureAuto")
    GAIN = cam.get_device().get_float_feature_value("Gain")
    EXPOSURE = cam.get_device().get_float_feature_value("ExposureTime")
    EXPOSURE_AUTO_MIN = cam.get_device().get_float_feature_value("AutoExposureTimeMin")
    EXPOSURE_AUTO_MAX = cam.get_device().get_float_feature_value("AutoExposureTimeMax")
    GAIN_AUTO_MIN = cam.get_device().get_float_feature_value("AutoGainMin")
    GAIN_AUTO_MAX = cam.get_device().get_float_feature_value("AutoGainMax")
    TRIGGER_DELAY = cam.get_device().get_float_feature_value("TriggerDelay")
    EXPECTED_GRAY = cam.get_device().get_integer_feature_value("ExpectedGrayValue")
    SHOW_VALUES = False
    UNIT = 10        # step size applied by the +/- key bindings
    UNIT_MULTI = 1   # power-of-ten exponent used when changing UNIT
    while(True):
        now = datetime.datetime.now()
        #night-mode (disabled)
        if False:
            cam.set_exposure_time(10000)
            cam.get_device().set_string_feature_value("Gain", 10.0)
        #day-mode (disabled)
        if False:
            cam.set_exposure_time(500)
            cam.get_device().set_string_feature_value("Gain", 0.0)
        # Recycle a fresh buffer into the stream, then take the next frame.
        stream.push_buffer(Aravis.Buffer.new_allocate(payload))
        buffer = stream.pop_buffer ()
        k = cv2.waitKey(1)
        # Key bindings: toggle overlay, switch auto modes, nudge values.
        if k==113: #q
            SHOW_VALUES=True
        if k==97: #a
            SHOW_VALUES=False
        if k==49: #1
            GAIN_AUTO=changeCamStringValue('GainAuto', 'Continuous')
            GAIN = cam.get_device().get_float_feature_value("Gain")
        if k==50: #2
            GAIN_AUTO=changeCamStringValue('GainAuto', 'Off')
            GAIN = cam.get_device().get_float_feature_value("Gain")
        if k==51: #3
            EXPOSURE_AUTO=changeCamStringValue('ExposureAuto', 'Continuous')
            EXPOSURE = cam.get_device().get_float_feature_value("ExposureTime")
        if k==32: #4
            EXPOSURE_AUTO=changeCamStringValue('ExposureAuto', 'Off')
            EXPOSURE = cam.get_device().get_float_feature_value("ExposureTime")
        if k==111: #o
            EXPOSURE_AUTO_MIN=changeCamFloatValue('AutoExposureTimeMin', EXPOSURE_AUTO_MIN+UNIT)
        if k==108: #l
            EXPOSURE_AUTO_MIN=changeCamFloatValue('AutoExposureTimeMin', EXPOSURE_AUTO_MIN-UNIT)
        if k==105: #i
            EXPOSURE_AUTO_MAX=changeCamFloatValue('AutoExposureTimeMax', EXPOSURE_AUTO_MAX+UNIT)
        if k==107: #k
            EXPOSURE_AUTO_MAX=changeCamFloatValue('AutoExposureTimeMax', EXPOSURE_AUTO_MAX-UNIT)
        if k==121: #y
            GAIN_AUTO_MIN=changeCamFloatValue('AutoGainMin', GAIN_AUTO_MIN+UNIT)
        if k==104: #h
            GAIN_AUTO_MIN=changeCamFloatValue('AutoGainMin', GAIN_AUTO_MIN-UNIT)
        if k==117: #u
            GAIN_AUTO_MAX=changeCamFloatValue('AutoGainMax', GAIN_AUTO_MAX+UNIT)
        if k==106: #j
            GAIN_AUTO_MAX=changeCamFloatValue('AutoGainMax', GAIN_AUTO_MAX-UNIT)
        if k==116: #t
            TRIGGER_DELAY=changeCamFloatValue('TriggerDelay', TRIGGER_DELAY+UNIT)
        if k==103: #g
            TRIGGER_DELAY=changeCamFloatValue('TriggerDelay', TRIGGER_DELAY-UNIT)
        if k==114: #r
            EXPECTED_GRAY=changeCamFloatValue('ExpectedGrayValue', EXPECTED_GRAY+UNIT)
        if k==102: #f
            EXPECTED_GRAY=changeCamFloatValue('ExpectedGrayValue', EXPECTED_GRAY-UNIT)
        if k==118: #v
            GAIN=changeCamFloatValue('Gain', GAIN+UNIT)
        if k==98: #b
            GAIN=changeCamFloatValue('Gain', GAIN-UNIT)
        if k==110: #n
            EXPOSURE=changeCamFloatValue('ExposureTime', EXPOSURE+UNIT)
        if k==109: #m
            EXPOSURE=changeCamFloatValue('ExposureTime', EXPOSURE-UNIT)
        if k==119: #w
            UNIT = UNIT + 10**UNIT_MULTI
        if k==115: #s
            UNIT = UNIT - 10**UNIT_MULTI
        if k==120: #x
            UNIT = 10
        # NOTE(review): key codes 119/115 are bound twice ('w'/'e' and
        # 's'/'d' comments) -- both bodies fire on the same key.
        if k==119: #e
            UNIT_MULTI = UNIT_MULTI + 1
        if k==115: #d
            UNIT_MULTI = UNIT_MULTI - 1
        if k==99: #c
            UNIT_MULTI = 1
        if(buffer):
            # Wrap the raw frame bytes as a numpy view, then demosaic.
            b = ctypes.cast(buffer.data,ctypes.POINTER(ctypes.c_uint8))
            im = np.ctypeslib.as_array(b, (height, width))
            rgb = cv2.cvtColor(im, cv2.COLOR_BayerRG2RGB)
            img = rgb.copy()
            if SHOW_VALUES:
                cv2.putText(img, "GAIN AUTO: "+str(GAIN_AUTO), (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
                cv2.putText(img, "EXPOSURE AUTO: "+str(EXPOSURE_AUTO), (100, 150), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
                cv2.putText(img, "EXPOSURE AUTO MIN: "+str(EXPOSURE_AUTO_MIN), (100, 200), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
                cv2.putText(img, "EXPOSURE AUTO MAX:" +str(EXPOSURE_AUTO_MAX), (100, 250), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
                cv2.putText(img, "GAIN AUTO MIN: "+str(GAIN_AUTO_MIN), (100, 300), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
                # NOTE(review): label says MIN but shows GAIN_AUTO_MAX.
                cv2.putText(img, "GAIN AUTO MIN: "+str(GAIN_AUTO_MAX), (100, 350), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
                cv2.putText(img, "TRIGGER DELAY: "+str(TRIGGER_DELAY), (100, 400), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
                cv2.putText(img, "EXPECTED GRAY: "+str(EXPECTED_GRAY), (100, 450), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
                cv2.putText(img, "EXPOSURE: "+str(EXPOSURE), (100, 500), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
                cv2.putText(img, "GAIN: "+str(GAIN), (100, 550), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
                cv2.putText(img, "UNIT: "+str(UNIT), (100, 600), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
                cv2.putText(img, "UNIT MULTIPLIER: "+str(UNIT_MULTI), (100, 650), cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255,0,0),2,cv2.LINE_AA)
            cv2.imshow(WINDOW_NAME, img)
            uid = uuid.uuid4()
            #name will be ID_XXXX_CAM_XXXX_UNIX_XXXX
            imageName = "ID="+str(uid)+"_CAM="+CAM_CONFIG[camId]['ref']+"_UNIX="+str(round(time.time()*1000))+".png"
            cv2.imwrite(CACHE_PATH+imageName,im.copy())
            print('Camera ', WINDOW_NAME, ' was triggered at ', time.time())
            lastTime = time.time()
            cv2.waitKey(1)
    # NOTE(review): unreachable -- the while(True) loop above never breaks.
    cam.stop_acquisition ()
def _from_ctypes(cls, ctypes_ptr):
    """Build an instance wrapping the address held by *ctypes_ptr*.

    A NULL pointer (whose ``c_void_p`` value is ``None``) is normalised
    to address 0 before delegating to ``_new_pointer_at``.
    """
    raw = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value
    if not raw:
        raw = 0
    return cls._new_pointer_at(raw)
"""WinV is a package for controlling the sound volume in windows. It takes use of the PyCaw library, which operates on a logarithmic scale between 0 and -28 , and converts it to a more readable, 0-100 scale using polynomial functions """ from ctypes import cast, POINTER from comtypes import CLSCTX_ALL import warnings from pycaw.pycaw import IAudioEndpointVolume, AudioUtilities devices = AudioUtilities.GetSpeakers() interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None) volume = cast(interface, POINTER(IAudioEndpointVolume)) ERROR_MESSAGE = 'System volume can only be set between 0 to 100, but {} was entered' def scale(x): ''' Takes in a value between 0 and 100, and converts it to the systems logarithmic scale, between 0 and -28. ''' if x < 0 or x > 100: warnings.warn( f'{x} is not between 0 and 100, and therefore is it possible for the output to be outside of the systems volume scale 0-(-28)' ) x4 = -3.20113 * (10**-7) * (x**4) x3 = 8.94101 * (10**-5) * (x**3) x2 = -1.00955 * (10**-2) * (x**2)
def _new_pointer_at(cls, address):
    """Construct an instance bound to *address*, bypassing __init__.

    The instance records the raw integer address and a ctypes pointer
    of the class's declared pointer type (``cls._ctype``).
    """
    instance = cls.__new__(cls)
    instance._address = address
    instance._as_ctype_ptr = ctypes.cast(address, cls._ctype)
    return instance
def format(self):
    """Return the buffer's format string as bytes-decoded ASCII text.

    Returns None when the underlying view has a NULL format pointer.
    """
    addr = self._view.format
    if addr is None:
        return None
    raw = ctypes.cast(addr, ctypes.c_char_p).value
    return raw.decode('ascii')
} """, b""" #version 120 varying vec4 out_color; void main() { gl_FragColor = out_color; } """ ] shader_handles = [] for i, source in enumerate(shaders_sources): handle = glCreateShader(GL_VERTEX_SHADER if i == 0 else GL_FRAGMENT_SHADER) string_buffer = create_string_buffer(source) glShaderSource(handle, 1, cast(pointer(pointer(string_buffer)), POINTER(POINTER(c_char))), None) glCompileShader(handle) shader_handles.append(handle) # Create attributes. position_name = create_string_buffer(b'position') position_index = 0 color_name = create_string_buffer(b'color') # NEW color_index = 1 # Create program. program_handle = glCreateProgram() glAttachShader(program_handle, shader_handles[0]) glAttachShader(program_handle, shader_handles[1])
def memory_writer(buf, format_name, filter_name=None):
    """Context manager yielding an ArchiveWrite that writes into *buf*.

    Opens a new write archive of the given format (and optional filter)
    backed by the caller-supplied memory buffer; the archive is torn
    down by ``new_archive_write`` when the ``with`` block exits.
    """
    with new_archive_write(format_name, filter_name) as archive_p:
        # Out-parameter the C side fills with the number of bytes written.
        bytes_used = byref(c_size_t())
        target = cast(buf, c_void_p)
        ffi.write_open_memory(archive_p, target, len(buf), bytes_used)
        yield ArchiveWrite(archive_p)
def _to_ssize_tuple(self, addr):
    """Read ``self._view.ndim`` ssize_t values starting at *addr*.

    Returns them as a tuple of ints, or None when *addr* is None
    (i.e. the corresponding buffer field is NULL).
    """
    from ctypes import cast, POINTER, c_ssize_t
    if addr is None:
        return None
    values = cast(addr, POINTER(c_ssize_t))
    return tuple(values[i] for i in range(self._view.ndim))
def get_master():
    """Return the current master volume level from the default endpoint.

    Also prints the raw level (on the system's logarithmic dB scale)
    for debugging purposes.
    """
    endpoint = cast(interface, POINTER(IAudioEndpointVolume))
    level = endpoint.GetMasterVolumeLevel()
    print(level)
    return level
def tobytes(self):
    """Copy the first ``self._len`` bytes of the backing buffer as bytes."""
    char_view = cast(self.buffer, POINTER(c_char))
    return char_view[0:self._len]
def VarArrayFunction(f, mode, name, optional):
    """Call the foreign function *f* with a VArg out-parameter and decode
    the VarArray it returns into Python values.

    :param f: ctypes foreign function taking ``(mode, POINTER(VArg)[, optional])``.
    :param mode: integer mode flag forwarded to *f*.
    :param name: function name, used only for log messages.
    :param optional: extra argument for *f*, or None to omit it.
    :returns: a list of str / float / int depending on the returned dtype;
        for byte streams a pandas DataFrame (or ``[values, header]`` when
        numpy/pandas are not importable); an empty list otherwise.
    """
    varg = VArg(0, None, 0, 0)
    p = ctypes.POINTER(VArg)(varg)
    if optional is not None:
        f(mode, p, optional)
    else:
        logger.debug("Calling function {} with arguments {}".format(name, (mode, p)))
        f(mode, p)
    logger.debug("Successively called and returned from function {}".format(name))
    # The callee fills varg.p with a pointer to a VarArray describing the result.
    var_arr = ctypes.cast(varg.p, ctypes.POINTER(VarArray)).contents
    l = list()
    if varg.dtype == 0x2008 and var_arr.length != 0:  # CString
        data = ctypes.cast(var_arr.data, ctypes.POINTER(POINTER * var_arr.length))
        for s in data.contents:
            if s == 0:
                # NULL string pointer — skip.
                continue
            else:
                # The string's length byte lives HEADER_SIZE bytes before the
                # character data; Delphi strings are UTF-16, so halve it.
                length = ctypes.cast(s - HEADER_SIZE, ctypes.POINTER(ctypes.c_uint8)).contents.value
                if is_delphi():
                    length = int(length / 2)
                s = ctypes.cast(s, ctypes.POINTER(ctypes.c_int16 * length))
                s = u''.join([chr(x) for x in s.contents[:]])
                if s.lower() != 'none':
                    l.append(s)
    elif varg.dtype == 0x2005 and var_arr.length != 0:  # Float64
        data = ctypes.cast(var_arr.data, ctypes.POINTER(ctypes.c_double * var_arr.length))
        # Converting CFloat to Python float, more efficiency could be gained by using NumPy
        # TODO: Consider making numpy/pandas a dependency?
        for i in data.contents:
            l.append(i)
    elif varg.dtype == 0x2003 and var_arr.length != 0:  # Int32
        data = ctypes.cast(var_arr.data, ctypes.POINTER(ctypes.c_int32 * var_arr.length))
        # Converting CInt32 to Python float, more efficiency could be gained by using NumPy
        # TODO: Consider making numpy/pandas a dependency?
        for i in data.contents:
            l.append(i)
    elif varg.dtype == 0x2011 and var_arr.length != 0:  # ByteStream
        signature = ctypes.cast(var_arr.data, ctypes.POINTER(ctypes.c_int32)).contents.value
        if signature != 43756:
            logger.warning("ByteStream did not contain expected signature. Found {} but expected 43756".format(signature))
        else:
            # data = ctypes.cast(var_arr.data, ctypes.POINTER(ctypes.c_int32 * 4))
            # signature, version, size, mode = data.contents
            # Walk the stream manually: int32 header fields, then a header
            # string, then rows of float32 data.
            p = ctypes.cast(var_arr.data, ctypes.POINTER(ctypes.c_int32))
            a_ptr = ctypes.cast(p, ctypes.c_void_p)
            a_ptr.value += ctypes.sizeof(p._type_)
            version = ctypes.cast(a_ptr, ctypes.POINTER(ctypes.c_int32)).contents.value
            a_ptr.value += ctypes.sizeof(p._type_)
            size = ctypes.cast(a_ptr, ctypes.POINTER(ctypes.c_int32)).contents.value
            a_ptr.value += ctypes.sizeof(p._type_)
            mode = ctypes.cast(a_ptr, ctypes.POINTER(ctypes.c_int32)).contents.value
            logger.debug("version={version}, size={size}, mode={mode}".format(version=version, size=size, mode=mode))
            a_ptr.value += ctypes.sizeof(p._type_)
            header = ctypes.cast(a_ptr, ctypes.POINTER(ctypes.c_char * 256)).contents.value
            header = [i.strip() for i in header.decode('ascii').strip().rstrip(',').split(',')]
            # NOTE(review): the header is read as 256 chars but the pointer
            # advances 256 * sizeof(int32) bytes here, while the count below
            # assumes a 272-byte prefix — confirm the on-wire layout before
            # changing anything in this branch.
            a_ptr.value = a_ptr.value + 256 * ctypes.sizeof(p._type_)
            count = (var_arr.length - 272) / 4 / (size + 2)
            if int(count) != count:
                logger.error(
                    "Expected count to be integer but found count={count}".format(
                        count=count,
                    )
                )
            else:
                count = int(count)
                data = ctypes.cast(a_ptr, ctypes.POINTER(ctypes.c_float * (size + 2) * count))
                for row in data.contents[:]:
                    for i, v in enumerate(row[:]):
                        l.append(v)
                try:
                    # numpy/pandas are optional imports elsewhere in the
                    # module; fall back to [values, header] without them.
                    l = np.array(l).reshape([-1, len(header)])
                    l = pd.DataFrame(l, columns=header)
                except NameError:
                    l = [l, header]
    elif var_arr.length == 0:
        logger.warning("Empty var_arr found")
    else:
        logger.warning("Unsupported dtype {} returned for {}. Please contact developer".format(varg.dtype, name))
    return l
def write_cb_internal(archive_p, context, buffer_, length):
    """libarchive write callback: forward *length* bytes to write_func.

    Copies the C buffer into a Python bytes object and returns whatever
    the wrapped writer reports (expected: number of bytes consumed).
    """
    chunk = cast(buffer_, POINTER(c_char * length))[0]
    return write_func(chunk)
def test_mem_2():
    """Verify the GDAL MEM driver: a bare 'MEM:::' open must fail, while a
    DATAPOINTER-based name must expose a malloc'd C buffer as a raster band
    (read via Checksum, write via Fill) with the changes visible in the
    original memory."""
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.Open('MEM:::')
    gdal.PopErrorHandler()
    assert ds is None, 'opening MEM dataset should have failed.'
    try:
        import ctypes
    except ImportError:
        pytest.skip()
    # Locate a C runtime exposing malloc/free (Windows, then glibc).
    for libname in ['msvcrt', 'libc.so.6']:
        try:
            crt = ctypes.CDLL(libname)
        except OSError:
            crt = None
        if crt is not None:
            break
    if crt is None:
        pytest.skip()
    malloc = crt.malloc
    malloc.argtypes = [ctypes.c_size_t]
    malloc.restype = ctypes.c_void_p
    free = crt.free
    free.argtypes = [ctypes.c_void_p]
    free.restype = None
    # allocate band data array.
    width = 50
    height = 3
    p = malloc(width * height * 4)  # Float32 pixels -> 4 bytes each
    if p is None:
        pytest.skip()
    float_p = ctypes.cast(p, ctypes.POINTER(ctypes.c_float))
    # build ds name: once with explicit offsets, once with defaults.
    dsnames = [
        'MEM:::DATAPOINTER=0x%X,PIXELS=%d,LINES=%d,BANDS=1,DATATYPE=Float32,PIXELOFFSET=4,LINEOFFSET=%d,BANDOFFSET=0' % (p, width, height, width * 4),
        'MEM:::DATAPOINTER=0x%X,PIXELS=%d,LINES=%d,DATATYPE=Float32' % (p, width, height)
    ]
    for dsname in dsnames:
        # Re-fill the buffer so each dsname starts from a known state.
        for i in range(width * height):
            float_p[i] = 5.0
        ds = gdal.Open(dsname)
        if ds is None:
            free(p)
            pytest.fail('opening MEM dataset failed.')
        chksum = ds.GetRasterBand(1).Checksum()
        if chksum != 750:
            print(chksum)
            free(p)
            pytest.fail('checksum failed.')
        ds.GetRasterBand(1).Fill(100.0)
        ds.FlushCache()
        # Fill must have written through to the caller-owned buffer.
        if float_p[0] != 100.0:
            print(float_p[0])
            free(p)
            pytest.fail('fill seems to have failed.')
        ds = None
    free(p)
def cb(cpu, data, size):
    """Per-CPU buffer callback: decode *data* as a Data struct and record it."""
    # The raw event must be at least big enough to hold a Data record.
    self.assertGreater(size, ct.sizeof(Data))
    record = ct.cast(data, ct.POINTER(Data)).contents
    self.events.append(record)
def creator(op_type, argc, keys, vals, ret):
    """internal function

    C callback invoked when MXNet instantiates a registered custom-op
    prop class: builds an ``op_prop`` from the (keys, vals) kwargs and
    fills *ret* with an MXCallbackList wiring all CustomOpProp callbacks.
    Each entry returns True on success, False after printing a traceback.
    Note the ``_ref_holder`` assignments throughout: they pin ctypes
    objects so they are not garbage-collected while the C side holds
    pointers into them.
    """
    assert py_str(op_type) == reg_name
    kwargs = dict([(py_str(keys[i]), py_str(vals[i])) for i in range(argc)])
    op_prop = prop_cls(**kwargs)

    def infer_shape_entry(num_tensor, tensor_dims, tensor_shapes, _):
        """C Callback for ``CustomOpProp::InferShape``."""
        try:
            n_in = len(op_prop.list_arguments())
            n_out = len(op_prop.list_outputs())
            n_aux = len(op_prop.list_auxiliary_states())
            assert num_tensor == n_in + n_out + n_aux
            shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])]
                      for i in range(n_in)]
            ret = op_prop.infer_shape(shapes)
            # infer_shape may omit the aux-state shapes (2-tuple form).
            if len(ret) == 2:
                ishape, oshape = ret
                ashape = []
            elif len(ret) == 3:
                ishape, oshape, ashape = ret
            else:
                raise AssertionError("infer_shape must return 2 or 3 lists")
            assert len(oshape) == n_out
            assert len(ishape) == n_in
            assert len(ashape) == n_aux
            rshape = list(ishape) + list(oshape) + list(ashape)
            # Write the inferred shapes back through the out-parameters.
            for i in range(n_in + n_out + n_aux):
                tensor_shapes[i] = cast(c_array(mx_uint, rshape[i]), POINTER(mx_uint))
                tensor_dims[i] = len(rshape[i])
            infer_shape_entry._ref_holder = [tensor_shapes]
        except Exception:
            print('Error in %s.infer_shape: %s' % (reg_name, traceback.format_exc()))
            return False
        return True

    def infer_type_entry(num_tensor, tensor_types, _):
        """C Callback for CustomOpProp::InferType"""
        try:
            n_in = len(op_prop.list_arguments())
            n_out = len(op_prop.list_outputs())
            n_aux = len(op_prop.list_auxiliary_states())
            assert num_tensor == n_in + n_out + n_aux
            types = [_DTYPE_MX_TO_NP[tensor_types[i]] for i in range(n_in)]
            ret = op_prop.infer_type(types)
            if len(ret) == 2:
                itype, otype = ret
                atype = []
            elif len(ret) == 3:
                itype, otype, atype = ret
            else:
                raise AssertionError("infer_type must return 2 or 3 lists")
            assert len(otype) == n_out
            assert len(itype) == n_in
            assert len(atype) == n_aux
            rtype = list(itype) + list(otype) + list(atype)
            for i, dtype in enumerate(rtype):
                tensor_types[i] = _DTYPE_NP_TO_MX[dtype]
            infer_type_entry._ref_holder = [tensor_types]
        except Exception:
            print('Error in %s.infer_type: %s' % (reg_name, traceback.format_exc()))
            return False
        return True

    def list_outputs_entry(out, _):
        """C Callback for CustomOpProp::ListOutputs"""
        try:
            ret = op_prop.list_outputs()
            # NULL-terminated char** for the C side.
            ret = [c_str(i) for i in ret] + [c_char_p(0)]
            ret = c_array(c_char_p, ret)
            out[0] = cast(ret, POINTER(POINTER(c_char)))
            list_outputs_entry._ref_holder = [out]
        except Exception:
            print('Error in %s.list_outputs: %s' % (reg_name, traceback.format_exc()))
            return False
        return True

    def list_arguments_entry(out, _):
        """C Callback for CustomOpProp::ListArguments"""
        try:
            ret = op_prop.list_arguments()
            ret = [c_str(i) for i in ret] + [c_char_p(0)]
            ret = c_array(c_char_p, ret)
            out[0] = cast(ret, POINTER(POINTER(c_char)))
            list_arguments_entry._ref_holder = [out]
        except Exception:
            print('Error in %s.list_arguments: %s' % (reg_name, traceback.format_exc()))
            return False
        return True

    def list_auxiliary_states_entry(out, _):
        """C Callback for CustomOpProp::ListAuxiliaryStates"""
        try:
            ret = op_prop.list_auxiliary_states()
            ret = [c_str(i) for i in ret] + [c_char_p(0)]
            ret = c_array(c_char_p, ret)
            out[0] = cast(ret, POINTER(POINTER(c_char)))
            list_auxiliary_states_entry._ref_holder = [out]
        except Exception:
            tb = traceback.format_exc()
            print('Error in %s.list_auxiliary_states: %s' % (reg_name, tb))
            return False
        return True

    def declare_backward_dependency_entry(out_grad, in_data, out_data, num_dep, deps, _):
        """C Callback for CustomOpProp::DeclareBacwardDependency"""
        try:
            out_grad = [out_grad[i] for i in range(len(op_prop.list_outputs()))]
            in_data = [in_data[i] for i in range(len(op_prop.list_arguments()))]
            out_data = [out_data[i] for i in range(len(op_prop.list_outputs()))]
            rdeps = op_prop.declare_backward_dependency(out_grad, in_data, out_data)
            num_dep[0] = len(rdeps)
            rdeps = cast(c_array(c_int, rdeps), c_int_p)
            deps[0] = rdeps
            declare_backward_dependency_entry._ref_holder = [deps]
        except Exception:
            tb = traceback.format_exc()
            print('Error in %s.declare_backward_dependency: %s' % (reg_name, tb))
            return False
        return True

    def create_operator_entry(ctx, num_inputs, shapes, ndims, dtypes, ret, _):
        """C Callback for CustomOpProp::CreateOperator"""
        try:
            ndims = [ndims[i] for i in range(num_inputs)]
            shapes = [[shapes[i][j] for j in range(ndims[i])] for i in range(num_inputs)]
            dtypes = [dtypes[i] for i in range(num_inputs)]
            op = op_prop.create_operator(ctx, shapes, dtypes)

            def forward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
                """C Callback for CustomOp::Forward"""
                try:
                    tensors = [[] for i in range(5)]
                    for i in range(num_ndarray):
                        # tags 1 (out_data) and 4 (aux) are writable.
                        if tags[i] == 1 or tags[i] == 4:
                            tensors[tags[i]].append(
                                NDArray(cast(ndarraies[i], NDArrayHandle), writable=True))
                        else:
                            tensors[tags[i]].append(
                                NDArray(cast(ndarraies[i], NDArrayHandle), writable=False))
                    reqs = [req_enum[reqs[i]] for i in range(len(tensors[1]))]
                    op.forward(is_train=is_train, req=reqs,
                               in_data=tensors[0], out_data=tensors[1], aux=tensors[4])
                    #op.forward(req=reqs, in_data=tensors[0], out_data=tensors[1], aux=tensors[4])
                except Exception:
                    print('Error in CustomOp.forward: %s' % traceback.format_exc())
                    return False
                return True

            def backward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
                """C Callback for CustomOp::Backward"""
                # pylint: disable=W0613
                try:
                    tensors = [[] for i in range(5)]
                    for i in range(num_ndarray):
                        # tags 2 (in_grad) and 4 (aux) are writable.
                        if tags[i] == 2 or tags[i] == 4:
                            tensors[tags[i]].append(
                                NDArray(cast(ndarraies[i], NDArrayHandle), writable=True))
                        else:
                            tensors[tags[i]].append(
                                NDArray(cast(ndarraies[i], NDArrayHandle), writable=False))
                    reqs = [req_enum[reqs[i]] for i in range(len(tensors[2]))]
                    op.backward(req=reqs,
                                in_data=tensors[0], out_data=tensors[1],
                                in_grad=tensors[2], out_grad=tensors[3], aux=tensors[4])
                except Exception:
                    print('Error in CustomOp.backward: %s' % traceback.format_exc())
                    return False
                return True

            cur = _registry.inc()

            def delete_entry(_):
                """C Callback for CustomOp::del"""
                try:
                    del _registry.ref_holder[cur]
                except Exception:
                    print('Error in CustomOp.delete: %s' % traceback.format_exc())
                    return False
                return True

            callbacks = [del_functype(delete_entry),
                         fb_functype(forward_entry),
                         fb_functype(backward_entry)]
            callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
            contexts = [None, None, None]
            ret[0] = MXCallbackList(c_int(len(callbacks)),
                                    cast(c_array(CFUNCTYPE(c_int), callbacks),
                                         POINTER(CFUNCTYPE(c_int))),
                                    cast(c_array(c_void_p, contexts),
                                         POINTER(c_void_p)))
            # Keep op alive (and the callback list it owns) until delete_entry.
            op._ref_holder = [ret]
            _registry.ref_holder[cur] = op
        except Exception:
            print('Error in %s.create_operator: %s' % (reg_name, traceback.format_exc()))
            return False
        return True

    cur = _registry.inc()

    def delete_entry(_):
        """C Callback for CustomOpProp::del"""
        try:
            del _registry.ref_holder[cur]
        except Exception:
            print('Error in CustomOpProp.delete: %s' % traceback.format_exc())
            return False
        return True

    callbacks = [del_functype(delete_entry),
                 list_functype(list_arguments_entry),
                 list_functype(list_outputs_entry),
                 list_functype(list_auxiliary_states_entry),
                 infershape_functype(infer_shape_entry),
                 deps_functype(declare_backward_dependency_entry),
                 createop_functype(create_operator_entry),
                 infertype_functype(infer_type_entry)]
    callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
    contexts = [None] * len(callbacks)
    ret[0] = MXCallbackList(c_int(len(callbacks)),
                            cast(c_array(CFUNCTYPE(c_int), callbacks),
                                 POINTER(CFUNCTYPE(c_int))),
                            cast(c_array(c_void_p, contexts),
                                 POINTER(c_void_p)))
    # Pin op_prop until the C side calls delete_entry.
    op_prop._ref_holder = [ret]
    _registry.ref_holder[cur] = op_prop
    return True
def cast_void_p(value):
    """Reinterpret *value* (a ctypes object or integer address) as c_void_p."""
    return cast(value, c_void_p)
def list_arguments_entry(out, _):
    """C Callback for NumpyOpProp::ListArguments"""
    # Build a NULL-terminated char** from the op's argument names and
    # hand it back through the out-parameter.
    names = [c_str(name) for name in self.list_arguments()]
    names.append(c_char_p(0))
    arr = c_array(c_char_p, names)
    out[0] = cast(arr, POINTER(POINTER(c_char)))
def create_operator_entry(ctx, num_inputs, shapes, ndims, dtypes, ret, _):
    """C Callback for CustomOpProp::CreateOperator

    Materialises the Python operator for the given context/shapes/dtypes
    and fills *ret* with an MXCallbackList of forward/backward/delete
    callbacks. ``_ref_holder`` / ``_registry.ref_holder`` pin the ctypes
    objects and the op so the C side's pointers stay valid.
    """
    try:
        ndims = [ndims[i] for i in range(num_inputs)]
        shapes = [[shapes[i][j] for j in range(ndims[i])] for i in range(num_inputs)]
        dtypes = [dtypes[i] for i in range(num_inputs)]
        op = op_prop.create_operator(ctx, shapes, dtypes)

        def forward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
            """C Callback for CustomOp::Forward"""
            try:
                tensors = [[] for i in range(5)]
                for i in range(num_ndarray):
                    # tags 1 (out_data) and 4 (aux) are writable.
                    if tags[i] == 1 or tags[i] == 4:
                        tensors[tags[i]].append(
                            NDArray(cast(ndarraies[i], NDArrayHandle), writable=True))
                    else:
                        tensors[tags[i]].append(
                            NDArray(cast(ndarraies[i], NDArrayHandle), writable=False))
                reqs = [req_enum[reqs[i]] for i in range(len(tensors[1]))]
                op.forward(is_train=is_train, req=reqs,
                           in_data=tensors[0], out_data=tensors[1], aux=tensors[4])
                #op.forward(req=reqs, in_data=tensors[0], out_data=tensors[1], aux=tensors[4])
            except Exception:
                print('Error in CustomOp.forward: %s' % traceback.format_exc())
                return False
            return True

        def backward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
            """C Callback for CustomOp::Backward"""
            # pylint: disable=W0613
            try:
                tensors = [[] for i in range(5)]
                for i in range(num_ndarray):
                    # tags 2 (in_grad) and 4 (aux) are writable.
                    if tags[i] == 2 or tags[i] == 4:
                        tensors[tags[i]].append(
                            NDArray(cast(ndarraies[i], NDArrayHandle), writable=True))
                    else:
                        tensors[tags[i]].append(
                            NDArray(cast(ndarraies[i], NDArrayHandle), writable=False))
                reqs = [req_enum[reqs[i]] for i in range(len(tensors[2]))]
                op.backward(req=reqs,
                            in_data=tensors[0], out_data=tensors[1],
                            in_grad=tensors[2], out_grad=tensors[3], aux=tensors[4])
            except Exception:
                print('Error in CustomOp.backward: %s' % traceback.format_exc())
                return False
            return True

        cur = _registry.inc()

        def delete_entry(_):
            """C Callback for CustomOp::del"""
            try:
                del _registry.ref_holder[cur]
            except Exception:
                print('Error in CustomOp.delete: %s' % traceback.format_exc())
                return False
            return True

        callbacks = [del_functype(delete_entry),
                     fb_functype(forward_entry),
                     fb_functype(backward_entry)]
        callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
        contexts = [None, None, None]
        ret[0] = MXCallbackList(c_int(len(callbacks)),
                                cast(c_array(CFUNCTYPE(c_int), callbacks),
                                     POINTER(CFUNCTYPE(c_int))),
                                cast(c_array(c_void_p, contexts),
                                     POINTER(c_void_p)))
        # Keep op (and the callback list) alive until delete_entry fires.
        op._ref_holder = [ret]
        _registry.ref_holder[cur] = op
    except Exception:
        print('Error in %s.create_operator: %s' % (reg_name, traceback.format_exc()))
        return False
    return True
def _archive_read_open_memory(archive, buffer_):
    """Open *archive* for reading directly from the in-memory *buffer_*."""
    data_ptr = ctypes.cast(ctypes.c_char_p(buffer_), ctypes.c_void_p)
    libarchive.calls.archive_read.c_archive_read_open_memory(
        archive, data_ptr, len(buffer_))
def get_symbol(self, *args, **kwargs):
    """Build the MXNet symbol for this NDArrayOp.

    Wraps the op's Python methods in ctypes callbacks, packs their
    pointers into an NDArrayOpInfo struct, and passes the struct's hex
    address to ``symbol._internal._NDArray``. ``self.info_`` and the
    ``PythonOp._ref_holder`` append keep everything alive while the
    engine holds the raw pointer.
    """
    # Callback signatures expected by the C API.
    fb_functype = CFUNCTYPE(c_bool, c_int, POINTER(c_void_p), POINTER(c_int), c_void_p)
    infer_functype = CFUNCTYPE(c_bool, c_int, POINTER(c_int),
                               POINTER(POINTER(mx_uint)), c_void_p)
    list_functype = CFUNCTYPE(c_bool, POINTER(POINTER(POINTER(c_char))), c_void_p)
    deps_functype = CFUNCTYPE(c_bool, c_int_p, c_int_p, c_int_p, c_int_p,
                              POINTER(c_int_p), c_void_p)

    class NDArrayOpInfo(Structure):
        """Structure that holds Callback information. Passed to NDArrayOpProp"""
        _fields_ = [('forward', fb_functype),
                    ('backward', fb_functype),
                    ('infer_shape', infer_functype),
                    ('list_outputs', list_functype),
                    ('list_arguments', list_functype),
                    ('declare_backward_dependency', deps_functype),
                    ('p_forward', c_void_p),
                    ('p_backward', c_void_p),
                    ('p_infer_shape', c_void_p),
                    ('p_list_outputs', c_void_p),
                    ('p_list_arguments', c_void_p),
                    ('p_declare_backward_dependency', c_void_p)]

    def forward_entry(num_ndarray, ndarraies, tags, _):
        """C Callback for NDArrayOp::Forward"""
        try:
            tensors = [[] for i in range(4)]
            for i in range(num_ndarray):
                # tag 1 (out_data) is the only writable group in forward.
                if tags[i] == 1:
                    tensors[tags[i]].append(
                        NDArray(cast(ndarraies[i], NDArrayHandle), writable=True))
                else:
                    tensors[tags[i]].append(
                        NDArray(cast(ndarraies[i], NDArrayHandle), writable=False))
            self.forward(in_data=tensors[0], out_data=tensors[1])
        except Exception:
            print('Error in NDArrayOp.forward: %s' % traceback.format_exc())
            return False
        return True

    def backward_entry(num_ndarray, ndarraies, tags, _):
        """C Callback for NDArrayOp::Backward"""
        try:
            tensors = [[] for i in range(4)]
            for i in range(num_ndarray):
                # tag 2 (in_grad) is the only writable group in backward.
                if tags[i] == 2:
                    tensors[tags[i]].append(
                        NDArray(cast(ndarraies[i], NDArrayHandle), writable=True))
                else:
                    tensors[tags[i]].append(
                        NDArray(cast(ndarraies[i], NDArrayHandle), writable=False))
            self.backward(in_data=tensors[0], out_data=tensors[1],
                          in_grad=tensors[2], out_grad=tensors[3])
        except Exception:
            print('Error in NDArrayOp.backward: %s' % traceback.format_exc())
            return False
        return True

    def infer_shape_entry(num_tensor, tensor_dims, tensor_shapes, _):
        """C Callback for NDArrayOpProp::InferShape"""
        try:
            n_in = len(self.list_arguments())
            n_out = len(self.list_outputs())
            assert num_tensor == n_in + n_out
            shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])]
                      for i in range(n_in)]
            ishape, oshape = self.infer_shape(shapes)
            assert len(oshape) == n_out
            assert len(ishape) == n_in
            rshape = list(ishape) + list(oshape)
            # Write the inferred shapes back through the out-parameters.
            for i in range(n_in + n_out):
                tensor_shapes[i] = cast(c_array(mx_uint, rshape[i]), POINTER(mx_uint))
                tensor_dims[i] = len(rshape[i])
        except Exception:
            print('Error in NDArrayOp.infer_shape: %s' % traceback.format_exc())
            return False
        return True

    def list_outputs_entry(out, _):
        """C Callback for NDArrayOpProp::ListOutputs"""
        try:
            ret = self.list_outputs()
            # NULL-terminated char** for the C side.
            ret = [c_str(i) for i in ret] + [c_char_p(0)]
            ret = c_array(c_char_p, ret)
            out[0] = cast(ret, POINTER(POINTER(c_char)))
        except Exception:
            print('Error in NDArrayOp.list_outputs: %s' % traceback.format_exc())
            return False
        return True

    def list_arguments_entry(out, _):
        """C Callback for NDArrayOpProp::ListArguments"""
        try:
            ret = self.list_arguments()
            ret = [c_str(i) for i in ret] + [c_char_p(0)]
            ret = c_array(c_char_p, ret)
            out[0] = cast(ret, POINTER(POINTER(c_char)))
        except Exception:
            print('Error in NDArrayOp.list_arguments: %s' % traceback.format_exc())
            return False
        return True

    def declare_backward_dependency(out_grad, in_data, out_data, num_dep, deps, _):
        """C Callback for NDArrayOpProp::DeclareBacwardDependency"""
        try:
            out_grad = [out_grad[i] for i in range(len(self.list_outputs()))]
            in_data = [in_data[i] for i in range(len(self.list_arguments()))]
            out_data = [out_data[i] for i in range(len(self.list_outputs()))]
            rdeps = self.declare_backward_dependency(out_grad, in_data, out_data)
            num_dep[0] = len(rdeps)
            rdeps = cast(c_array(c_int, rdeps), c_int_p)
            deps[0] = rdeps
        except Exception:
            print('Error in NDArrayOp.declare_backward_dependency: %s'
                  % traceback.format_exc())
            return False
        return True

    self.info_ = NDArrayOpInfo(fb_functype(forward_entry),
                               fb_functype(backward_entry),
                               infer_functype(infer_shape_entry),
                               list_functype(list_outputs_entry),
                               list_functype(list_arguments_entry),
                               deps_functype(declare_backward_dependency),
                               None, None, None, None, None, None)
    # The struct's address is passed to the backend as a hex string.
    cb_ptr = format(cast(pointer(self.info_), c_void_p).value, 'x')
    # pylint: disable=E1101
    sym = symbol._internal._NDArray(*args, info=cb_ptr, **kwargs)
    # keep a reference of ourself in PythonOp so we don't get garbage collected.
    PythonOp._ref_holder.append(self)
    return sym
def lock_python_bytes(data):
    """Pin *data*'s memory via sodium_mlock.

    Raises RuntimeError when the underlying call reports failure
    (non-zero return).
    """
    status = sodium_mlock(cast(data, c_void_p), len(data))
    if status != 0:
        raise RuntimeError('Failed to lock memory')
def get_symbol(self, *args, **kwargs):
    """Build the MXNet symbol for this NumpyOp.

    Wraps the op's Python methods in ctypes callbacks, packs their
    pointers into a NumpyOpInfo struct, and passes the struct's hex
    address to ``symbol._internal._Native``. ``self.info_`` and the
    ``PythonOp._ref_holder`` append keep everything alive while the
    engine holds the raw pointer.
    """
    # Callback signatures expected by the C API.
    fb_functype = CFUNCTYPE(None, c_int, POINTER(POINTER(mx_float)), POINTER(c_int),
                            POINTER(POINTER(mx_uint)), POINTER(c_int), c_void_p)
    infer_functype = CFUNCTYPE(None, c_int, POINTER(c_int),
                               POINTER(POINTER(mx_uint)), c_void_p)
    list_functype = CFUNCTYPE(None, POINTER(POINTER(POINTER(c_char))), c_void_p)

    class NumpyOpInfo(Structure):
        """Structure that holds Callback information. Passed to NumpyOpProp"""
        _fields_ = [
            ('forward', fb_functype),
            ('backward', fb_functype),
            ('infer_shape', infer_functype),
            ('list_outputs', list_functype),
            ('list_arguments', list_functype),
            ('p_forward', c_void_p),
            ('p_backward', c_void_p),
            ('p_infer_shape', c_void_p),
            ('p_list_outputs', c_void_p),
            ('p_list_arguments', c_void_p),
        ]

    def forward_entry(num_tensor, tensor_ptrs, tensor_dims,
                      tensor_shapes, tensor_tags, _):
        """C Callback for NumpyOp::Forward"""
        tensors = [[] for i in range(4)]
        for i in range(num_tensor):
            shape = [tensor_shapes[i][j] for j in range(tensor_dims[i])]
            # Shared-memory numpy view over the engine's buffer (no copy).
            buff = ctypes2numpy_shared(tensor_ptrs[i], shape)
            tensors[tensor_tags[i]].append(buff)
        self.forward(in_data=tensors[0], out_data=tensors[1])

    def backward_entry(num_tensor, tensor_ptrs, tensor_dims,
                       tensor_shapes, tensor_tags, _):
        """C Callback for NumpyOp::Backward"""
        tensors = [[] for i in range(4)]
        for i in range(num_tensor):
            shape = [tensor_shapes[i][j] for j in range(tensor_dims[i])]
            buff = ctypes2numpy_shared(tensor_ptrs[i], shape)
            tensors[tensor_tags[i]].append(buff)
        self.backward(in_data=tensors[0], out_data=tensors[1],
                      in_grad=tensors[2], out_grad=tensors[3])

    def infer_shape_entry(num_tensor, tensor_dims, tensor_shapes, _):
        """C Callback for NumpyOpProp::InferShape"""
        n_in = len(self.list_arguments())
        n_out = len(self.list_outputs())
        assert num_tensor == n_in + n_out
        shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])]
                  for i in range(n_in)]
        ishape, oshape = self.infer_shape(shapes)
        assert len(oshape) == n_out
        assert len(ishape) == n_in
        rshape = list(ishape) + list(oshape)
        # Write the inferred shapes back through the out-parameters.
        for i in range(n_in + n_out):
            tensor_shapes[i] = cast(c_array(mx_uint, rshape[i]), POINTER(mx_uint))
            tensor_dims[i] = len(rshape[i])

    def list_outputs_entry(out, _):
        """C Callback for NumpyOpProp::ListOutputs"""
        ret = self.list_outputs()
        # NULL-terminated char** for the C side.
        ret = [c_str(i) for i in ret] + [c_char_p(0)]
        ret = c_array(c_char_p, ret)
        out[0] = cast(ret, POINTER(POINTER(c_char)))

    def list_arguments_entry(out, _):
        """C Callback for NumpyOpProp::ListArguments"""
        ret = self.list_arguments()
        ret = [c_str(i) for i in ret] + [c_char_p(0)]
        ret = c_array(c_char_p, ret)
        out[0] = cast(ret, POINTER(POINTER(c_char)))

    self.info_ = NumpyOpInfo(fb_functype(forward_entry),
                             fb_functype(backward_entry),
                             infer_functype(infer_shape_entry),
                             list_functype(list_outputs_entry),
                             list_functype(list_arguments_entry),
                             None, None, None, None, None)
    # The struct's address is passed to the backend as a hex string.
    cb_ptr = format(cast(pointer(self.info_), c_void_p).value, 'x')
    # pylint: disable=E1101
    sym = symbol._internal._Native(*args,
                                   info=cb_ptr,
                                   need_top_grad=self.need_top_grad(),
                                   **kwargs)
    # keep a reference of ourself in PythonOp so we don't get garbage collected.
    PythonOp._ref_holder.append(self)
    return sym