def attach_filter(fd, iface, bpf_filter_string):
    """Attach a BPF filter to the BPF file descriptor.

    Compiles *bpf_filter_string* with tcpdump (``-ddd`` prints the byte
    code in decimal), builds a ``bpf_program`` from it and installs it on
    *fd* via the ``BIOCSETF`` ioctl.

    Raises:
        Scapy_Exception: if tcpdump cannot be executed, produces no
            output, or the ioctl fails.
    """
    # Retrieve the BPF byte code in decimal
    command = "%s -i %s -ddd -s 1600 '%s'" % (conf.prog.tcpdump, iface,
                                              bpf_filter_string)
    try:
        f = os.popen(command)
    except OSError as msg:
        raise Scapy_Exception("Failed to execute tcpdump: (%s)" % msg)

    # Convert the byte code to a BPF program structure
    try:
        lines = f.readlines()
    finally:
        f.close()  # fix: the popen handle was never closed
    if not lines:
        raise Scapy_Exception("Got an empty BPF filter from tcpdump !")

    # Allocate BPF instructions; the first output line is the count
    size = int(lines[0])
    bpf_insn_a = bpf_insn * size
    bip = bpf_insn_a()

    # Fill the BPF instruction structures with the byte code
    for i, line in enumerate(lines[1:]):
        values = [int(v) for v in line.split()]
        bip[i].code = c_ushort(values[0])
        bip[i].jt = c_ubyte(values[1])
        bip[i].jf = c_ubyte(values[2])
        bip[i].k = c_uint(values[3])

    # Create the BPF program and assign it to the interface
    bp = bpf_program(size, bip)
    ret = LIBC.ioctl(c_int(fd), BIOCSETF, cast(pointer(bp), c_char_p))
    if ret < 0:
        raise Scapy_Exception("Can't attach the BPF filter !")
def fade_to_rgb(device, time, r, g, b, n):
    """Fade LED *n* of *device* to the color (r, g, b) over the given
    fade time (units per the underlying fadeToRGB API)."""
    return fadeToRGB(device,
                     c_ushort(time),
                     c_ubyte(r),
                     c_ubyte(g),
                     c_ubyte(b),
                     c_ubyte(n))
def rxByte(ack):
    """Receive one byte over I2C.

    Wraps ``I2CRESULT i2c_RxByte(unsigned char* pData)``; *ack* is passed
    through as an unsigned char.  Returns (result_code, received_byte).
    """
    data = ctypes.c_ubyte()
    status = i2c_RxByte_fn(ctypes.byref(data), ctypes.c_ubyte(ack))
    return status, data.value
def write_pattern_line(device, time, r, g, b, pos):
    """Write pattern line *pos* of *device*: color (r, g, b) with the
    given fade time."""
    writePatternLine(device,
                     c_ushort(time),
                     c_ubyte(r),
                     c_ubyte(g),
                     c_ubyte(b),
                     c_ubyte(pos))
def read_pattern_line(device, pos):
    """Read pattern line *pos* of *device*; returns (time, r, g, b) as ints."""
    fade = c_ushort()
    red, green, blue = c_ubyte(), c_ubyte(), c_ubyte()
    readPatternLine(device, byref(fade), byref(red), byref(green),
                    byref(blue), c_ubyte(pos))
    return int(fade.value), int(red.value), int(green.value), int(blue.value)
def char_checksum(data):
    """Two's-complement checksum of a hex string.

    *data* is read as consecutive two-character hex bytes; the bytes are
    summed and the low 8 bits of the arithmetic negation of the sum are
    returned as a two-digit upper-case hex string.
    """
    total = sum(int(data[i:i + 2], 16) for i in range(0, len(data), 2))
    return "%02X" % (-total & 0xFF)
def get_rgb(pixel, pformat):
    """Gets the mapped RGB values for a specific pixel value and format."""
    red = ctypes.c_ubyte()
    green = ctypes.c_ubyte()
    blue = ctypes.c_ubyte()
    dll.SDL_GetRGB(pixel, ctypes.byref(pformat),
                   ctypes.byref(red), ctypes.byref(green), ctypes.byref(blue))
    return (red.value, green.value, blue.value)
def test_who_am_i_data(self):
    """Subscribe to an I2C data signal, inject a WHO_AM_I notification,
    and check the handler parsed the expected byte."""
    raw = create_string_buffer(b'\x0d\x81\x0a\x2a', 4)
    signal = self.libmetawear.mbl_mw_i2c_get_data_signal(
        self.board, c_ubyte(1), c_ubyte(0xa))
    self.libmetawear.mbl_mw_datasignal_subscribe(signal, self.sensor_data_handler)
    self.libmetawear.mbl_mw_connection_notify_char_changed(
        self.board, raw.raw, len(raw.raw))
    self.assertEqual(self.data_byte_array, [0x2a])
def get_surface_color_mod(surface):
    """Gets the additional color value used for blit operations.

    Returns an (r, g, b) tuple on success, None on failure.
    """
    red = ctypes.c_ubyte()
    green = ctypes.c_ubyte()
    blue = ctypes.c_ubyte()
    status = dll.SDL_GetSurfaceColorMod(ctypes.byref(surface),
                                        ctypes.byref(red),
                                        ctypes.byref(green),
                                        ctypes.byref(blue))
    if status != 0:
        return None
    return (red.value, green.value, blue.value)
def __getAnswer__(self):
    # Fetch the pending answer from the mdsip server and convert it into
    # a Python/MDSplus object (scalar, String, or array via descriptor_a).
    GetAnswerInfoTS=__MdsIpShr.GetAnswerInfoTS
    GetAnswerInfoTS.argtypes=[_C.c_int32,_C.POINTER(_C.c_ubyte),_C.POINTER(_C.c_ushort),_C.POINTER(_C.c_ubyte), _C.c_void_p,_C.POINTER(_C.c_ulong),_C.POINTER(_C.c_void_p),_C.POINTER(_C.c_void_p)]
    MdsIpFree=__MdsIpShr.MdsIpFree
    MdsIpFree.argtypes=[_C.c_void_p]
    # Out-parameters: dtype code, element length, dimension count, up to 8
    # dimension sizes, total byte count, data pointer and memory pointer.
    dtype=_C.c_ubyte(0)
    length=_C.c_ushort(0)
    ndims=_C.c_ubyte(0)
    dims=_N.array([0,0,0,0,0,0,0,0],dtype=_N.uint32)
    numbytes=_C.c_ulong(0)
    ans=_C.c_void_p(0)
    mem=_C.c_void_p(0)
    status=GetAnswerInfoTS(self.socket,dtype,length,ndims,dims.ctypes.data,numbytes,_C.pointer(ans),_C.pointer(mem))
    dtype=dtype.value
    # Remap native float/complex codes 10-13 onto the MDSplus dtype codes.
    if dtype == 10:
        dtype = _data.Float32.dtype_mds
    elif dtype == 11:
        dtype = _data.Float64.dtype_mds
    elif dtype == 12:
        dtype = _data.Complex64.dtype_mds
    elif dtype == 13:
        dtype = _data.Complex128.dtype_mds
    if ndims.value == 0:
        # Scalar answer: text carries its own length; everything else is
        # cast through the ctypes type matching its MDSplus scalar class.
        if dtype == _dtypes.DTYPE_T:
            ans=String(_C.cast(ans,_C.POINTER(_C.c_char*length.value)).contents.value)
        else:
            kls=_data.Scalar.mdsdtypeToClass[dtype]
            ans=kls(_C.cast(ans,_C.POINTER(kls.dtype_ctypes)).contents.value)
    else:
        # Array answer: wrap the raw pointer in a class-4 (array)
        # descriptor and let descriptor_a.value perform the conversion.
        val=descriptor_a()
        val.dtype=dtype
        val.dclass=4
        val.length=length.value
        val.pointer=ans
        val.scale=0
        val.digits=0
        val.aflags=0
        val.dimct=ndims.value
        val.arsize=numbytes.value
        val.a0=val.pointer
        if val.dimct > 1:
            # Multi-dimensional: copy the per-dimension sizes into the
            # coefficient/bounds block of the descriptor.
            val.coeff=1
            for i in range(val.dimct):
                val.coeff_and_bounds[i]=int(dims[i])
        ans=val.value
    # Odd status means success.  On failure, free the server-allocated
    # buffer and raise; a String answer carries the error text itself.
    if not ((status & 1) == 1):
        if mem.value is not None:
            MdsIpFree(mem)
        if isinstance(ans,String):
            raise MdsException(str(ans))
        else:
            raise MdsException(MdsGetMsg(status))
    if mem.value is not None:
        MdsIpFree(mem)
    return ans
def stopMotor(self, stop_mode = 'smooth'):
    """Stop the stepper motor with the amplifier disabled.

    stop_mode is 'smooth' (decelerate) or 'abrupt' (immediate);
    raises LdcnError for anything else.
    """
    flag_for_mode = {'smooth': STOP_SMOOTH, 'abrupt': STOP_ABRUPT}
    try:
        mode_byte = STP_DISABLE_AMP | flag_for_mode[stop_mode]
    except KeyError:
        raise LdcnError("invalid stop_mode, must be 'smooth' or 'abrupt'")
    self._libldcn.StepStopMotor(c_ubyte(self.addr), c_ubyte(mode_byte))
def setCursor(self, x, y) :
    """Set the cursor position, which is where the next text will be drawn."""
    self._endOp()
    self._waitOnRefresh()
    # Coordinates are sent as 8-bit two's complement values
    cx = ctypes.c_ubyte(int(x)).value
    cy = ctypes.c_ubyte(int(y)).value
    self._sendOp([self._OpSetCursor, cx, cy])
def get_pattern(self, pos):
    """Read pattern line *pos* from the device as a Blink1Pattern."""
    fade = c_ushort()
    red, green, blue = c_ubyte(), c_ubyte(), c_ubyte()
    with self._blink1 as dev:
        libblink1.readPatternLine(dev, byref(fade), byref(red),
                                  byref(green), byref(blue), pos)
    return Blink1Pattern(red.value, green.value, blue.value, fade.value)
def get_texture_color_mod(texture):
    """Gets the additional color value used in render copy operations.

    Returns (r, g, b); raises SDLError on failure.
    """
    channels = [ctypes.c_ubyte(0) for _ in range(3)]
    status = dll.SDL_GetTextureColorMod(ctypes.byref(texture),
                                        *(ctypes.byref(c) for c in channels))
    if status == -1:
        raise SDLError()
    return channels[0].value, channels[1].value, channels[2].value
def play_state(self):
    """Read the device play state as a Blink1PlayState
    (playing flag, start/stop positions, repeat count, current position)."""
    fields = [c_ubyte() for _ in range(5)]
    playing, start, stop, count, pos = fields
    with self._blink1 as dev:
        libblink1.readPlayState(dev, byref(playing), byref(start),
                                byref(stop), byref(count), byref(pos))
    return Blink1PlayState(playing.value, start.value, stop.value,
                           count.value, pos.value)
def loadTrajectory(self, pos, speed, acc):
    """Load a position-mode trajectory into the step controller.

    (Velocity mode would instead use mode = START_NOW | LOAD_SPEED | LOAD_ACC.)
    """
    mode_byte = START_NOW | LOAD_SPEED | LOAD_ACC | LOAD_POS
    scaled_pos = pos * 25  # multiply by 25 for pico motor FIXME
    steptime = 0  # always should be zero?
    self._libldcn.StepLoadTraj(
        c_ubyte(self.addr),
        c_ubyte(mode_byte),
        c_long(scaled_pos),
        c_ubyte(speed),
        c_ubyte(acc),
        c_ushort(steptime),
    )
def get_render_draw_color(renderer):
    """Gets the color used for drawing operations (rect, line and clear)
    as RGBA tuple; raises SDLError on failure."""
    channels = [ctypes.c_ubyte(0) for _ in range(4)]
    status = dll.SDL_GetRenderDrawColor(ctypes.byref(renderer),
                                        *(ctypes.byref(c) for c in channels))
    if status == -1:
        raise SDLError()
    return tuple(c.value for c in channels)
def __getAnswer__(self):
    # Fetch the pending answer from the mdsip server and convert it into
    # a Python/MDSplus object (scalar, String, or array via descriptor_a).
    # Out-parameters: dtype code, element length, dimension count, up to 8
    # dimension sizes, total byte count, data pointer and memory pointer.
    dtype=_C.c_ubyte(0)
    length=_C.c_ushort(0)
    ndims=_C.c_ubyte(0)
    dims=_N.array([0,0,0,0,0,0,0,0],dtype=_N.uint32)
    numbytes=_C.c_ulong(0)
    ans=_C.c_void_p(0)
    mem=_C.c_void_p(0)
    status=GetAnswerInfoTS(self.socket,dtype,length,ndims,dims.ctypes.data,numbytes,_C.pointer(ans),_C.pointer(mem))
    dtype=dtype.value
    # Remap native VMS float codes onto the IEEE MDSplus dtype codes.
    if dtype == _dtypes.DTYPE_F:
        dtype = _dtypes.DTYPE_FLOAT
    elif dtype == _dtypes.DTYPE_D:
        dtype = _dtypes.DTYPE_DOUBLE
    elif dtype == _dtypes.DTYPE_FC:
        dtype = _dtypes.DTYPE_FLOAT_COMPLEX
    elif dtype == _dtypes.DTYPE_DC:
        dtype = _dtypes.DTYPE_DOUBLE_COMPLEX
    if ndims.value == 0:
        # Scalar answer: text carries its own length; everything else is
        # cast through the ctypes type registered for this dtype.
        if dtype == _dtypes.DTYPE_T:
            ans=_scalar.String(_C.cast(ans,_C.POINTER(_C.c_char*length.value)).contents.value)
        else:
            ans=Connection.dtype_to_scalar[dtype](_C.cast(ans,_C.POINTER(_dtypes.mdsdtypes.ctypes[dtype])).contents.value)
    else:
        # Array answer: wrap the raw pointer in a class-4 (array)
        # descriptor and let descriptor_a.value perform the conversion.
        val=descriptor_a()
        val.dtype=dtype
        val.dclass=4
        val.length=length.value
        val.pointer=ans
        val.scale=0
        val.digits=0
        val.aflags=0
        val.dimct=ndims.value
        val.arsize=numbytes.value
        val.a0=val.pointer
        if val.dimct > 1:
            # Multi-dimensional: copy per-dimension sizes into the
            # coefficient/bounds block of the descriptor.
            val.coeff=1
            for i in range(val.dimct):
                val.coeff_and_bounds[i]=int(dims[i])
        ans=val.value
    # Odd status means success.  On failure, free the server-allocated
    # buffer and raise; a String answer carries the error text itself.
    if not ((status & 1) == 1):
        if mem.value is not None:
            MdsIpFree(mem)
        if isinstance(ans,_scalar.String):
            raise MdsException(str(ans))
        else:
            raise MdsException(MdsGetMsg(status))
    if mem.value is not None:
        MdsIpFree(mem)
    return ans
def _adjust(a, a_offset, b): """ a = bytearray a_offset = int b = bytearray """ x = (b[-1] & 0xFF) + (a[a_offset + len(b) - 1] & 0xFF) + 1 a[a_offset + len(b) - 1] = ctypes.c_ubyte(x).value x >>= 8 for i in range(len(b)-2, -1, -1): x += (b[i] & 0xFF) + (a[a_offset + i] & 0xFF) a[a_offset + i] = ctypes.c_ubyte(x).value x >>= 8
def __getAnswer__(self):
    # Fetch the pending answer from the mdsip server and convert it into
    # a value via a scalar or array descriptor.  (Python 2 code: note the
    # old-style `raise E, msg` below.)
    # Out-parameters: dtype code, element length, dimension count, up to 8
    # dimension sizes, total byte count, data pointer and memory pointer.
    dtype=_C.c_ubyte(0)
    length=_C.c_ushort(0)
    ndims=_C.c_ubyte(0)
    dims=_N.array([0,0,0,0,0,0,0,0],dtype=_N.uint32)
    numbytes=_C.c_ulong(0)
    ans=_C.c_void_p(0)
    mem=_C.c_void_p(0)
    status=self.__GetAnswerInfoTS(self.socket,dtype,length,ndims,dims.ctypes.data,numbytes,_C.pointer(ans),_C.pointer(mem))
    dtype=dtype.value
    # Remap native VMS float codes onto the IEEE equivalents.
    if dtype == DTYPE_F:
        dtype = DTYPE_FLOAT
    elif dtype == DTYPE_D:
        dtype = DTYPE_DOUBLE
    elif dtype == DTYPE_FC:
        dtype = DTYPE_FLOAT_COMPLEX
    elif dtype == DTYPE_DC:
        dtype = DTYPE_DOUBLE_COMPLEX
    if ndims.value == 0:
        # Scalar answer: build a class-1 (scalar) descriptor around the
        # returned pointer and let descriptor.value do the conversion.
        val=descriptor()
        val.dtype=dtype
        val.dclass=1
        val.length=length.value
        val.pointer=_C.cast(ans,_C.POINTER(descriptor))
        ans=val.value
    else:
        # Array answer: build a class-4 (array) descriptor instead.
        val=descriptor_a()
        val.dtype=dtype
        val.dclass=4
        val.length=length.value
        val.pointer=ans
        val.scale=0
        val.digits=0
        val.aflags=0
        val.dimct=ndims.value
        val.arsize=numbytes.value
        val.a0=val.pointer
        if val.dimct > 1:
            # Multi-dimensional: copy per-dimension sizes into the
            # coefficient/bounds block of the descriptor.
            val.coeff=1
            for i in range(val.dimct):
                val.coeff_and_bounds[i]=int(dims[i])
        ans=val.value
    # Odd status means success; on failure free the server buffer and raise.
    if not ((status & 1) == 1):
        if mem.value is not None:
            self.__mdsipFree(mem)
        raise MdsException,MdsGetMsg(status)
    if mem.value is not None:
        self.__mdsipFree(mem)
    return ans
def drawCircle(self, x, y, radius) :
    """
    Draw a circle centered at (x,y) with the specified radius.
    x and y must be between -128 and 127.
    radius must be between 0 and 255
    """
    self._endOp()
    self._waitOnRefresh()
    # The center coordinates are sent as 8-bit two's complement values
    cx = ctypes.c_ubyte(int(x)).value
    cy = ctypes.c_ubyte(int(y)).value
    self._sendOp([self._OpDrawCircle, cx, cy, int(radius)])
def SetChars(self, eventchar=0x82, eventcharenable=0, errorchar=0x88, errorcharenable=0): dll.FT_SetChars.argtypes = [ctypes.c_ulong, ctypes.c_ubyte, ctypes.c_ubyte, ctypes.c_ubyte] dll.FT_SetChars.restypes = ctypes.c_ulong self._eventchar = ctypes.c_ubyte(eventchar) self._eventcharenable = ctypes.c_ubyte(eventcharenable) self._errorchar = ctypes.c_ubyte(errorchar) self._errorcharenable = ctypes.c_ubyte(errorcharenable) if dll.FT_SetChars(self._handle, self._eventchar, self._eventcharenable, self._errorchar, self._errorcharenable) != 0: print STATUS_CODES[dll.FT_SetChars(self._handle, self._eventchar, self._eventcharenable, self._errorchar, self._errorcharenable)] else: print 'Configured event and error characters were successfully updated ' print 'Event Character: %s' % chr(self._eventchar.value) print 'Event Character Status: %s' % ('DISABLED' if self._eventcharenable.value == 0 else 'ENABLED') print 'Error Character: %s' % chr(self._errorchar.value) print 'Error Character Status: %s' % ('DISABLED' if self._eventcharenable.value == 0 else 'ENABLED')
def write_report(self, data):
    """Send *data* (a sequence of ints) as a HID output report.

    Any queued answers are drained first: we expect at most one answer
    per written report and no spontaneous messages, so leftovers must be
    stale.
    """
    while self.receiving_queue.qsize():
        self.receiving_queue.get_nowait()
    report = [ctypes.c_ubyte(byte) for byte in data]
    self.hid_device.send_output_report(report)
def read(self, pipe_id, length_buffer):
    """Read up to *length_buffer* bytes from WinUSB pipe *pipe_id*.

    Returns the raw ctypes buffer on success, None on failure.
    """
    buf = create_string_buffer(length_buffer)
    transferred = c_ulong(0)
    status = self.api.exec_function_winusb(
        WinUsb_ReadPipe, self.handle_winusb, c_ubyte(pipe_id),
        buf, c_ulong(length_buffer), byref(transferred), None)
    return buf if status != 0 else None
def encap_push_pdu(self, pdu_bytes, channel=0, label=[1,2,3,4,5,6], protocol=0x0800):
    '''
    Push a PDU into the Tx packet buffer

    The contents of the PDU, pdu_bytes, should be a sequence of bytes.
    '''
    if not 0 <= channel < self.num_channels:
        raise LibgseWrapperError('Channel number out of range: {}'.format(channel))
    if len(label) != 6:
        raise LibgseWrapperError('Only 6-byte labels supported: {}'.format(label))
    # Copy at most 64 KiB of payload into the C-side data buffer
    num_bytes = min(len(pdu_bytes), 65536)
    for idx in range(num_bytes):
        self.c_data[idx] = pdu_bytes[idx]
    for idx in range(6):
        self.c_label[idx] = label[idx]
    self.c_labeltype = c_ubyte(0)
    self.c_protocol = c_ushort(protocol)
    status = self.libgse.gse_create_vfrag_with_data(
        byref(self.c_pdu_vfrag), num_bytes,
        gse_max_header_length, gse_max_trailer_length,
        self.c_data, num_bytes)
    if status != 0:
        raise LibgseWrapperError('libgse/gse_create_vfrag_with_data: {}'.format(hex(status)))
    status = self.libgse.gse_encap_receive_pdu(
        self.c_pdu_vfrag, self.c_encap_state, self.c_label,
        self.c_labeltype, self.c_protocol, channel)
    if status != 0:
        raise LibgseWrapperError('libgse/gse_encap_receive_pdu: {}'.format(hex(status)))
def __init__(self, anchors, edge_length=0.25, distance_margin=1.5):
    """
    Initialise with a dictionary of anchors (id->(x,y), and other parameters)
    """
    super(LeDLL, self).__init__()
    self.anchors = anchors
    self.edge_length = edge_length
    self.distance_margin = distance_margin
    # C-side buffers: flattened (x, y) pairs for the anchors, one distance
    # slot per anchor, and a 2-element output slot for the location.
    anchors_array = (ctypes.c_double * (len(anchors) * 2))()
    self.distances_array = (ctypes.c_double * len(anchors))()
    self.n_distance = ctypes.c_ubyte(len(anchors))
    self.location_array = (ctypes.c_double * 2)()
    # Fix: dict.keys() returns a view without .sort() on Python 3; use
    # sorted() (works on Python 2 as well) for a stable anchor ordering.
    self.anchor_ids = sorted(anchors.keys())
    for i, anchor_id in enumerate(self.anchor_ids):
        x, y = anchors[anchor_id]
        anchors_array[i * 2] = x
        anchors_array[i * 2 + 1] = y
    handle = self.le_create(ctypes.c_double(edge_length),
                            ctypes.c_double(distance_margin),
                            ctypes.c_byte(len(anchors)),
                            anchors_array)
    self.handle = ctypes.c_int(handle)
def insert(self, id, coordinates, obj = None):
    """Inserts an item into the index with the given coordinates.

    :param id: long integer
        Identifier for this index entry.  IDs need not be unique to be
        inserted into the index; uniqueness is the caller's responsibility
        if required.

    :param coordinates: sequence or array
        An object satisfying the numpy array protocol, providing the
        index's dimension * 2 coordinate pairs (the `mink` and `maxk`
        coordinates in each dimension) defining the bounds of the entry.

    :param obj: a pickleable object
        If not None, stored in the index alongside :attr:`id`.

    Example -- insert entry 4321 storing the object 42, with the default
    interleaved coordinate ordering::

        >>> from rtree import index
        >>> idx = index.Index()
        >>> idx.insert(4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42)
    """
    p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
    if obj is None:
        data, size = ctypes.c_ubyte(0), 0
    else:
        size, data, _pyserialized = self._serialize(obj)
    core.rt.Index_InsertData(self.handle, id, p_mins, p_maxs,
                             self.properties.dimension, data, size)
def receive_cbp():
    """Receive a count byte preceded payload.

    Returns the payload as a list of ints including the leading count
    byte, so Python len() of the result gives the total byte count.
    Raises RuntimeError on a non-OK radio.c result code.
    """
    ##bufsize = MAX_RX_SIZE
    bufsize = 255  # testing
    rxbuf = (ctypes.c_ubyte * bufsize)()
    # RADIO_RESULT radio_get_payload_cbp(uint8_t* buf, uint8_t buflen)
    status = radio_get_payload_cbp_fn(rxbuf, ctypes.c_ubyte(bufsize))
    if status != 0:  # RADIO_RESULT_OK
        raise RuntimeError("Receive failed, radio.c error code %s" % hex(status))
    # First payload byte is the count; copy count+1 bytes out of the buffer
    return [rxbuf[i] for i in range(1 + rxbuf[0])]
def setDisplayCharacter(self, row, column, character):
    """Sets a single character on the display.

    On linux, encoding the string with iso-8859-15 prevents a
    "UnicodeEncodeError" and keeps the displayed character consistent
    between windows and linux, e.g.
    textLCD.setDisplayCharacter(0, 0, chr(223).encode("iso-8859-15"))

    Parameters:
        row<int>: the index of the row to write the character to.
        column<int>: the index of the column to write the character to.
        character<char>: the character to display.

    Exceptions:
        RuntimeError - If current platform is not supported/phidget c dll
            cannot be found
        PhidgetException: If this Phidget is not opened and attached, or
            if the row index is invalid.
    """
    # getDll() raises RuntimeError itself when the C dll is unavailable
    dll = PhidgetLibrary.getDll()
    result = dll.CPhidgetTextLCD_setDisplayCharacter(
        self.handle, c_int(row), c_int(column), c_ubyte(character)
    )
    if result > 0:
        raise PhidgetException(result)
def transmit(payload, outer_times=1, inner_times=8, outer_delay=0):
    """Transmit a single payload using the present modulation scheme.

    The payload is repeated *inner_times* times per hardware call
    (extern void radio_transmit(uint8_t* payload, uint8_t len, uint8_t repeats);)
    and the hardware call itself is repeated *outer_times* times with
    *outer_delay* seconds between calls.

    Raises:
        ValueError: if the frame length or repeat counts are out of range.
    """
    framelen = len(payload)
    if framelen < 1 or framelen > 255:
        raise ValueError("frame len must be 1..255")
    if outer_times < 1:
        raise ValueError("outer_times must be >0")
    if inner_times < 1 or inner_times > 255:
        # Fix: message previously said "0..255", contradicting the check
        raise ValueError("inner_times must be 1..255")
    # Fix: framelen was computed twice; build the C frame buffer once
    txframe = (ctypes.c_ubyte * framelen)(*payload)
    repeats = ctypes.c_ubyte(inner_times)
    for _ in range(outer_times):
        # TODO: transmit() will mode change if required.  This means that
        # outer_times will keep popping and pushing the mode; that might
        # be ok, as it will force all the flags to clear?
        radio_transmit_fn(txframe, framelen, repeats)
        if outer_delay != 0:
            time.sleep(outer_delay)
def get_devices():
    """Gets a list of the XenQore devices that is detected by driver
    And it initializes the nmengine library

    Returns a (result_code, devices) tuple where devices is a list of
    Device structures filled in by xq_get_devices.
    """
    global _lib
    result = SUCCESS
    devices = []
    if _lib is None:  # fix: identity comparison with None (was `== None`)
        # Load the appropriate library for the operating platform;
        # Linux and any unknown platform fall back to the .so.
        system = platform.uname()[0]
        if system == 'Windows':
            name = _lib_path + 'xenqore.dll'
        elif system == 'Darwin':
            name = _lib_path + 'libxenqore.dylib'
        else:
            name = _lib_path + 'libxenqore.so'
        _lib = ctypes.cdll.LoadLibrary(name)
    # Up to five device information is inquired; the driver writes the
    # actual count back into detect_count.
    detect_count = ctypes.c_ubyte(5)
    devices_tmp = (Device * detect_count.value)()
    result = _lib.xq_get_devices(ctypes.byref(devices_tmp), ctypes.byref(detect_count))
    for i in range(detect_count.value):
        devices.append(devices_tmp[i])
    return result, devices
def put_tms_tdi_bits(self, buffer, cbitpairs, overlap=False, want_reply=False):
    """Shift *cbitpairs* TMS/TDI bit pairs out of *buffer*; optionally
    capture and return the reply bytes as a list."""
    # TODO: reconsider this allocation strategy if speed or memory usage becomes an issue
    import ctypes
    send_buffer = (ctypes.c_ubyte * len(buffer))(
        *(byte & 0xff for byte in buffer))
    recv_buffer = (ctypes.c_ubyte * len(buffer))() if want_reply else None
    ok = lowlevel.DjtgPutTmsTdiBits(self._hif, send_buffer, recv_buffer,
                                    cbitpairs, overlap)
    if not ok:
        raise JtagError('General Jtag Error', 'Unable to send tms, tdi bits')
    if want_reply:
        return recv_buffer[:]
def test_clock(self, mode):
    """
    Test the sample clock pin (CLK).

    Exercises the CLK pin in output mode and returns the value read at
    the pin for input testing.  Return the mode to input after testing
    the pin.

    Args:
        mode (int): The CLK pin mode
            * 0 = input
            * 1 = output low
            * 2 = output high
            * 3 = output 1 kHz square wave

    Returns:
        int: the value read at the CLK pin after applying the mode (0 or 1).

    Raises:
        HatError: the board is not initialized, does not respond, or
            responds incorrectly.
        ValueError: the mode is invalid.
    """
    if not self._initialized:
        raise HatError(self._address, "Not initialized.")
    if mode not in (0, 1, 2, 3):
        raise ValueError("Invalid mode. Must be 0-3.")
    pin_value = c_ubyte()
    status = self._lib.mcc118_test_clock(self._address, mode, byref(pin_value))
    if status == self._RESULT_BUSY:
        raise HatError(self._address,
                       "Cannot test the CLK pin while a scan is running.")
    if status != self._RESULT_SUCCESS:
        raise HatError(self._address, "Incorrect response.")
    return pin_value.value
def receive_data(self, endpoint, attributes, maxPacketSize, dataptr, p_length, timeout):
    # Emulated HID keyboard endpoint handler: types self.text one
    # character per poll, alternating press/release report packets.
    # NOTE(review): this unconditional `return` makes everything below
    # dead code -- looks like the handler was deliberately stubbed out
    # during debugging; confirm before removing.
    return
    if endpoint == 0x82:
        return
    if self.ptr < len(self.text):
        pkt = [0x00] * 8  # presumably an 8-byte HID boot-keyboard report -- TODO confirm
        if self.key_presed:  # sic: attribute name is misspelled throughout
            # Key currently pressed: send an all-zero (release) packet
            # and advance to the next character.
            self.key_presed = False
            self.ptr += 1
        else:
            # Key currently released: look up the next character's
            # keycode/modifier and send a press packet.
            self.key_presed = True
            key, mod = keymap.get_keycode(self.text[self.ptr])
            if key:
                pkt[2] = key
            if mod:
                pkt[0] = mod
        # Copy the packet into the caller-supplied buffer and report its length
        for i in range(len(pkt)):
            dataptr[i] = c_ubyte(pkt[i])
        p_length[0] = len(pkt)
def broadcast_ping(self, maxId, port=0, doPrint=False):
    """Broadcast-ping Dynamixel IDs 1..maxId on *port*.

    Returns True when all maxId servos answered, False on a communication
    error (and None when only some servos answered, matching the original
    implicit fall-through).
    """
    dynamixel.broadcastPing(self.port_num[port], self.protocol)
    comm_result = dynamixel.getLastTxRxResult(self.port_num[port], self.protocol)
    if comm_result != COMM_SUCCESS:
        if doPrint:
            print(dynamixel.getTxRxResult(self.protocol, comm_result))
        return False
    if doPrint:
        print("Detected Dynamixel : ")
    nb_detected = 0
    for dxl_id in range(1, int(maxId) + 1):
        answered = ctypes.c_ubyte(
            dynamixel.getBroadcastPingResult(
                self.port_num[port], self.protocol, dxl_id)).value
        if answered:
            nb_detected += 1
            if doPrint:
                print("[ID:%03d]" % (dxl_id))
    if nb_detected == maxId:
        return True
def rfid_reader_info(reader):
    ''' Print reader info '''
    info_buf = (ctypes.c_ubyte * 300)()
    reader_info = libfeisc.FEISC_0x66_ReaderInfo
    reader_info.argtypes = [
        ctypes.c_int, ctypes.c_ubyte, ctypes.c_ubyte,
        ctypes.POINTER(ctypes.c_ubyte), ctypes.c_int
    ]
    status = reader_info(reader, ctypes.c_ubyte(255), 0x00, info_buf, 0)
    if status == 0:
        print('Reader: {}'.format(info_buf[4]))
    elif status > 0:
        print('--- ! Reader info status {}'.format(rfid_status_text(status)))
    else:
        print('--- ! Reader info error {}'.format(status))
        print(rfid_error_text(ctypes.c_int(status)))
def thread_debug(reserved):
    # Background thread: polls the firmware for debug messages over the
    # EC-Lib connection every 250 ms until glob_stop is set.
    global glob_firmware_loaded
    global glob_stop
    global glob_conn_id
    # loop
    while not glob_stop:
        # Only poll once the firmware is loaded and debug output is enabled
        if glob_firmware_loaded and cfg_debug_enabled:
            msg_size = 2048
            msg = ctypes.create_string_buffer(msg_size)
            size = ctypes.c_int32(msg_size)
            # Serialise access to the shared eclib connection
            glob_eclib_mutex.acquire()
            error = eclib.BL_GetMessage(glob_conn_id, ctypes.c_ubyte(cfg_channel), ctypes.byref(msg), ctypes.byref(size))
            glob_eclib_mutex.release()
            if error != eclib.ErrorCodeEnum.ERR_NOERROR:
                exit(EXIT_GetMessage, error)
            elif size.value > 0:
                log("Firmware debug: " + msg.value)
            else:
                pass  # no message pending
        time.sleep(0.25)
def read_file(file_p, format, size, offset, endianess='>'):
    """Read *size* bytes at *offset* from *file_p* and unpack one value.

    Args:
        file_p: open binary file object.
        format: one of the module struct-format constants (INTEGER,
            UINTEGER, SHORT, USHORT, BYTE, FLOAT, LONG, DOUBLE).
        size: number of bytes to read.
        offset: file offset to seek to (relative to FILE_BEGIN).
        endianess: struct byte-order prefix.  Note the default '>' is
            big-endian (the original local name "little_endian_format"
            was misleading).

    Returns:
        The unpacked value wrapped in the matching ctypes type.

    Raises:
        ValueError: if *format* is not a known format constant
            (previously this silently fell through and returned None).
    """
    struct_format = endianess + format
    file_p.seek(offset, FILE_BEGIN)
    buf = file_p.read(size)
    value = struct.unpack(struct_format, buf)[0]
    # Map the struct format code onto the corresponding ctypes wrapper
    wrappers = {
        INTEGER: c_int,
        UINTEGER: c_uint,
        SHORT: c_short,
        USHORT: c_ushort,
        BYTE: c_ubyte,
        FLOAT: c_float,
        LONG: c_long,
        DOUBLE: c_double,
    }
    try:
        return wrappers[format](value)
    except KeyError:
        raise ValueError("Unknown format code: %r" % (format,))
def insert(self, id, coordinates, velocities, tstart, tend, obj=None):
    """Insert a moving object into the TPR-tree index.

    *coordinates* gives the spatial bounds, *velocities* the per-dimension
    velocity bounds, and [tstart, tend] the time interval; *obj*, if not
    None, is serialized and stored alongside the entry.
    """
    core.rt.Index_InsertTPData.argtypes = [
        ctypes.c_void_p,
        ctypes.c_int64,
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.c_double,
        ctypes.c_double,
        ctypes.c_uint32,
        ctypes.POINTER(ctypes.c_ubyte),
        ctypes.c_uint32
    ]
    # NOTE(review): restype/errcheck are set on Index_InsertData, not on
    # Index_InsertTPData -- looks like a copy-paste slip, preserved as-is.
    core.rt.Index_InsertData.restype = ctypes.c_int
    core.rt.Index_InsertData.errcheck = core.check_return
    p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
    v_mins, v_maxs = self.get_coordinate_pointers(velocities)
    if obj is None:
        data, size = ctypes.c_ubyte(0), 0
    else:
        size, data, _pyserialized = self._serialize(obj)
    core.rt.Index_InsertTPData(self.handle, id, p_mins, p_maxs,
                               v_mins, v_maxs, tstart, tend,
                               self.properties.dimension, data, size)
def CaptureLoop(self):
    # Poll the Tira dll for captured IR data until it arrives or
    # self.shouldRun is cleared; the received bytes are stored as a
    # string in self.result.
    #featureValue = c_uint(0x1)
    #self.dll.tira_access_feature(0xF0000000, 1, byref(featureValue), 0x0)
    dll = self.dll
    if dll is None:
        return
    dll.tira_start_capture()
    size = c_int()
    data = pointer(c_ubyte())
    while self.shouldRun:
        dll.tira_get_captured_data(byref(data), byref(size))
        if size.value != 0:
            break  # capture complete -- dll filled `data`/`size`
        else:
            time.sleep(0.01)
    if self.shouldRun:
        # Copy the dll-owned buffer into a Python string before releasing it
        result = ""
        for x in data[:size.value]:
            result += chr(x)
        self.result = result
        dll.tira_delete(data)  # free the buffer allocated by the dll
    else:
        dll.tira_cancel_capture()
def ReceiveThread(self, stopEvent):
    # IR receive loop: poll the dll for infra-red time-code diagrams and
    # feed them to the decoder until stopEvent is set.
    dll = self.dll
    # Bind frequently-used attributes to locals for the polling loop
    ledReset = self.ledTimer.Reset
    ledIrOnFlags = self.ledIrOnFlags
    timeCodeDiagram = (c_ubyte * 256)()
    diagramLength = c_int(0)
    portDirection = c_ubyte()
    dll.DoGetDataPortDirection(byref(portDirection))
    portDirection.value |= 3  # force the low two data-port bits to output
    dll.DoSetDataPortDirection(portDirection)
    dll.DoSetOutDataPort(self.ledIrOffFlags)
    decode = self.irDecoder.Decode
    # Priming read: discard anything captured before the loop starts
    dll.DoGetInfraCode(timeCodeDiagram, 0, byref(diagramLength))
    while not stopEvent.isSet():
        dll.DoGetInfraCode(timeCodeDiagram, 0, byref(diagramLength))
        if diagramLength.value:
            # A code arrived: flash the IR indicator (presumably an LED --
            # ledReset(1) schedules it off again) and decode the diagram.
            dll.DoSetOutDataPort(ledIrOnFlags)
            ledReset(1)
            timeCodeDiagram[diagramLength.value] = 255  # terminator for the decoder
            decode(timeCodeDiagram, diagramLength.value + 1)
        else:
            stopEvent.wait(0.01)
    dll.DoSetOutDataPort(self.ledIrOffFlags)
def rfid_reader_lan_configuration_write(reader, conf, ip):
    ''' Write LAN reader configuration.

    Input is the reader handler, the 14 bytes configuration block as
    obtained from the 'read' function above and an array with the new ip,
    e.g. [192, 168, 142, 12].  This will be written to the EPPROM and
    requires a SystemReset to be saved in the configuration.
    '''
    if len(ip) != 4:
        print('Invalid ip, expected array of 4 numbers.')
        return False
    write_conf = libfeisc.FEISC_0x81_WriteConfBlock
    write_conf.argtypes = [
        ctypes.c_int, ctypes.c_ubyte, ctypes.c_ubyte,
        ctypes.POINTER(ctypes.c_ubyte), ctypes.c_int
    ]
    # Overwrite the first four config bytes with the new IP address
    conf[0], conf[1], conf[2], conf[3] = ip[0], ip[1], ip[2], ip[3]
    status = write_conf(reader, ctypes.c_ubyte(255), 168, conf, 0)
    if status == 0:
        return True
    if status > 0:
        print('--- ! Reader conf status {}'.format(rfid_status_text(status)))
        return False
    print('--- ! Reader conf error {}'.format(status))
    print(rfid_error_text(ctypes.c_int(status)))
    return False
def sync_write(port, block, data):
    """ Write to multiple DXLs in synchronized fashion

    Controls multiple Dynamixels with a single Instruction Packet
    transmission, reducing communication time when several servos share
    one channel.  SYNC WRITE targets a single register address with an
    identical data length on every servo, using the broadcast ID.

    Args:
        port: Dynamixel portHandler object
        block: An instance of Contiguous Registers (defined in dxl_reg)
               containing the register to write to
        data: A zip of 2 lists - dxl_ids and values.
    """
    address = block.offset
    length = block.width
    group_num = dynamixel.groupSyncWrite(port, PROTOCOL_VERSION, address, length)
    # Stage one parameter entry per servo
    for dxl_id, value in data:
        added = ctypes.c_ubyte(
            dynamixel.groupSyncWriteAddParam(group_num, dxl_id, value, length)).value
        if added != 1:
            print(added)
            print("[ID:%03d] groupSyncWrite addparam failed" % (dxl_id))
    # Transmit the assembled syncwrite packet
    dynamixel.groupSyncWriteTxPacket(group_num)
    comm_result = dynamixel.getLastTxRxResult(port, PROTOCOL_VERSION)
    if comm_result != COMM_SUCCESS:
        print(dynamixel.getTxRxResult(PROTOCOL_VERSION, comm_result))
    # Clear syncwrite parameter storage
    dynamixel.groupSyncWriteClearParam(group_num)
def interface_list(self):
    """`interface_list`.

    Queries the connected DGI device for available interfaces.  Refer to
    the DGI documentation to resolve the IDs.

    `int interface_list(uint32_t dgi_hndl, unsigned char* interfaces,
    unsigned char* count)` -- *interfaces* should hold at least 10
    elements (more to be future proof); *count* receives the number of
    entries written.

    :return: List of available interfaces
    :rtype: list(int)
    :raises: :exc:`DeviceReturnError`
    """
    interfaces = (c_ubyte * NUM_INTERFACES)()
    interfaceCount = c_ubyte()
    res = self.dgilib.interface_list(self.dgi_hndl, byref(interfaces),
                                     byref(interfaceCount))
    if self.verbose:
        print(
            f"\t{res} interface_list: {interfaces[:interfaceCount.value]},"
            f" interfaceCount: {interfaceCount.value}")
    if res:
        raise DeviceReturnError(f"interface_list returned: {res}")
    return interfaces[:interfaceCount.value]
def sendPdnConnectivityReq(self, ue_id, apn, pdn_type=1, pcscf_addr_type=None):
    """Send a standalone PDN Connectivity Request for *ue_id* to *apn*.

    pdn_type: 1 = IPv4, 2 = IPv6, 3 = IPv4v6.  When *pcscf_addr_type* is
    set, the protocol configuration options are populated as well.
    """
    req = s1ap_types.uepdnConReq_t()
    req.ue_Id = ue_id
    req.reqType = 1  # Initial Request
    req.pdnType_pr.pres = 1
    req.pdnType_pr.pdn_type = pdn_type
    req.pdnAPN_pr.pres = 1
    req.pdnAPN_pr.len = len(apn)
    # The APN travels in a fixed 100-byte buffer; longer names are truncated
    req.pdnAPN_pr.pdn_apn = (ctypes.c_ubyte * 100)(
        *[ctypes.c_ubyte(ord(c)) for c in apn[:100]]
    )
    print("********* PDN type", pdn_type)
    # Populate PCO if pcscf_addr_type is set
    if pcscf_addr_type:
        print("********* pcscf_addr_type", pcscf_addr_type)
        self._s1_util.populate_pco(req.protCfgOpts_pr, pcscf_addr_type)
    self.s1_util.issue_cmd(s1ap_types.tfwCmd.UE_PDN_CONN_REQ, req)
    print("************* Sending Standalone PDN Connectivity Request\n")
def __init__(self, channel, bitrate=500000, poll_interval=0.01, **kwargs):
    """
    :param int channel: Device number
    :param int bitrate: Bitrate in bits/s
    :param float poll_interval: Poll interval in seconds when reading messages
    """
    if iscan is None:
        raise ImportError("Could not load isCAN driver")
    self.channel = ctypes.c_ubyte(int(channel))
    self.channel_info = "IS-CAN: %s" % channel
    if bitrate not in self.BAUDRATES:
        choices = ", ".join(str(rate) for rate in self.BAUDRATES)
        raise ValueError("Invalid bitrate, choose one of " + choices)
    self.poll_interval = poll_interval
    iscan.isCAN_DeviceInitEx(self.channel, self.BAUDRATES[bitrate])
    super(IscanBus, self).__init__(channel=channel,
                                   bitrate=bitrate,
                                   poll_interval=poll_interval,
                                   **kwargs)
def test_esm_information(self):
    """ Testing of sending Esm Information procedure

    Drives one UE through a full attach with the ESM information
    transfer flag set: attach request, authentication, security mode,
    ESM Information Request/Response (APN "magma.ipv4"), context setup,
    attach accept/complete, EMM information, and finally a switch-off
    detach with context release.
    """
    num_ues = 1
    self._s1ap_wrapper.configUEDevice(num_ues)
    print("************************* sending Attach Request for ue-id : 1")
    attach_req = s1ap_types.ueAttachRequest_t()
    attach_req.ue_Id = 1
    sec_ctxt = s1ap_types.TFW_CREATE_NEW_SECURITY_CONTEXT
    id_type = s1ap_types.TFW_MID_TYPE_IMSI
    eps_type = s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH
    attach_req.mIdType = id_type
    attach_req.epsAttachType = eps_type
    attach_req.useOldSecCtxt = sec_ctxt
    # enabling ESM Information transfer flag
    attach_req.eti.pres = 1
    attach_req.eti.esm_info_transfer_flag = 1
    print("Sending Attach Request ue-id", attach_req.ue_Id)
    self._s1ap_wrapper._s1_util.issue_cmd(
        s1ap_types.tfwCmd.UE_ATTACH_REQUEST, attach_req)
    response = self._s1ap_wrapper.s1_util.get_response()
    self.assertEqual(response.msg_type,
                     s1ap_types.tfwCmd.UE_AUTH_REQ_IND.value)
    print("Received auth req ind ")
    auth_res = s1ap_types.ueAuthResp_t()
    auth_res.ue_Id = 1
    sqn_recvd = s1ap_types.ueSqnRcvd_t()
    sqn_recvd.pres = 0
    auth_res.sqnRcvd = sqn_recvd
    print("Sending Auth Response ue-id", auth_res.ue_Id)
    self._s1ap_wrapper._s1_util.issue_cmd(s1ap_types.tfwCmd.UE_AUTH_RESP,
                                          auth_res)
    response = self._s1ap_wrapper.s1_util.get_response()
    self.assertEqual(response.msg_type,
                     s1ap_types.tfwCmd.UE_SEC_MOD_CMD_IND.value)
    print("Received Security Mode Command ue-id", auth_res.ue_Id)
    time.sleep(1)
    sec_mode_complete = s1ap_types.ueSecModeComplete_t()
    sec_mode_complete.ue_Id = 1
    self._s1ap_wrapper._s1_util.issue_cmd(
        s1ap_types.tfwCmd.UE_SEC_MOD_COMPLETE, sec_mode_complete)
    # Esm Information Request indication
    print("Received Esm Information Request ue-id",
          sec_mode_complete.ue_Id)
    response = self._s1ap_wrapper.s1_util.get_response()
    self.assertEqual(response.msg_type,
                     s1ap_types.tfwCmd.UE_ESM_INFORMATION_REQ.value)
    esm_info_req = response.cast(s1ap_types.ueEsmInformationReq_t)
    # Sending Esm Information Response
    print("Sending Esm Information Response ue-id",
          sec_mode_complete.ue_Id)
    esm_info_response = s1ap_types.ueEsmInformationRsp_t()
    esm_info_response.ue_Id = 1
    # Echo back the transaction id from the request
    esm_info_response.tId = esm_info_req.tId
    esm_info_response.pdnAPN_pr.pres = 1
    s = "magma.ipv4"
    esm_info_response.pdnAPN_pr.len = len(s)
    esm_info_response.pdnAPN_pr.pdn_apn = (ctypes.c_ubyte * 100)(
        *[ctypes.c_ubyte(ord(c)) for c in s[:100]])
    self._s1ap_wrapper._s1_util.issue_cmd(
        s1ap_types.tfwCmd.UE_ESM_INFORMATION_RSP, esm_info_response)
    response = self._s1ap_wrapper.s1_util.get_response()
    self.assertEqual(response.msg_type,
                     s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value)
    response = self._s1ap_wrapper.s1_util.get_response()
    self.assertEqual(response.msg_type,
                     s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND.value)
    # Trigger Attach Complete
    attach_complete = s1ap_types.ueAttachComplete_t()
    attach_complete.ue_Id = 1
    self._s1ap_wrapper._s1_util.issue_cmd(
        s1ap_types.tfwCmd.UE_ATTACH_COMPLETE, attach_complete)
    # Wait on EMM Information from MME
    self._s1ap_wrapper._s1_util.receive_emm_info()
    print("*** Running UE detach ***")
    # Now detach the UE
    detach_req = s1ap_types.uedetachReq_t()
    detach_req.ue_Id = 1
    detach_req.ueDetType = (
        s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value)
    self._s1ap_wrapper._s1_util.issue_cmd(
        s1ap_types.tfwCmd.UE_DETACH_REQUEST, detach_req)
    # Wait for UE context release command
    response = self._s1ap_wrapper.s1_util.get_response()
    self.assertEqual(response.msg_type,
                     s1ap_types.tfwCmd.UE_CTX_REL_IND.value)
def read ():
    # NOTE(review): this definition appears truncated — it only allocates a
    # zeroed byte and falls off the end (returns None). Confirm against the
    # original source before relying on it.
    data= ct.c_ubyte(0)
clock1_pin = 22
stat = None
x = None
y = None

wiringpi.wiringPiSetupGpio()
# NOTE(review): both pinMode calls target data_pin; the second may have been
# intended for clock1_pin — confirm against the original wiring.
wiringpi.pinMode(data_pin, 0)
wiringpi.pinMode(data_pin, 1)


def gohi(pin):
    """Release the line: configure *pin* as input (mode 0) and write high."""
    wiringpi.pinMode(pin, 0)
    wiringpi.digitalWrite(pin, 1)


def golow(pin):
    """Pull the line low: configure *pin* as output (mode 1) and write low."""
    wiringpi.pinMode(pin, 1)
    wiringpi.digitalWrite(pin, 0)


def write(data):
    """Bit-bang one byte out on the data/clock lines, LSB first.

    :param data: int in range 0-255 to transmit.

    Fixes from review: the original declared the parameter as
    ``ct.c_ubyte(data)`` (a syntax error), called the undefined ``golo``
    instead of ``golow``, and both ``while`` busy-wait loops were missing
    colons and bodies. The parity accumulator is kept although the visible
    portion never updates it (the function appears truncated here —
    confirm the remainder against the original source).
    """
    parity = ct.c_ubyte(1)
    gohi(data_pin)
    gohi(clock1_pin)
    ti.sleep(0.0003)
    golow(clock1_pin)
    ti.sleep(0.0003)
    golow(data_pin)
    ti.sleep(0.00001)
    gohi(clock1_pin)
    # Wait for the device to pull the clock low before sending data bits.
    while wiringpi.digitalRead(clock1_pin) == 1:
        pass
    for i in range(0, 8):
        if (data & 1) == 1:
            gohi(data_pin)
        else:
            golow(data_pin)
        # Wait for the device to release the clock before the next bit.
        while wiringpi.digitalRead(clock1_pin) == 0:
            pass
def Concat2d( out_data # <mutable> output
            , in_dataA # in_rowsA x in_colsA
            , in_dataB # in_rowsB x in_colsB
            , dim=0
            , rigor=False
            , verbose=False):
    """Return True on success, otherwise return False.

    Applies a 2D concatenation over two 2-dimensional input arrays by
    dispatching to the dtype-specific native DLR routine.
    Note that all nd-array arguments are NumPy (mutable), not PyTorch
    tensors (immutable).

    :param out_data: <mutable> output data, out_data[][]
    :param in_dataA: input data, in_dataA[in_rowsA][in_colsA]
    :param in_dataB: input data, in_dataB[in_rowsB][in_colsB]
    :param dim:      dimension to concatenate along, 0 or 1
    :param rigor:    check values rigorously when 'True'
    :param verbose:  output more messages when 'True'
    :return: 'True' on success, 'False' on failure.

    The following are derived from the input arguments:
    . out_rows, out_cols
    . in_rowsA, in_colsA
    . in_rowsB, in_colsB
    . dim

    Following is an example usage for PyTorch.
        Concat2d( tensor_out_data.data.numpy()
                , tenso_in_dataA.data.numpy()
                , tenso_in_dataB.data.numpy()
                , dim
                , rigor=True
                , verbose=True)
    """
    if rigor:
        # Validate ranks, dim, and the derived output shape before the C call.
        error =0
        if (out_data.ndim!=2):
            error += 1
            if verbose: dlr_common.DlrError("out_data is not 2 dim")
        if (in_dataA.ndim!=2):
            error += 1
            if verbose: dlr_common.DlrError("in_data is not 2 dim")
        if (in_dataB.ndim!=2):
            error += 1
            if verbose: dlr_common.DlrError("in_data is not 2 dim")
        if (dim!=0) and (dim!=1):
            error += 1
            if verbose: dlr_common.DlrError("dim should be 0 or 1")
        t_in_rowsA = in_dataA.shape[0]
        t_in_colsA = in_dataA.shape[1]
        t_in_rowsB = in_dataB.shape[0]
        t_in_colsB = in_dataB.shape[1]
        if dim==0:
            # Vertical stacking: rows add up, columns must match.
            t_out_rows = in_dataA.shape[0]+in_dataB.shape[0]
            t_out_cols = in_dataA.shape[1]
        else:
            # Horizontal stacking: columns add up, rows must match.
            t_out_rows = in_dataA.shape[0]
            t_out_cols = in_dataA.shape[1]+in_dataB.shape[1]
        if (t_out_rows!=out_data.shape[0]):
            error += 1
            if verbose: dlr_common.DlrError("out data row count error")
        if (t_out_cols!=out_data.shape[1]):
            error += 1
            if verbose: dlr_common.DlrError("out data column count error")
        if dim==0:
            if (t_in_colsA!=t_in_colsB):
                error += 1
                if verbose: dlr_common.DlrError("in dimension eror")
        else:
            if (t_in_rowsA!=t_in_rowsB):
                error += 1
                if verbose: dlr_common.DlrError("in dimension eror")
        if verbose:
            dlr_common.DlrInfo(f"out_data={out_data.shape}")
            dlr_common.DlrInfo(f"in_dataA={in_dataA.shape}")
            dlr_common.DlrInfo(f"in_dataB={in_dataB.shape}")
            dlr_common.DlrInfo(f"dim ={dim}")
        if (error!=0):
            dlr_common.DlrError("parameter mis-match"); return False
    # Select the native entry point and C element type from the dtype.
    #_fname=''
    #_ctype=''
    if out_data.dtype.type == np.int32:
        _fname = 'Concat2dInt'
        _ctype = ctypes.c_int
    elif out_data.dtype.type == np.float32:
        _fname = 'Concat2dFloat'
        _ctype = ctypes.c_float
    elif out_data.dtype.type == np.float64:
        _fname = 'Concat2dDouble'
        _ctype = ctypes.c_double
    else:
        dlr_common.DlrError("not support "+str(out_data.dtype.type))
        return False
    _Concat2d=dlr_common.WrapFunction(dlr_common._dlr
              ,_fname
              , None # return type
              ,[ctypes.POINTER(_ctype) # output
               ,ctypes.POINTER(_ctype) # input
               ,ctypes.POINTER(_ctype) # input
               ,ctypes.c_ushort # in_rowsA
               ,ctypes.c_ushort # in_colsA
               ,ctypes.c_ushort # in_rowsB
               ,ctypes.c_ushort # in_colsB
               ,ctypes.c_ubyte # dim
               ,ctypes.c_int # rigor
               ,ctypes.c_int # verbose
               ])
    # Marshal NumPy buffers as raw C pointers (no copies are made).
    CP_out_data = out_data.ctypes.data_as(ctypes.POINTER(_ctype))
    CP_in_dataA = in_dataA.ctypes.data_as(ctypes.POINTER(_ctype))
    CP_in_dataB = in_dataB.ctypes.data_as(ctypes.POINTER(_ctype))
    CP_in_rowsA = ctypes.c_ushort(in_dataA.shape[0])
    CP_in_colsA = ctypes.c_ushort(in_dataA.shape[1])
    CP_in_rowsB = ctypes.c_ushort(in_dataB.shape[0])
    CP_in_colsB = ctypes.c_ushort(in_dataB.shape[1])
    CP_dim = ctypes.c_ubyte(dim)
    CP_rigor = 1 if rigor else 0
    CP_verbose = 1 if verbose else 0
    _Concat2d(CP_out_data
             ,CP_in_dataA
             ,CP_in_dataB
             ,CP_in_rowsA
             ,CP_in_colsA
             ,CP_in_rowsB
             ,CP_in_colsB
             ,CP_dim
             ,CP_rigor
             ,CP_verbose
             )
    return True
def createTrack(self, output):
    """Concatenate this track's frames into a single raw image *output*.

    Pads each frame left/right (via the native swst_resample routine) so
    all frames share a common near/far range, writes the combined aux
    file, then calls the native frame_concatenate routine to stitch the
    resampled frames together by acquisition time. Finally populates
    self._frame with the merged metadata and a raw image object.

    :param output: path of the concatenated raw image to produce
        (an ``output + '.aux'`` file is written alongside it).
    :raises ValueError: if a frame's Xmax is not an integer.
    """
    import os
    from operator import itemgetter
    from isceobj import Constants as CN
    from ctypes import cdll, c_char_p, c_int, c_ubyte, byref
    lib = cdll.LoadLibrary(os.path.dirname(__file__) + '/concatenate.so')
    # Perhaps we should check to see if Xmin is 0, if it is not, strip
    # off the header
    self.logger.info(
        "Adjusting Sampling Window Start Times for all Frames")
    # Iterate over each frame object, and calculate the number of samples
    # with which to pad it on the left and right
    outputs = []
    totalWidth = 0
    auxList = []
    for frame in self._frames:
        # Calculate the amount of padding; the factor 2 accounts for the
        # two bytes per complex sample in the raw file.
        thisNearRange = frame.getStartingRange()
        thisFarRange = frame.getFarRange()
        left_pad = int(
            round((thisNearRange - self._nearRange) *
                  frame.getInstrument().getRangeSamplingRate() /
                  (CN.SPEED_OF_LIGHT / 2.0))) * 2
        right_pad = int(
            round((self._farRange - thisFarRange) *
                  frame.getInstrument().getRangeSamplingRate() /
                  (CN.SPEED_OF_LIGHT / 2.0))) * 2
        width = frame.getImage().getXmax()
        if width - int(width) != 0:
            raise ValueError("frame Xmax is not an integer")
        else:
            width = int(width)
        input = frame.getImage().getFilename()
        # tempOutput = os.path.basename(os.tmpnam()) # Some temporary filename
        # NOTE(review): the file is deleted when the context exits — only
        # the generated name is kept (mild create/use race).
        with tempfile.NamedTemporaryFile(dir='.') as f:
            tempOutput = f.name
        pad_value = int(frame.getInstrument().getInPhaseValue())
        # Track the widest padded frame; it defines the output width.
        if totalWidth < left_pad + width + right_pad:
            totalWidth = left_pad + width + right_pad
        # Resample this frame with swst_resample
        input_c = c_char_p(bytes(input, 'utf-8'))
        output_c = c_char_p(bytes(tempOutput, 'utf-8'))
        width_c = c_int(width)
        left_pad_c = c_int(left_pad)
        right_pad_c = c_int(right_pad)
        pad_value_c = c_ubyte(pad_value)
        lib.swst_resample(input_c, output_c, byref(width_c),
                          byref(left_pad_c), byref(right_pad_c),
                          byref(pad_value_c))
        outputs.append(tempOutput)
        auxList.append(frame.auxFile)
    # this step construct the aux file withe the pulsetime info for the
    # all set of frames
    self.createAuxFile(auxList, output + '.aux')
    # This assumes that all of the frames to be concatenated are sampled
    # at the same PRI
    prf = self._frames[0].getInstrument().getPulseRepetitionFrequency()
    # Calculate the starting output line of each scene
    i = 0
    lineSort = []
    # the listSort has 2 elements: a line start number which is the
    # position of that specific frame computed from acquisition time and
    # the corresponding file name
    for frame in self._frames:
        startLine = int(
            round(
                DTU.timeDeltaToSeconds(frame.getSensingStart() -
                                       self._startTime) * prf))
        lineSort.append([startLine, outputs[i]])
        i += 1
    sortedList = sorted(
        lineSort,
        key=itemgetter(0))  # sort by line number i.e. acquisition time
    startLines, outputs = self.reAdjustStartLine(sortedList, totalWidth)
    self.logger.info("Concatenating Frames along Track")
    # this is a hack since the length of the file could be actually
    # different from the one computed using start and stop time. it only
    # matters the last frame added
    import os
    fileSize = os.path.getsize(outputs[-1])
    numLines = fileSize // totalWidth + startLines[-1]
    totalLines_c = c_int(numLines)
    # Next, call frame_concatenate
    width_c = c_int(
        totalWidth
    )  # Width of each frame (with the padding added in swst_resample)
    numberOfFrames_c = c_int(len(self._frames))
    inputs_c = (c_char_p * len(outputs))(
    )  # These are the inputs to frame_concatenate, but the outputs from
    # swst_resample
    for kk in range(len(outputs)):
        inputs_c[kk] = bytes(outputs[kk], 'utf-8')
    output_c = c_char_p(bytes(output, 'utf-8'))
    startLines_c = (c_int * len(startLines))()
    startLines_c[:] = startLines
    lib.frame_concatenate(output_c, byref(width_c), byref(totalLines_c),
                          byref(numberOfFrames_c), inputs_c, startLines_c)
    # Clean up the temporary output files from swst_resample
    for file in outputs:
        os.unlink(file)
    # Carry metadata from the first frame / track bounds onto the merged
    # frame object.
    orbitNum = self._frames[0].getOrbitNumber()
    first_line_utc = self._startTime
    last_line_utc = self._stopTime
    centerTime = DTU.timeDeltaToSeconds(last_line_utc -
                                        first_line_utc) / 2.0
    center_line_utc = first_line_utc + datetime.timedelta(
        microseconds=int(centerTime * 1e6))
    procFac = self._frames[0].getProcessingFacility()
    procSys = self._frames[0].getProcessingSystem()
    procSoft = self._frames[0].getProcessingSoftwareVersion()
    pol = self._frames[0].getPolarization()
    xmin = self._frames[0].getImage().getXmin()
    self._frame.setOrbitNumber(orbitNum)
    self._frame.setSensingStart(first_line_utc)
    self._frame.setSensingMid(center_line_utc)
    self._frame.setSensingStop(last_line_utc)
    self._frame.setStartingRange(self._nearRange)
    self._frame.setFarRange(self._farRange)
    self._frame.setProcessingFacility(procFac)
    self._frame.setProcessingSystem(procSys)
    self._frame.setProcessingSoftwareVersion(procSoft)
    self._frame.setPolarization(pol)
    self._frame.setNumberOfLines(numLines)
    # NOTE(review): numberOfSamples is the last frame's width, not
    # totalWidth — confirm this is intentional.
    self._frame.setNumberOfSamples(width)
    # add image to frame
    rawImage = isceobj.createRawImage()
    rawImage.setByteOrder('l')
    rawImage.setFilename(output)
    rawImage.setAccessMode('r')
    rawImage.setWidth(totalWidth)
    rawImage.setXmax(totalWidth)
    rawImage.setXmin(xmin)
    self._frame.setImage(rawImage)
def alarm(msg):
    """Record *msg* via liblog.log_record_advanced with levels 1 and 32.

    NOTE(review): the meaning of the two byte arguments (presumably
    severity/category) is defined by liblog — confirm against its docs.
    """
    first_code = ctypes.c_ubyte(1)
    second_code = ctypes.c_ubyte(32)
    payload = ctypes.c_char_p(msg.encode())
    liblog.log_record_advanced(first_code, second_code, payload)
def LinearNd( out_data # ndim x out_size
            , in_data  # ndim x in_size
            , weight   # out_size x in_size
            , bias=None # out_size
            , rigor=False, verbose=False):
    """Return True on success, otherwise return False.

    Applies a linear (matrix-multiplication) layer over the input data by
    dispatching to the dtype-specific native DLR routine.
    Note that all nd-array arguments are NumPy (mutable), not PyTorch
    tensors (immutable).

    :param out_data: <mutable> output data, out_data[ndim][out_size]
    :param in_data:  input data, in_data[ndim][in_size]
    :param weight:   weight[out_size][in_size]
    :param bias:     bias for each output, bias[out_size] (or None)
    :param rigor:    check values rigorously when 'True'
    :param verbose:  output more messages when 'True'
    :return: 'True' on success, 'False' on failure.

    Fixes from review: the verbose weight-size message referenced the
    misspelled names ``t_weitht_dize_row``/``t_weight_size_col`` wrongly
    (NameError when rigor and verbose were both set); the rank-check
    messages said "1 dim" while checking for rank 2; and the in_size
    check reported "out_size mis-match".

    Following is an example usage for PyTorch.
        LinearNd( tensor_out_data.data.numpy() # ndim x out_size
                , tenso_in_data.data.numpy()   # ndim x in_size
                , tensor_weight.data.numpy()   # out_size x in_size
                , tensor_bias.data.numpy()     # out_size
                , rigor=True
                , verbose=True)
    """
    if rigor:
        # Validate ranks and the out/in/weight shape relations up front.
        error = 0
        if (out_data.ndim != 2):
            error += 1
            if verbose:
                dlr_common.DlrError("out_data is not 2 dim", flush=True)
        if (in_data.ndim != 2):
            error += 1
            if verbose:
                dlr_common.DlrError("in_data is not 2 dim", flush=True)
        if (weight.ndim != 2):
            error += 1
            if verbose:
                dlr_common.DlrError("weight is not 2 dim", flush=True)
        if (bias is not None) and (bias.ndim != 1):
            error += 1
            if verbose:
                dlr_common.DlrError(f"bias should be 1 dim: {bias.ndim}",
                                    flush=True)
        t_out_ndim = out_data.shape[0]
        t_out_size = out_data.shape[1]
        t_in_ndim = in_data.shape[0]
        t_in_size = in_data.shape[1]
        t_weight_size_row = weight.shape[0]
        t_weight_size_col = weight.shape[1]
        if (t_out_ndim != t_in_ndim):
            error += 1
            dlr_common.DlrError("dimension mis-match", flush=True)
        if (t_out_size != t_weight_size_row):
            error += 1
            dlr_common.DlrError("out_size mis-match", flush=True)
        if (t_in_size != t_weight_size_col):
            error += 1
            dlr_common.DlrError("in_size mis-match", flush=True)
        if verbose:
            dlr_common.DlrInfo(f"out_size ={t_out_size} {out_data.shape}")
            dlr_common.DlrInfo(f"in_size ={t_in_size} {in_data.shape}")
            dlr_common.DlrInfo(
                f"weight_size={t_weight_size_row} {t_weight_size_col}")
        if (error != 0):
            dlr_common.DlrError(" parameter mis-match", flush=True)
            return False
    # Select the native entry point and C element type from the dtype.
    if out_data.dtype.type == np.int32:
        _fname = 'LinearNdInt'
        _ctype = ctypes.c_int
    elif out_data.dtype.type == np.float32:
        _fname = 'LinearNdFloat'
        _ctype = ctypes.c_float
    elif out_data.dtype.type == np.float64:
        _fname = 'LinearNdDouble'
        _ctype = ctypes.c_double
    else:
        dlr_common.DlrError(" not support " + str(out_data.dtype.type),
                            flush=True)
        return False
    _LinearNd = dlr_common.WrapFunction(
        dlr_common._dlr, _fname,
        None  # return type
        , [ctypes.POINTER(_ctype)  # out data
          , ctypes.POINTER(_ctype)  # in data
          , ctypes.POINTER(_ctype)  # weight
          , ctypes.POINTER(_ctype)  # bias
          , ctypes.c_ushort  # out_size
          , ctypes.c_ushort  # in_size
          , ctypes.c_ushort  # bias_size
          , ctypes.c_ubyte  # ndim
          , ctypes.c_int  # rigor
          , ctypes.c_int])  # verbose
    # Marshal NumPy buffers as raw C pointers (no copies are made).
    CP_out_data = out_data.ctypes.data_as(ctypes.POINTER(_ctype))
    CP_in_data = in_data.ctypes.data_as(ctypes.POINTER(_ctype))
    CP_weight = weight.ctypes.data_as(ctypes.POINTER(_ctype))
    CP_out_size = ctypes.c_ushort(
        out_data.shape[1])  # note ndim (i.e., rank) is 2
    CP_in_size = ctypes.c_ushort(
        in_data.shape[1])  # note ndim (i.e., rank) is 2
    CP_ndim = ctypes.c_ubyte(in_data.shape[0])  # note ndim (i.e., rank) is 2
    CP_rigor = 1 if rigor else 0
    CP_verbose = 1 if verbose else 0
    if (bias is None) or (bias.size == 0):
        # No bias: pass a NULL pointer and zero length to the native code.
        CP_bias = ctypes.POINTER(_ctype)()
        CP_bias_size = ctypes.c_ushort(0)
    else:
        CP_bias = bias.ctypes.data_as(ctypes.POINTER(_ctype))
        CP_bias_size = ctypes.c_ushort(bias.shape[0])
    _LinearNd(CP_out_data, CP_in_data, CP_weight, CP_bias, CP_out_size,
              CP_in_size, CP_bias_size, CP_ndim, CP_rigor, CP_verbose)
    return True
def bulk_read(port, blocks, dxl_ids, group_num=None):
    """ Read from multiple DXL MX-64s sending one bulk read packet

    This instruction is used for reading values of multiple MX series DXLs
    simultaneously by sending a single Instruction Packet. The packet
    length is shortened compared to sending multiple READ commands, and the
    idle time between the status packets being returned is also shortened
    to save communication time. However, this cannot be used to read a
    single module. If an identical ID is designated multiple times, only
    the first designated parameter will be processed.

    Fixes from review: scalar *blocks* was normalized with
    ``blocks = blocks[blocks]`` (a TypeError for any scalar block) instead
    of wrapping it in a list, and ``group_num`` was compared with ``==``
    rather than ``is None``.

    Args:
        port: Dynamixel portHandler object
        blocks: A list containing blocks of contiguous registers
        dxl_ids: A list containing DXL id numbers
        group_num: An instance of dynamixel.groupBulkRead defined in C

    Returns:
        A dictionary containing the motor id, the register names and their
        corresponding values. For e.g., if present position and goal
        position are read from 2 motors, the output would be:
            {'1': {'present_pos': 2.34, 'goal_pos': 3.21},
             '2': {'present_pos': 1.23, 'goal_pos': 2.55}}
    """
    # Initialize Group bulk read instance on demand
    if group_num is None:
        group_num = init_bulk_read(port)
    # Normalize scalar arguments to single-element lists
    if not isinstance(blocks, list):
        blocks = [blocks]
    if not isinstance(dxl_ids, list):
        dxl_ids = [dxl_ids]
    assert len(blocks) == len(dxl_ids)
    # Register each (id, register block) pair with the bulk-read group
    for i, (id, block) in enumerate(zip(dxl_ids, blocks)):
        address = block.offset
        length = block.width
        dxl_addparam_result = ctypes.c_ubyte(
            dynamixel.groupBulkReadAddParam(group_num, id, address,
                                            length)).value
        if dxl_addparam_result != 1:
            print("[ID:%03d] groupBulkRead addparam failed" % (id))
    # Fire the single bulk-read transaction
    dynamixel.groupBulkReadTxRxPacket(group_num)
    dxl_comm_result = dynamixel.getLastTxRxResult(port, PROTOCOL_VERSION)
    if dxl_comm_result != COMM_SUCCESS:
        print(dynamixel.getTxRxResult(PROTOCOL_VERSION, dxl_comm_result))
    # Read the values and convert them
    vals_dict = {}
    for i, (id, block) in enumerate(zip(dxl_ids, blocks)):
        address = block.offset
        length = block.width
        # Check if this motor's bulk-read data is available
        dxl_getdata_result = ctypes.c_ubyte(
            dynamixel.groupBulkReadIsAvailable(group_num, id, address,
                                               length)).value
        if dxl_getdata_result != 1:
            print("[ID:%03d] groupBulkRead getdata failed" % (id))
        raw_val = dynamixel.groupBulkReadGetData(group_num, id, address,
                                                 length)
        data = block.vals_from_data([raw_val])
        vals_dict[i] = data
    return vals_dict
a['CountType'] = itertools.count(0) # data compression and archiving (CH 12) a['TarInfoType'] = tarfile.TarInfo() # generic operating system services (CH 15) a['LoggerType'] = logging.getLogger() a['FormatterType'] = logging.Formatter() # pickle ok a['FilterType'] = logging.Filter() # pickle ok a['LogRecordType'] = logging.makeLogRecord(_dict) # pickle ok a['OptionParserType'] = _oparser = optparse.OptionParser() # pickle ok a['OptionGroupType'] = optparse.OptionGroup(_oparser, "foo") # pickle ok a['OptionType'] = optparse.Option('--foo') # pickle ok if HAS_CTYPES: a['CCharType'] = _cchar = ctypes.c_char() a['CWCharType'] = ctypes.c_wchar() # fail == 2.6 a['CByteType'] = ctypes.c_byte() a['CUByteType'] = ctypes.c_ubyte() a['CShortType'] = ctypes.c_short() a['CUShortType'] = ctypes.c_ushort() a['CIntType'] = ctypes.c_int() a['CUIntType'] = ctypes.c_uint() a['CLongType'] = ctypes.c_long() a['CULongType'] = ctypes.c_ulong() a['CLongLongType'] = ctypes.c_longlong() a['CULongLongType'] = ctypes.c_ulonglong() a['CFloatType'] = ctypes.c_float() a['CDoubleType'] = ctypes.c_double() a['CSizeTType'] = ctypes.c_size_t() a['CLibraryLoaderType'] = ctypes.cdll a['StructureType'] = _Struct if not IS_PYPY: a['BigEndianStructureType'] = ctypes.BigEndianStructure()
def setDisplayCharacter(self, row, column, character):
    """Sets a single character on the display.

    On linux, it has been found that encoding the string with iso-8859-15
    encoding prevents a "UnicodeEncodeError" that would occur and ensure
    the character being displayed is correct between windows and linux.
    e.g. textLCD.setDisplayCharacter(0, 0, chr(223).encode("iso-8859-15"))

    Parameters:
        row<int>: the index of the row to write the character to.
        column<int>: the index of the column to write the character to.
        character<char>: the character to display.

    Exceptions:
        RuntimeError - If current platform is not supported/phidget c dll
        cannot be found
        PhidgetException: If this Phidget is not opened and attached, or
        if the row index is invalid.
    """
    try:
        dll = PhidgetLibrary.getDll()
        status = dll.CPhidgetTextLCD_setDisplayCharacter(
            self.handle, c_int(row), c_int(column), c_ubyte(character))
    except RuntimeError:
        raise
    if status > 0:
        raise PhidgetException(status)
def Read_Sync_Once(self):
    """Perform one group-sync read of all four motors' current registers.

    Fires the pending groupSyncRead transaction, verifies that data for
    every motor arrived (exiting via quit() on failure, as before), and
    decodes each reading as a signed 16-bit value.

    Improvement from review: the four copy-pasted availability checks and
    the four copy-pasted reads are collapsed into loops over the motor
    ids; behavior is unchanged.

    :return: [difft, cur1, cur2, cur3, cur4] where difft is microseconds
        since self.timestamp0 (NOTE(review): the timestamp is built from
        minute/second/microsecond only, so it wraps every hour — confirm
        that is acceptable for the caller).
    """
    groupread_num = self.groupread_num
    port_num = self.port_num
    proto_ver = self.proto_ver
    read_addr = self.read_addr
    read_len = self.read_len

    dynamixel.groupSyncReadTxRxPacket(groupread_num)
    dxl_comm_result = dynamixel.getLastTxRxResult(port_num, proto_ver)
    if dxl_comm_result != COMM_SUCCESS:
        print(dynamixel.getTxRxResult(PROTOCOL_VERSION, dxl_comm_result))

    motor_ids = (self.m1id, self.m2id, self.m3id, self.m4id)

    # Check that groupsyncread data is available for every motor
    for motor_id in motor_ids:
        dxl_getdata_result = ctypes.c_ubyte(
            dynamixel.groupSyncReadIsAvailable(groupread_num, motor_id,
                                               read_addr, read_len)).value
        if dxl_getdata_result != 1:
            print("[ID:%03d] groupSyncRead getdata failed" % (motor_id))
            quit()

    # Decode each motor's current reading as a signed 16-bit value
    currents = [
        ctypes.c_int16(
            dynamixel.groupSyncReadGetData(groupread_num, motor_id,
                                           read_addr, read_len)).value
        for motor_id in motor_ids
    ]

    dt = datetime.datetime.now()
    timestamp = dt.minute * 60000000 + dt.second * 1000000 + dt.microsecond
    difft = timestamp - self.timestamp0
    return [difft] + currents
def set_flag(self, name, value):
    """Set or clear one named flag bit in self.flags.

    Looks up *name* (case-insensitively) in self.FLAGSDEF; unknown names
    are silently ignored. Only the low bit of *value* is used.
    """
    key = name.upper()
    if key not in self.FLAGSDEF:
        return
    entry = self.FLAGSDEF[key]
    # Clear the flag's bit (mask inverted within a byte), then OR in the
    # new bit at the flag's offset.
    cleared = self.flags & ctypes.c_ubyte(~entry.mask).value
    self.flags = cleared | ((value & 0x01) << entry.offset)
# NOTE(review): this fragment uses Python 2 idioms — `filter(...)[0]`
# raises TypeError on Python 3 (filter objects are not subscriptable) and
# `chr(off)` is appended to what is then written as binary code — confirm
# the intended runtime before porting. `m[...].as_long()` suggests a Z3
# model and `fit`/`p32`/`disasm` suggest pwntools, but neither is visible
# here.
for bl in chain:
    block_code = nodes[bl].code
    succ = list(g.successors(bl))
    # Blocks flagged as `missing` still need their jump displacement
    # patched onto the end of their code.
    if len(succ) != 0 and nodes[bl].missing:
        if len(succ) == 2:
            # Two successors: the edge marked 'right' is the jump target.
            target = filter(lambda x: g.edges[bl, x]['dirr'] == 'right',
                            succ)[0]
        else:
            target = succ[0]
        # Width of the displacement operand in bytes (1 or 4, per the
        # repr_size == 1 branches below).
        repr_size = nodes[bl].missing
        off_in_this = off_in_chain[bl] + len(block_code) + repr_size
        off_in_target = off_in_chain[target]
        # Relative offset = target's absolute position minus the address
        # just past this block's displacement field.
        off = -(piece_off + off_in_this) + (
            m[piece[chain_no[target]]].as_long() + off_in_target)
        # Truncate to the operand width, wrapping like the CPU would.
        off = ctypes.c_ubyte(
            off).value if repr_size == 1 else ctypes.c_uint(off).value
        block_code += chr(off) if repr_size == 1 else p32(off)
    chain_code += block_code
code_map[piece_off] = chain_code
# Lay the chains out at their solved offsets, filling gaps with NOPs (0x90).
func_code = fit(code_map, filler='\x90')
with open('traverse.bin', 'wb') as f:
    f.write(func_code)
with open('traverse.code', 'w') as f:
    f.write(disasm(func_code, offset=True))
def __getitem__(self, i):
    """Read tag(s) from the ring buffer by index or slice.

    Integer index returns a (channel, time) pair; a slice returns
    (channels, times) NumPy arrays. Negative indices count back from
    self.datapoints; valid positions are limited to the last
    self.size() datapoints still held in the buffer. Times are scaled
    by self.resolution when tagsAsTime is enabled and resolution is
    known, otherwise raw 64-bit counts are returned.
    """
    if (isinstance(i, slice)):
        start = i.start
        stop = i.stop
        step = i.step
        if (start):
            if (start < 0):
                # Negative start: count back from the newest datapoint,
                # but never past what the buffer still holds.
                if (self.size() + start < 0 or self.datapoints + start < 0):
                    raise ValueError("start index out of buffer bounds")
                start = self.datapoints + start
            else:
                if (start > self.datapoints
                        or start < self.datapoints - self.size()):
                    raise ValueError("start index out of buffer bounds")
        else:
            # Default start: the oldest datapoint still in the buffer.
            # NOTE(review): a falsy explicit start of 0 also lands here,
            # which only matters once the buffer has wrapped.
            if (self.datapoints > self.size()):
                start = self.datapoints - self.size()
            else:
                start = 0
        if (stop):
            if (stop < 0):
                if (self.size() + stop < 0 or self.datapoints + stop < 0):
                    raise ValueError("stop index out of buffer bounds")
                stop = self.datapoints + stop
            else:
                if (stop > self.datapoints
                        or stop < self.datapoints - self.size()):
                    raise ValueError("stop index out of buffer bounds")
        else:
            # Default stop: read through the newest datapoint.
            stop = self.datapoints
        if (stop < start):
            raise ValueError("Array indexing invalid")
        elif (stop == start):
            # Empty slice: return empty channel/time arrays.
            return (array([], dtype=numpy.uint8),
                    array([], dtype=numpy.double))
        arrlen = stop - start
        c = numpy.empty(arrlen, dtype=numpy.uint8)
        t = numpy.empty(arrlen, dtype=numpy.uint64)
        # Bulk-read channels and timetags directly into the NumPy buffers.
        libttag.tt_readarray(
            self.tt_buf, start,
            c.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)),
            t.ctypes.data_as(ctypes.POINTER(ctypes.c_ulonglong)), arrlen)
        if (step):
            # Stride applied after the contiguous read.
            c = c[::step]
            t = t[::step]
        if (numpy.isnan(self.resolution) or self.tagsAsTime == False):
            return (c, t)
        else:
            return (c, t.astype(numpy.double) * self.resolution)
    else:
        if (i < 0):
            if (self.size() + i < 0 or self.datapoints + i < 0):
                raise ValueError("Array index out of buffer bounds")
            i = self.datapoints + i
        else:
            if (i > self.datapoints
                    or i < self.datapoints - self.size()):
                raise ValueError("Array index out of buffer bounds")
        # Single-element read through ctypes out-parameters.
        c = ctypes.c_ubyte()
        t = ctypes.c_ulonglong()
        libttag.tt_readarray(self.tt_buf, i, ctypes.byref(c),
                             ctypes.byref(t), 1)
        if (numpy.isnan(self.resolution) or self.tagsAsTime == False):
            return (c.value, t.value)
        else:
            return (c.value, t.value * self.resolution)