def sum_8bit(self, a, b):
    """Add two 8-bit signed integers via the FORTRAN ``sum_8bit`` routine.

    Both values are converted to :class:`ctypes.c_int8` and passed by
    reference, matching the FORTRAN ``integer(1)`` dummy arguments:

    .. code-block:: fortran

        function sum_8bit(a, b) result(value)
        !DEC$ ATTRIBUTES DLLEXPORT, ALIAS:'sum_8bit' :: sum_8bit
        implicit none
        integer(1) :: a, b, value
        value = a + b
        end function sum_8bit

    See the corresponding 64-bit :meth:`~.fortran64.Fortran64.sum_8bit` method.

    Args:
        a (int): The first 8-bit signed integer.
        b (int): The second 8-bit signed integer.

    Returns:
        :py:class:`int`: The sum of ``a`` and ``b``.
    """
    first = ctypes.c_int8(a)
    second = ctypes.c_int8(b)
    # Declare the 8-bit return type so ctypes converts the result correctly.
    self.lib.sum_8bit.restype = ctypes.c_int8
    return self.lib.sum_8bit(ctypes.byref(first), ctypes.byref(second))
def get_position_change(self):
    """Return the ``(x, y)`` movement from the latest device report.

    Report bytes 1 and 2 are reinterpreted as signed 8-bit deltas.  When no
    report is available, a zero movement ``(0.0, 0.0)`` is returned.
    """
    report = self._read_report()
    if not report:
        return 0.0, 0.0
    dx = ctypes.c_int8(report[1]).value
    dy = ctypes.c_int8(report[2]).value
    return float(dx), float(dy)
def get_xy_movement_of_connection_strip(map_id, connection_id):
    """Compute the x/y movement values for a map-connection strip.

    Only the axis perpendicular to the connection direction gets a value;
    the other axis stays ``None``.
    """
    header = extract_maps.map_headers[map_id]
    connection = header["connections"][connection_id]
    direction = connection["direction"]

    # These were inputs to the disabled pointer-based formulas below; kept
    # because the int() parses can raise on malformed headers, as before.
    current_map_location = int(connection["current_map_tile_pointer"], 16)
    other_header = extract_maps.map_headers[connection["map_id"]]
    map2_height = int(other_header["y"], 16)
    map2_width = int(other_header["x"], 16)

    y_mov = None
    x_mov = None
    # Original pointer-derived formulas (disabled):
    #   WEST:  ((current_map_location - 0xC6E8) / (map2_width + 6)) - 3
    #   EAST:  ((current_map_location - 0xC6E5) / (map2_width + 6)) - 4
    #   NORTH: current_map_location - 0xC6EB
    #   SOUTH: current_map_location - 0xC6EB - ((map2_height + 3) * (map2_width + 6))
    if direction in ("WEST", "EAST"):
        # the header's "y" offset is a signed byte; -2 halves it and flips sign
        y_mov = c_int8(connection["y"]).value / -2
    if direction in ("NORTH", "SOUTH"):
        x_mov = c_int8(connection["x"]).value / -2

    return {"y_mov": y_mov, "x_mov": x_mov}
def get_info(self):
    """Collect sensor metadata from the OCC and build ``self.Info``.

    Repeatedly requests sensor-info packets until all
    ``self.number_of_sensors`` are covered, accumulates the raw records,
    unpacks them and stores a :class:`pandas.DataFrame` with columns
    name/unit/frequency/scale on ``self.Info``.  Frequency and scale are
    decoded from Amester's "NeM" fixed-point encoding (high bits mantissa,
    low byte a signed decimal exponent).
    """
    sensors_array = bytearray()
    start_sensor = 0
    start_time = datetime.now()
    if (debug_mode):
        print("Collecting Sensors Info from OCC:\n", start_sensor)
    while start_sensor < self.number_of_sensors:
        r = self.get_sensor_info(start_sensor)
        # print(r.text)
        # bytes 3..4 of the payload carry the big-endian packet length
        packet_length = r.json()['data'][3] * 256 + r.json()['data'][4]
        # names = str(bytearray(r.json()['data'][5:packet_length + 5]),'utf8', errors='ignore')
        names = bytearray(r.json()['data'][5:packet_length + 5])
        sensors_array.extend(names)
        # count the NUL-terminated name/unit pairs received so far to know
        # which sensor index to request next
        start_sensor = start_sensor + \
            len(re.findall(r'\w+\0[\\ \/ \% \# A-z]+\0', str(names,'utf8', errors='ignore')))
        if (debug_mode):
            print(start_sensor)
        # workaround for the OCC pass through commands timeout issue
        # https://github.com/openbmc/openbmc/issues/1700
        time.sleep(1)
    if (debug_mode):
        print(start_sensor)
    # sensors_array.extend(bytearray(b'\x00'))
    delta = datetime.now() - start_time
    # print("elapsed time in microseconds", delta.microseconds)
    # sensors_array = sensors_array[:-2]
    # 'zzII' = two NUL-terminated strings plus two big-endian uint32 per sensor
    sensor_tuples = zunpack('>' + 'zzII' * (self.number_of_sensors),
                            str(sensors_array, 'windows-1252'))
    fields_names = ["name", "unit", "frequency", "scale"]
    sensor_list = [
        sensor_tuples[i:i + len(fields_names)]
        for i in range(0, len(sensor_tuples), len(fields_names))
    ]
    self.Info = pd.DataFrame(sensor_list, columns=fields_names)
    # convert Amester's NeM format into numerical samples/sec rate
    self.Info.frequency = self.Info.frequency.map(
        lambda x: ((x >> 8) * (10.0**ctypes.c_int8(x & 0xff).value)))
    self.Info.scale = self.Info.scale.map(
        lambda x: ((x >> 8) * (10.0**ctypes.c_int8(x & 0xff).value)))
    # Cleanup Sensor Info Strings
    self.Info['name'] = self.Info['name'].str.decode("utf-8")
    self.Info['unit'] = self.Info['unit'].str.decode("utf-8")
def get_position():
    """Return the joystick position as an ``(x, y)`` tuple.

    Both coordinates are read from the chip as signed 8-bit values.
    (The previous docstring claimed only the X position was returned.)

    Raises:
        RuntimeError: If reading the position from the chip fails.
            RuntimeError subclasses Exception, so callers catching the old
            bare ``Exception`` still work.
    """
    pos_x = ctypes.c_int8(0)
    pos_y = ctypes.c_int8(0)
    ret = _LIB.joystick_click_get_position(ctypes.byref(pos_x),
                                           ctypes.byref(pos_y))
    if ret < 0:
        raise RuntimeError("joystick click get position failed")
    return (pos_x.value, pos_y.value)
def get_info(self):
    """Collect sensor metadata from the OCC and build ``self.Info``.

    Loops requesting sensor-info packets until all
    ``self.number_of_sensors`` have been described, then unpacks the
    accumulated name/unit/frequency/scale records into a
    :class:`pandas.DataFrame` stored on ``self.Info``.  Frequency and scale
    arrive in Amester's "NeM" encoding: high bits are the mantissa, the low
    byte a signed decimal exponent.
    """
    sensors_array = bytearray()
    start_sensor = 0
    start_time = datetime.now()
    if(debug_mode):
        print("Collecting Sensors Info from OCC:\n", start_sensor)
    while start_sensor < self.number_of_sensors:
        r = self.get_sensor_info(start_sensor)
        # print(r.text)
        # bytes 3..4 of the payload carry the big-endian packet length
        packet_length = r.json()['data'][3] * 256 + r.json()['data'][4]
        # names = str(bytearray(r.json()['data'][5:packet_length + 5]),'utf8', errors='ignore')
        names = bytearray(r.json()['data'][5:packet_length + 5])
        sensors_array.extend(names)
        # advance by the number of NUL-terminated name/unit pairs received
        start_sensor = start_sensor + \
            len(re.findall(r'\w+\0[\\ \/ \% \# A-z]+\0', str(names,'utf8', errors='ignore')))
        if (debug_mode):
            print(start_sensor)
        # workaround for the OCC pass through commands timeout issue
        # https://github.com/openbmc/openbmc/issues/1700
        time.sleep(1)
    if(debug_mode):
        print(start_sensor)
    # sensors_array.extend(bytearray(b'\x00'))
    delta = datetime.now() - start_time
    # print("elapsed time in microseconds", delta.microseconds)
    # sensors_array = sensors_array[:-2]
    # 'zzII' = two NUL-terminated strings plus two big-endian uint32 per sensor
    sensor_tuples = zunpack('>' + 'zzII' * (self.number_of_sensors),
                            str(sensors_array,'windows-1252'))
    fields_names = ["name", "unit", "frequency", "scale"]
    sensor_list = [sensor_tuples[i:i + len(fields_names)]
                   for i in range(0, len(sensor_tuples), len(fields_names))]
    self.Info = pd.DataFrame(sensor_list, columns=fields_names)
    # convert Amester's NeM format into numerical samples/sec rate
    self.Info.frequency = self.Info.frequency.map(
        lambda x: ((x >> 8) * (10.0 ** ctypes.c_int8(x & 0xff).value)))
    self.Info.scale = self.Info.scale.map(
        lambda x: ((x >> 8) * (10.0 ** ctypes.c_int8(x & 0xff).value)))
    # Cleanup Sensor Info Strings
    self.Info['name'] = self.Info['name'].str.decode("utf-8")
    self.Info['unit'] = self.Info['unit'].str.decode("utf-8")
def get_target_cursor_values(physical_port, lane, target):
    """
    This API specifically returns the cursor equalization parameters for a
    target(NIC, TOR1, TOR2).  This includes pre one, pre two, main, post one,
    post two cursor values.

    Args:
        physical_port: an Integer, the actual physical port connected to a Y cable
        lane: an Integer, the lane on which to collect the cursor values
              1 -> lane 1, 2 -> lane 2, 3 -> lane 3, 4 -> lane 4
        target: an Integer, the actual target to get the cursor values on
                TARGET_NIC -> NIC, TARGET_TOR1 -> TOR1, TARGET_TOR2 -> TOR2

    Returns:
        a list with pre one, pre two, main, post one, post two cursor values
        in that order, or -1 when the platform chassis is not loaded.
    """
    if platform_chassis is None:
        helper_logger.log_error("platform_chassis is not loaded, failed to get target cursor values")
        return -1

    sfp = platform_chassis.get_sfp(physical_port)
    # Each (target, lane) pair owns 5 consecutive EEPROM bytes:
    # pre1, pre2, main, post1, post2 — read them in order.
    base = OFFSET_NIC_CURSOR_VALUES + (target) * 20 + (lane - 1) * 5
    result = []
    for index in range(5):
        raw = sfp.read_eeprom(base + index, 1)
        y_cable_validate_read_data(raw, 1, physical_port, "target cursor result")
        # cursor values are signed 8-bit quantities
        result.append(c_int8(raw[0]).value)
    return result
def entry_set_next(handle: EntrySetHandle) -> Optional[Entry]:
    """Fetch the next entry from the entry set, or ``None`` when exhausted."""
    ffi_entry = FfiEntry()
    # the FFI call sets this byte flag to non-zero when an entry was found
    found = c_int8(0)
    do_call("askar_entry_set_next", handle, byref(ffi_entry), byref(found))
    return ffi_entry.decode(handle) if found.value else None
def AdvVer2_GetStatusTagUInt8(tagId: int) -> int:
    """Read an 8-bit status tag value from the ADV library.

    Returns the tag value, or -1 when the library call does not succeed.

    NOTE(review): the value is decoded through ``ctypes.c_int8`` (signed)
    even though the API name says UInt8, so values > 127 come back
    negative — confirm against the ADV library documentation.
    """
    pTagValue = pointer(c_int8(0))
    ret_val = advDLL.AdvVer2_GetStatusTagUInt8(c_uint(tagId), pTagValue) & RET_VAL_MASK
    # '!=' instead of 'is not': identity comparison between ints is
    # unreliable (CPython only interns small ints).
    if ret_val != S_OK:
        return -1
    return pTagValue.contents.value
def spinNodeGetValue(self):
    """Read the boolean node's value and cache it on ``self.value``.

    Returns ``None`` (implicitly, as before) when the node is not readable.
    """
    if not self.spinNodeIsReadable():
        return None
    # Spinnaker reports booleans through an 8-bit integer out-parameter.
    raw = ctypes.c_int8(0)
    checkErrorCode(spindll.spinBooleanGetValue(self.h_node, ctypes.byref(raw)),
                   "spinBooleanGetValue")
    self.value = (raw.value == 1)
    return self.value
def getLastTimestamps(self, reset):
    """Retrieve the timestamps of the last detected events on all TDC channels.

    The buffer size must have been set with @ref setTimestampBufferSize,
    otherwise 0 data will be returned.

    @param bool reset If the data should be cleared after retrieving.
    @return dict with keys:
        'timestamps' numpy.array: timestamps of the last events in base
            units (see @ref TDC_getTimebase)
        'channels' numpy.array: channel numbers (0...7 for channels 1...8),
            index-aligned with 'timestamps'
        'valid' int: number of valid entries in the above arrays; may be
            less than the buffer size if the buffer has been cleared
    @raise QuTauError when the underlying library call fails.
    """
    bufsize = self.getTimestampBufferSize()
    # Real ctypes arrays, not Python lists: the original
    # ``[ct.c_int64()] * bufsize`` created bufsize references to ONE object
    # and could not be passed to the C function or to as_array().
    timestamps = (ct.c_int64 * bufsize)()
    channels = (ct.c_int8 * bufsize)()
    valid = ct.c_int32()
    rc = self.tdcbase.TDC_getLastTimestamps(reset, timestamps, channels,
                                            ct.byref(valid))
    if rc != 0:
        raise QuTauError(rc)
    # Only the first `valid` entries carry data.
    timestamps_py = np.ctypeslib.as_array(timestamps)[:valid.value]
    channels_py = np.ctypeslib.as_array(channels)[:valid.value]
    return {
        'timestamps': timestamps_py,
        'channels': channels_py,
        # plain int, matching the documented "number of valid entries"
        'valid': valid.value,
    }
def l16_to_float(self, input_val):
    """Convert a raw LINEAR16 reading to a float.

    The EM2130L uses a fixed exponent of -13; the mantissa is the raw word
    truncated to an unsigned 16-bit value.
    """
    exponent = ctypes.c_int8(-13).value
    mantissa = ctypes.c_uint16(input_val).value
    # value = mantissa * 2**exponent
    return mantissa * pow(2, exponent)
def unpackData(dataFile, device):
    """Read oscilloscope trace data from a capture file.

    Args:
        dataFile: path to the data to read.
        device: capture format:
            'i'      -> raw signed 8-bit samples
            'npy'    -> NumPy archive
            'hackrf' -> complex64 IQ samples
            anything else -> PicoScope int16 samples scaled to [-1, 1]

    Returns:
        numpy array with the traces stored in the given file.
    """
    if device == 'i':
        # 'with' guarantees the handle is closed even if read() raises
        # (the original closed it manually).
        with open(dataFile, mode='br') as dataFileHandler:
            raw = bytearray(dataFileHandler.read())
        # reinterpret each byte as a signed 8-bit sample
        return np.array([ctypes.c_int8(i).value for i in raw])
    elif device == 'npy':
        return np.load(dataFile, allow_pickle=True)
    elif device == 'hackrf':
        return np.fromfile(dataFile, dtype=np.complex64)
    else:  # pico
        MAX_VALUE = 32512.
        return np.fromfile(dataFile, np.dtype('int16')) / MAX_VALUE
def key_derive_ecdh_1pu(
    key_alg: Union[str, KeyAlg],
    ephem_key: LocalKeyHandle,
    sender_key: LocalKeyHandle,
    receiver_key: LocalKeyHandle,
    alg_id: Union[bytes, str, ByteBuffer],
    apu: Union[bytes, str, ByteBuffer],
    apv: Union[bytes, str, ByteBuffer],
    cc_tag: Optional[Union[bytes, ByteBuffer]],
    receive: bool,
) -> LocalKeyHandle:
    """Derive a key via ECDH-1PU and return a handle to it."""
    # Enum members are passed to the FFI layer by their string value.
    alg_value = key_alg.value if isinstance(key_alg, KeyAlg) else key_alg
    key = LocalKeyHandle()
    do_call(
        "askar_key_derive_ecdh_1pu",
        encode_str(alg_value),
        ephem_key,
        sender_key,
        receiver_key,
        encode_bytes(alg_id),
        encode_bytes(apu),
        encode_bytes(apv),
        encode_bytes(cc_tag),
        c_int8(receive),  # bool marshalled as a signed byte flag
        byref(key),
    )
    return key
def _hash_file_name(self, name: str) -> int:
    """Hash *name* the way Nintendo's C++ tooling does.

    The C++ loop being reproduced::

        uint32_t hash = 0;
        for (int i = 0; string[i]; ++i)
            hash = hash * multiplier + string[i];

    Three semantics matter when reimplementing it in Python:
      * the accumulator wraps as an unsigned 32-bit integer;
      * each character is read through a *signed* char (Nintendo's tools
        use signed chars, which matters for bytes >= 0x80);
      * the string is iterated byte per byte, not per Unicode character.
    """
    digest = 0
    for byte in name.encode():
        signed = ctypes.c_int8(byte).value
        # single 32-bit mask per step is sufficient (Python ints don't wrap)
        digest = (digest * self._hash_multiplier + signed) & 0xffffffff
    return digest
def _lock(self, metric, lock_name):
    """Hold an exclusive rados lock on *metric* while the caller's body runs.

    Generator used as a context manager: it retries every 0.1 s until the
    lock named after *metric*/*lock_name* is acquired, yields, and always
    releases the lock on exit.
    """
    # NOTE(sileht): current stable python binding (0.80.X) doesn't
    # have rados_lock_XXX method, so do ourself the call with ctypes
    #
    # https://github.com/ceph/ceph/commit/f5bf75fa4109b6449a88c7ffdce343cf4691a4f9
    # When ^^ is released, we can drop this code and directly use:
    # - ctx.lock_exclusive(name, 'lock', 'gnocchi')
    # - ctx.unlock(name, 'lock', 'gnocchi')
    name = self._get_object_name(metric, lock_name)
    with self._get_ioctx() as ctx:
        while True:
            ret = rados.run_in_thread(
                ctx.librados.rados_lock_exclusive,
                (ctx.io,
                 ctypes.c_char_p(name.encode('ascii')),
                 ctypes.c_char_p(b"lock"),
                 ctypes.c_char_p(b"gnocchi"),
                 ctypes.c_char_p(b""),
                 None,
                 ctypes.c_int8(0)))
            # EBUSY/EEXIST: someone else currently holds the lock — retry
            if ret in [errno.EBUSY, errno.EEXIST]:
                time.sleep(0.1)
            elif ret < 0:
                # any other negative return is a real librados error
                rados.make_ex(ret, "Error while getting lock of %s" % name)
            else:
                break
        try:
            yield
        finally:
            # always release, even if the locked body raised
            ret = rados.run_in_thread(
                ctx.librados.rados_unlock,
                (ctx.io,
                 ctypes.c_char_p(name.encode('ascii')),
                 ctypes.c_char_p(b"lock"),
                 ctypes.c_char_p(b"gnocchi")))
            if ret < 0:
                rados.make_ex(ret,
                              "Error while releasing lock of %s" % name)
def get_unit_info(self, info=None, include_name=True):
    """Retrieves information about the PT-104 Data Logger.

    If the device fails to open, or no device is opened, only the driver
    version is available.

    Parameters
    ----------
    info : :class:`~.enums.PicoScopeInfoApi`, optional
        An enum value or member name. If :data:`None` then request all
        information from the PT-104.
    include_name : :class:`bool`, optional
        If :data:`True` then includes the enum member name as a prefix.
        For example, returns ``'CAL_DATE: 09Aug16'`` if `include_name`
        is :data:`True` else ``'09Aug16'``.

    Returns
    -------
    :class:`str`
        The requested information from the PT-104 Data Logger.
    """
    if info is None:
        # only the first 7 items are supported by the SDK
        values = [PicoScopeInfoApi(i) for i in range(7)]
    else:
        values = [self.convert_to_enum(info, PicoScopeInfoApi, to_upper=True)]

    # A real 127-byte buffer for the SDK to write the string into.  The
    # previous code passed byref(c_int8(127)) — a single byte — which the
    # SDK would overflow when writing any multi-character string.
    string = (c_int8 * 127)()
    required_size = c_int16()
    msg = ''
    for value in values:
        name = '{}: '.format(value.name) if include_name else ''
        self.sdk.UsbPt104GetUnitInfo(self._handle, byref(string), len(string),
                                     byref(required_size), value)
        # read the NUL-terminated C string out of the buffer
        msg += '{}{}\n'.format(name, string_at(addressof(string)).decode())
    return msg[:-1]
def __init__(self, device, spimode=0):
    """Open the SPI *device* node and program its transfer mode (0-3)."""
    # NOTE(review): the device is opened in text mode ('r+'); binary mode
    # would be more typical for a device node — confirm before changing.
    self._file = open(device, 'r+')
    self._no = self._file.fileno()
    # Build the SPI_IOC_WR_MODE ioctl request for a one-byte payload.
    request = _IOC(SPI_IOC_MAGIC, SPI_IOC_WR_MODE, 1, IOW)
    assert spimode in range(4)
    mode = ctypes.c_int8(spimode)  # SPI MODE 0..3
    ioctl(self._no, request, ctypes.addressof(mode))
def spinNodeIsWritable(self, warn = True):
    """Return ``True`` when the node can be written.

    When *warn* is set and the node is read-only, a notice is printed.
    """
    # Spinnaker returns the flag through an 8-bit integer out-parameter.
    flag = ctypes.c_int8(0)
    checkErrorCode(spindll.spinNodeIsWritable(self.h_node, ctypes.byref(flag)),
                   "spinNodeIsWritable")
    is_writable = (flag.value == 1)
    if warn and not is_writable:
        print("Property " + self.name + " is not writeable.")
    return is_writable
def AttributeList_GetInt8(attributeList, attribute):
    """Read an int8 attribute value from an OTF2 attribute list."""
    getter = conf.lib.OTF2_AttributeList_GetInt8
    # declare the C signature so ctypes marshals and error-checks the call
    getter.argtypes = [ctypes.POINTER(AttributeList),
                       AttributeRef,
                       ctypes.POINTER(ctypes.c_int8)]
    getter.restype = ErrorCode
    getter.errcheck = HandleErrorCode
    out = ctypes.c_int8()
    getter(attributeList, attribute, ctypes.byref(out))
    return out.value
def write_bit(self, a_reg_add, a_bit_num, a_bit):
    """Set or clear bit *a_bit_num* of register *a_reg_add* on the device."""
    current = self.__bus.read_byte_data(self.__dev_id, a_reg_add)
    mask = 1 << a_bit_num
    updated = (current | mask) if a_bit else (current & ~mask)
    # the byte is reinterpreted as signed (c_int8) before writing, matching
    # the rest of this driver
    self.__bus.write_byte_data(self.__dev_id, a_reg_add,
                               ctypes.c_int8(updated).value)
def write_bit(self, a_reg_add, a_bit_num, a_bit):
    """Write a single bit of a device register, leaving the others as-is."""
    value = self.__bus.read_byte_data(self.__dev_id, a_reg_add)
    if a_bit:
        value = value | (1 << a_bit_num)
    else:
        value = value & ~(1 << a_bit_num)
    # this driver writes register bytes as signed 8-bit values
    signed = ctypes.c_int8(value).value
    self.__bus.write_byte_data(self.__dev_id, a_reg_add, signed)
def key_generate(alg: Union[str, KeyAlg], ephemeral: bool = False) -> LocalKeyHandle:
    """Generate a new key of algorithm *alg* and return its handle."""
    handle = LocalKeyHandle()
    # enum members are passed to the FFI layer by their string value
    alg_value = alg.value if isinstance(alg, KeyAlg) else alg
    # `ephemeral` is marshalled as a signed byte flag
    do_call("askar_key_generate", encode_str(alg_value), c_int8(ephemeral), byref(handle))
    return handle
def write_bit(self, address, bit_num, bit_value):
    """Update one bit of the register at *address* over I2C."""
    register = self.bus.read_byte_data(self.address, address)
    mask = 1 << bit_num
    register = register | mask if bit_value else register & ~mask
    # reinterpret as a signed byte before the SMBus write
    self.bus.write_byte_data(self.address, address, ctypes.c_int8(register).value)
def __setattr__(self, name, value):
    """Clamp register writes to their hardware widths before storing.

    x/y/accumulator are signed 8-bit, status/stack_pointer unsigned 8-bit,
    program_counter unsigned 16-bit.
    """
    if name in ["x", "y", "accumulator"]:
        value = c_int8(value).value
    elif name in ["status", "stack_pointer"]:
        value = c_uint8(value).value
    elif name == "program_counter":
        # '==' instead of 'is': identity comparison against a string
        # literal is unreliable (and a SyntaxWarning on CPython 3.8+).
        value = c_uint16(value).value
    super().__setattr__(name, value)
def __init__(self):
    """Allocate per-car telemetry slots and packed-struct metadata."""
    # 22 telemetry records, one per car slot
    self.m_carTelemetryData = [CarTelemetryData() for _ in range(22)]
    # one repeated car-record format followed by the trailing extra fields
    self.packet_format = "".join(self.car_telemetry_data_format * 22) \
        + self.extra_telemetry_data_format
    self.m_buttonStatus = ctypes.c_uint32(0)
    self.m_mfdPanelIndex = ctypes.c_uint8(0)
    self.m_mfdPanelIndexSecondaryPlayer = ctypes.c_uint8(0)
    self.m_suggestedGear = ctypes.c_int8(0)  # signed 8-bit value
def test_output_arg(self):
    """``String.getBytes`` must fill the supplied byte array with the
    low-order byte of each Unicode character, as signed 8-bit values."""
    string = String('\u1156\u2278\u3390\u44AB')
    for btarray in ([0] * 4, (0, ) * 4, jarray(jbyte)([0] * 4)):
        # This version of getBytes returns the 8 low-order bits of each
        # Unicode character.
        string.getBytes(0, 4, btarray, 0)
        if not isinstance(btarray, tuple):
            # assertEqual instead of assertEquals: the latter is a
            # deprecated alias, removed in Python 3.12.
            self.assertEqual(
                btarray,
                [ctypes.c_int8(x).value for x in [0x56, 0x78, 0x90, 0xAB]])
async def close(self, commit: bool = False):
    """Close the session, optionally committing the transaction first.

    Idempotent: does nothing when the session is already closed.
    """
    if getattr(self, "_closed", False):
        return
    await do_call_async(
        "askar_session_close",
        self,
        c_int8(commit),  # bool flag marshalled as a signed byte
    )
    setattr(self, "_closed", True)
def int8(self):
    """Return the field as a signed 8-bit value.

    Integer-typed fields are returned as stored; other payloads are coerced
    through ``c_int8``.  Returns ``None`` when the value cannot be
    represented.
    """
    if TIBRVMSG_I8 <= self._type <= TIBRVMSG_U64:
        return self._data
    try:
        val = _ctypes.c_int8(self._data)
        return val.value
    except (TypeError, ValueError):
        # narrowed from a bare `except:` — only conversion failures mean
        # "not representable"; anything else should propagate
        return None
def InitBoard(self):
    """Initialise the camera board through the vendor DLL.

    Forwards the configuration stored on this object as explicit ctypes
    values, one argument per DLL parameter.
    """
    self.dll.DLLInitBoard(
        ct.c_uint32(self.board_number),
        ct.c_int8(self.sym),
        ct.c_uint8(self.burst),
        ct.c_uint32(self.pixels),
        ct.c_uint32(self.waits),
        ct.c_uint32(self.flag816),
        ct.c_uint32(self.pportadr),
        ct.c_uint32(self.pclk),
        ct.c_uint32(self.adrdelay),
    )
def __DLLRingBlockTrig(self, channel) -> bool:
    """Trigger a ring-block read on *channel*.

    Returns:
        bool: True when the DLL reports success (non-zero return).

    Raises:
        myexc.DLLerror: when the underlying DLL call itself fails.

    Bug fixed: the original had ``return`` inside ``finally``, which
    silently discarded the ``DLLerror`` raised in the ``except`` handler,
    so failures were reported as a plain ``False``.
    """
    channel_ct = ct.c_int8(channel)
    try:
        out = self.__IRdet.DLLRingBlockTrig(channel_ct)
    except Exception as exc:
        # narrowed from a bare except; chain the cause for diagnostics
        raise myexc.DLLerror("DLLRingBlockTrig") from exc
    return out != 0
def get_string(self, string_value):
    """Call ``ps4000aGetString`` and return ``(value, length)``.

    This function is in the header file, but it is not in the manual.

    NOTE(review): ``string`` is a single ``c_int8`` — one byte — yet it is
    passed by reference as the SDK's string output buffer, and only that
    first byte is returned.  If the SDK writes more than one byte this
    overflows; confirm the expected buffer semantics against the ps4000a
    programmer's guide before relying on the returned value.
    """
    string = c_int8()
    string_length = c_int32()
    self.sdk.ps4000aGetString(self._handle, string_value, byref(string), byref(string_length))
    return string.value, string_length.value
def simple_callback(device: BLEDevice, advertisement_data: AdvertisementData):
    """Print RSSI and ultrasound readings advertised by the target node."""
    # only react to the one node we care about
    if device.address != "E1:38:D4:CD:DE:AF":
        return
    payload = list(advertisement_data.service_data.values())[0]
    # every advertised byte is a signed 8-bit reading
    readings = [ctypes.c_int8(b).value for b in payload]
    rssi_list = list(readings[3:15])
    us_list = list(readings[15:17])
    print("RSSI: ", rssi_list, ", US: ", us_list)
def spinNodeSetValue(self, new_value):
    """Write a boolean value to the node; no-op when the value is unchanged.

    Raises SpinnakerExceptionValue when *new_value* is not a bool.
    """
    if new_value != self.value:
        if not isinstance(new_value, bool):
            raise SpinnakerExceptionValue("Value for " + self.name + " of " + str(new_value) + " is not a boolean.")
        # Spinnaker takes booleans as an 8-bit integer
        flag = ctypes.c_int8(new_value)
        checkErrorCode(spindll.spinBooleanSetValue(self.h_node, flag),
                       "spinBooleanSetValue")
        self.value = new_value
        return self.value
def read_int8(self):
    """Read one signed byte from the underlying file; 0 when no file is open.

    NOTE(review): '!b' is chosen when endian == LittleEndian and '@b'
    otherwise — endianness is irrelevant for a single byte, so this only
    affects sizing; kept as-is.
    """
    fmt = '!b' if (self.endian == LittleEndian) else '@b'
    if self.file is None:
        return 0
    chunk = self.file.read(struct.calcsize(fmt))
    (value,) = struct.unpack_from(fmt, chunk)
    # c_int8 round-trip preserved from the original ('b' is already signed)
    return ctypes.c_int8(value).value
def write_bits(self, a_reg_add, a_bit_start, a_length, a_data):
    """Write *a_length* bits of *a_data* into the register at *a_reg_add*.

    The field's most-significant bit lands at position *a_bit_start*; all
    bits outside the field are preserved.
    """
    current = self.__bus.read_byte_data(self.__dev_id, a_reg_add)
    shift = a_bit_start - a_length + 1
    mask = ((1 << a_length) - 1) << shift
    # position the payload and discard anything outside the field
    payload = (a_data << shift) & mask
    merged = (current & ~mask) | payload
    # write back as a signed byte, as elsewhere in this driver
    self.__bus.write_byte_data(self.__dev_id, a_reg_add,
                               ctypes.c_int8(merged).value)
def test_ints(self):
    """Round-trip various integer types through the serializer.

    Covers plain ints, Python-2 ``long``, ctypes fixed-width integers and
    — when available — numpy scalar types.  (Python 2 code: ``long`` does
    not exist on Python 3.)
    """
    self.loopback(int(-420000))
    self.loopback(long(-420000))
    import ctypes
    self.loopback(ctypes.c_uint8(42))
    self.loopback(ctypes.c_int8(-42))
    self.loopback(ctypes.c_uint16(4200))
    self.loopback(ctypes.c_int16(-4200))
    try:
        import numpy
    except ImportError:
        # numpy is optional; skip its cases when not installed
        return
    self.loopback(numpy.uint16(4200))
    self.loopback(numpy.int16(-4200))
def peek_int8(self):
    """Return the signed byte at the current offset without consuming it.

    Returns 0 when there is no data or the read would run past the end.
    """
    fmt = '!b' if (self.endian == LittleEndian) else '@b'
    if self.byte_data is None:
        return 0
    width = struct.calcsize(fmt)
    # refuse to peek past the end of the buffer
    if self.byte_offset + width > self.byte_size:
        return 0
    window = self.byte_data[self.byte_offset:self.byte_offset + width]
    (value,) = struct.unpack_from(fmt, window)
    # c_int8 round-trip preserved from the original ('b' is already signed)
    return ctypes.c_int8(value).value
def format(self, data, bits):
    """Render *data* according to the model's current display format.

    Falls through to returning *data* unchanged for unknown formats or
    unsupported widths.
    """
    fmt = self._fmt
    if fmt == DataModel.F_DEC_NEG:
        # reinterpret the raw bits as a signed integer of the given width
        if bits == 8:
            return ctypes.c_int8(data).value
        if bits == 16:
            return ctypes.c_int16(data).value
    elif fmt == DataModel.F_BIN:
        return str.format("0b{0:0%db}" % bits, data)
    elif fmt == DataModel.F_HEX:
        # four bits per hex digit
        return str.format("0x{0:0%dx}" % (bits//4), data)
    elif fmt == DataModel.F_OCT:
        # three bits per octal digit
        return str.format("0o{0:0%do}" % (bits//3), data)
    return data
def rounding(self, unrounded, rounding_algorithm=None, rounded_max=None, rounded_min=None):
    """Quantize/convert *unrounded* according to *rounding_algorithm*.

    Supported algorithms: 'int', 'int8', 'int16', 'int32', 'float', and
    'Nbit' (e.g. '8bit': quantize to N bits over [rounded_min, rounded_max]).

    Raises:
        ValueError: when rounding_algorithm is None or unrecognized.
            (The original raised the meaningless ``Exception(ValueError)``
            and crashed with TypeError on None; ValueError subclasses
            Exception, so callers catching Exception still work.)
    """
    if rounding_algorithm == 'int':
        return int(unrounded)
    if rounding_algorithm == 'int8':
        return ctypes.c_int8(unrounded).value
    if rounding_algorithm == 'int16':
        return ctypes.c_int16(unrounded).value
    if rounding_algorithm == 'int32':
        return ctypes.c_int32(unrounded).value
    if rounding_algorithm == 'float':
        return ctypes.c_float(unrounded).value
    if rounding_algorithm is not None and 'bit' in rounding_algorithm:
        bits = int(rounding_algorithm.replace('bit', ''))
        # NOTE(review): '%' binds tighter than '/', so this computes
        # unrounded - (unrounded % range) / 2**bits — kept exactly as the
        # original; confirm the intended quantization formula.
        return unrounded - unrounded % (rounded_max - rounded_min)/2.**bits
    raise ValueError("unsupported rounding_algorithm: %r" % (rounding_algorithm,))
def write_array(self, fout, frame_index):
    """Emit one frame as a brace-wrapped, comma-separated C-style array.

    Entries are broken into rows of eight; hex-formatted integer input is
    reinterpreted through the configured signed width before printing.
    """
    fout.write("{\n\t")
    converters = {
        "int8": ctypes.c_int8,
        "int16": ctypes.c_int16,
        "int32": ctypes.c_int32,
        "int64": ctypes.c_int64,
    }
    for index, value in enumerate(self.frames[frame_index]):
        if index:
            fout.write(",")
        # newline every 8 entries, otherwise a single space separator
        fout.write("\n\t" if index % 8 == 0 else " ")
        if self.hex_format and "int" in self.data_format:
            ctype = converters.get(self.data_format)
            if ctype is not None:
                # hex text -> raw bits -> signed value of the right width
                value = str(ctype(int(value, 16)).value)
        fout.write('{:>4}'.format(value))
    fout.write("\n}")
def _lowLevelEnumerateUnits(self):
    """Return the serial numbers of all attached PS4000A units as a list."""
    count = c_int16(0)
    # first call: only the unit count matters, so a dummy 1-byte buffer
    probe = c_int8(0)
    probe_len = c_int16(0)
    m = self.lib.ps4000aEnumerateUnits(byref(count), byref(probe),
                                       byref(probe_len))
    self.checkResult(m)
    # A serial number is roughly 8 characters; allow an extra character for
    # the comma and one for the space after it (which also covers the NUL
    # terminator).
    serial_len = c_int16(count.value * (8 + 2))
    serial_buf = create_string_buffer(serial_len.value + 1)
    m = self.lib.ps4000aEnumerateUnits(byref(count), serial_buf,
                                       byref(serial_len))
    self.checkResult(m)
    serials = str(serial_buf.value.decode('utf-8'))
    return [entry.strip() for entry in serials.split(',')]
def Init():
    """Initialise the MPC-04 driver and connect to an attached device.

    Returns the connected device handle, or ``None`` when no devices are
    attached.  (Python 2 code: uses print statements.)
    """
    mpc04.MPC_Initialize()
    # ask the driver how many devices are attached
    i = ctypes.c_int8(0)
    mpc04.MPC_GetNumberOfDevices(byref(i))
    device_num = i.value
    print device_num
    if (device_num > 0):
        # fetch the serial-number list for all attached devices
        arr = ctypes.c_int * device_num
        device_arr = arr()
        mpc04.MPC_GetDeviceSerialNumList(byref(device_arr))
        for device_sn in device_arr:
            print device_sn
        # NOTE(review): device_sn here is the LAST serial from the loop
        # above — looks intentional for a single-device setup; confirm.
        device_handle = ctypes.c_int(0)
        mpc04.MPC_GetMpcDeviceHandle(device_sn, byref(device_handle))
        print device_handle.value
        mpc04.MPC_Connect(device_handle)
        print mpc04.MPC_IsConnected(device_handle)
        return device_handle
    return None
def acquire(self, blocking=True):
    """Take the exclusive rados lock backing this object.

    Retries every 0.1 s while the lock is held elsewhere when *blocking*
    is true; otherwise returns ``False`` immediately.  Returns ``True``
    once the lock is acquired.
    """
    with self._get_ioctx() as ctx:
        while True:
            ret = rados.run_in_thread(
                ctx.librados.rados_lock_exclusive,
                (
                    ctx.io,
                    ctypes.c_char_p(self._name.encode("ascii")),
                    ctypes.c_char_p(b"lock"),
                    ctypes.c_char_p(b"gnocchi"),
                    ctypes.c_char_p(b""),
                    None,
                    ctypes.c_int8(0),
                ),
            )
            # EBUSY/EEXIST: the lock is currently held by someone else
            if ret in [errno.EBUSY, errno.EEXIST]:
                if blocking:
                    time.sleep(0.1)
                else:
                    return False
            elif ret < 0:
                # any other negative return is a real librados error
                rados.make_ex(ret,
                              "Error while getting lock of %s" % self._name)
            else:
                return True
def export_frame_as_binary(self, float_code, fout, frame, pack_format):
    """Pack every value of *frame* with *pack_format* and write it to *fout*.

    Values are parsed as decimal, or as hex when ``self.hex_format`` is set
    (hex integers are reinterpreted through the configured signed width).
    """
    signed_types = {
        "int8": ctypes.c_int8,
        "int16": ctypes.c_int16,
        "int32": ctypes.c_int32,
        "int64": ctypes.c_int64,
    }
    for value in frame:
        if not self.hex_format:
            number = float(value) if float_code == 1 else int(value)
        elif float_code == 1:
            number = float.fromhex(value)
        else:
            ctype = signed_types.get(self.data_format)
            # unmatched widths keep the raw string, exactly as before
            number = ctype(int(value, 16)).value if ctype else value
        fout.write(struct.pack(pack_format, number))
def parse(self):
    """
    Disassembles stuff and things.

    Walks the ROM from self.start_address, decoding one GBZ80 opcode per
    iteration into a dict keyed by address, until a terminating jump/return
    (with no outstanding labels) or a non-opcode byte is hit.  Results are
    stored on self.asm_commands / self.end_address / self.last_address.
    (Python 2 code: print statements, dict.keys() indexing.)
    """
    rom = self.rom
    start_address = self.start_address
    end_address = self.end_address
    max_size = self.max_size
    debug = self.debug
    bank_id = start_address / 0x4000

    # [{"command": 0x20, "bytes": [0x20, 0x40, 0x50],
    #   "asm": "jp $5040", "label": "Unknown5040"}]
    asm_commands = {}

    offset = start_address
    last_hl_address = None
    last_a_address = None
    used_3d97 = False
    keep_reading = True

    while (end_address != 0 and offset <= end_address) or keep_reading:
        # read the current opcode byte
        current_byte = ord(rom[offset])
        current_byte_number = len(asm_commands.keys())

        # setup this next/upcoming command (reuse any placeholder created
        # earlier by a forward relative-jump reference)
        if offset in asm_commands.keys():
            asm_command = asm_commands[offset]
        else:
            asm_command = {}

        asm_command["address"] = offset

        if not "references" in asm_command.keys():
            # This counts how many times relative jumps reference this
            # byte. This is used to determine whether or not to print out a
            # label later.
            asm_command["references"] = 0

        # some commands have two opcodes
        next_byte = ord(rom[offset+1])

        if self.debug:
            print "offset: \t\t" + hex(offset)
            print "current_byte: \t\t" + hex(current_byte)
            print "next_byte: \t\t" + hex(next_byte)

        # all two-byte opcodes also have their first byte in there somewhere
        if (current_byte in opt_table.keys()) or ((current_byte + (next_byte << 8)) in opt_table.keys()):
            # this might be a two-byte opcode
            possible_opcode = current_byte + (next_byte << 8)

            # check if this is a two-byte opcode
            if possible_opcode in opt_table.keys():
                op_code = possible_opcode
            else:
                op_code = current_byte

            op = opt_table[op_code]

            opstr = op[0].lower()
            optype = op[1]

            if self.debug:
                print "opstr: " + opstr

            asm_command["type"] = "op"
            asm_command["id"] = op_code
            asm_command["format"] = opstr
            asm_command["opnumberthing"] = optype

            opstr2 = None
            base_opstr = copy(opstr)

            # "x" placeholders consume one trailing operand byte each
            if "x" in opstr:
                for x in range(0, opstr.count("x")):
                    insertion = ord(rom[offset + 1])
                    # Certain opcodes will have a local relative jump label
                    # here instead of a raw hex value, but this is
                    # controlled through asm output.
                    insertion = "$" + hex(insertion)[2:]

                    opstr = opstr[:opstr.find("x")].lower() + insertion + opstr[opstr.find("x")+1:].lower()

                    if op_code in relative_jumps:
                        # relative operand is a SIGNED byte offset from the
                        # instruction end (hence the c_int8 round-trip)
                        target_address = offset + 2 + c_int8(ord(rom[offset + 1])).value
                        insertion = "asm_" + hex(target_address)

                        if str(target_address) in self.rom.labels.keys():
                            insertion = self.rom.labels[str(target_address)]

                        opstr2 = base_opstr[:base_opstr.find("x")].lower() + insertion + base_opstr[base_opstr.find("x")+1:].lower()
                        asm_command["formatted_with_labels"] = opstr2
                        asm_command["target_address"] = target_address

                    current_byte_number += 1
                    offset += 1

            # "?" placeholders consume a two-byte little-endian word each
            if "?" in opstr:
                for y in range(0, opstr.count("?")):
                    byte1 = ord(rom[offset + 1])
                    byte2 = ord(rom[offset + 2])
                    number = byte1
                    number += byte2 << 8;

                    # In most cases, you can use a label here. Labels will
                    # be shown during asm output.
                    insertion = "$%.4x" % (number)

                    opstr = opstr[:opstr.find("?")].lower() + insertion + opstr[opstr.find("?")+1:].lower()

                    # This version of the formatted string has labels. In
                    # the future, the actual labels should be parsed
                    # straight out of the "main.asm" file.
                    target_address = number % 0x4000
                    insertion = "asm_" + hex(target_address)

                    if str(target_address) in self.rom.labels.keys():
                        insertion = self.rom.labels[str(target_address)]

                    opstr2 = base_opstr[:base_opstr.find("?")].lower() + insertion + base_opstr[base_opstr.find("?")+1:].lower()
                    asm_command["formatted_with_labels"] = opstr2
                    asm_command["target_address"] = target_address

                    current_byte_number += 2
                    offset += 2

            # Check for relative jumps, construct the formatted asm line.
            # Also set the usage of labels.
            if current_byte in [0x18, 0x20] + relative_jumps:  # jr or jr nz
                # generate a label for the byte we're jumping to
                target_address = offset + 1 + c_int8(ord(rom[offset])).value

                if target_address in asm_commands.keys():
                    asm_commands[target_address]["references"] += 1
                    remote_label = "asm_" + hex(target_address)
                    asm_commands[target_address]["current_label"] = remote_label
                    asm_command["remote_label"] = remote_label
                    # Not sure how to set this, can't be True because an
                    # address referenced multiple times will use a label
                    # despite the label not necessarily being used in the
                    # output. The "use_remote_label" values should be
                    # calculated when rendering the asm output, based on
                    # which addresses and which op codes will be displayed
                    # (within the range).
                    asm_command["use_remote_label"] = "unknown"
                else:
                    remote_label = "asm_" + hex(target_address)

                    # This remote address might not be part of this
                    # function.
                    asm_commands[target_address] = {
                        "references": 1,
                        "current_label": remote_label,
                        "address": target_address,
                    }

                    # Also, target_address can be negative (before the
                    # start_address that the user originally requested),
                    # and it shouldn't be shown on asm output because the
                    # intermediate bytes (between a negative target_address
                    # and start_address) won't be disassembled.

                    # Don't know yet if this remote address is part of this
                    # function or not. When the remote address is not part
                    # of this function, the label name should not be used,
                    # because that label will not be disassembled in the
                    # output, until the user asks it to.
                    asm_command["use_remote_label"] = "unknown"
                    asm_command["remote_label"] = remote_label
            elif current_byte == 0x3e:
                # ld a, $xx — remember the immediate for later analysis
                last_a_address = ord(rom[offset + 1])

            # store the formatted string for the output later
            asm_command["formatted"] = opstr

            if current_byte == 0x21:
                # ld hl, $xxxx — remember the immediate word
                last_hl_address = byte1 + (byte2 << 8)

            # this is leftover from pokered, might be meaningless
            if current_byte == 0xcd:
                if number == 0x3d97:
                    used_3d97 = True

            if current_byte == 0xc3 or current_byte in relative_unconditional_jumps:
                if current_byte == 0xc3:
                    if number == 0x3d97:
                        used_3d97 = True

            # stop reading at a jump, relative jump or return
            if current_byte in end_08_scripts_with:
                is_data = False

                if not self.has_outstanding_labels(asm_commands, offset):
                    keep_reading = False
                    break
                else:
                    keep_reading = True
            else:
                keep_reading = True
        else:
            # This shouldn't really happen, and means that this area of the
            # ROM probably doesn't represent instructions.
            asm_command["type"] = "data"  # db
            asm_command["value"] = current_byte
            keep_reading = False

        # save this new command in the list
        asm_commands[asm_command["address"]] = asm_command

        # jump forward by a byte
        offset += 1

    # also save the last command if necessary
    if len(asm_commands.keys()) > 0 and asm_commands[asm_commands.keys()[-1]] is not asm_command:
        asm_commands[asm_command["address"]] = asm_command

    # store the set of commands on this object
    self.asm_commands = asm_commands

    self.end_address = offset + 1
    self.last_address = self.end_address
def _read_mseed(mseed_object, starttime=None, endtime=None, headonly=False,
                sourcename=None, reclen=None, details=False,
                header_byteorder=None, verbose=None, **kwargs):
    """
    Reads a Mini-SEED file and returns a Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :param mseed_object: Filename or open file like object that contains the
        binary Mini-SEED data. Any object that provides a read() method will
        be considered to be a file like object.
    :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param starttime: Only read data samples after or at the start time.
    :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param endtime: Only read data samples before or at the end time.
    :param headonly: Determines whether or not to unpack the data or just
        read the headers.
    :type sourcename: str
    :param sourcename: Only read data with matching SEED ID (can contain
        wildcards "?" and "*", e.g. "BW.UH2.*" or "*.??Z").
        Defaults to ``None``.
    :param reclen: If it is None, it will be automatically determined for
        every record. If it is known, just set it to the record length in
        bytes which will increase the reading speed slightly.
    :type details: bool, optional
    :param details: If ``True`` read additional information: timing quality
        and availability of calibration information.
        Note, that the traces are then also split on these additional
        information. Thus the number of traces in a stream will change.
        Details are stored in the mseed stats AttribDict of each trace.
        ``False`` specifies for both cases, that this information is not
        available. ``blkt1001.timing_quality`` specifies the timing quality
        from 0 to 100 [%]. ``calibration_type`` specifies the type of
        available calibration information blockettes:

        - ``1``: Step Calibration (Blockette 300)
        - ``2``: Sine Calibration (Blockette 310)
        - ``3``: Pseudo-random Calibration (Blockette 320)
        - ``4``: Generic Calibration  (Blockette 390)
        - ``-2``: Calibration Abort (Blockette 395)

    :type header_byteorder: int or str, optional
    :param header_byteorder: Must be either ``0`` or ``'<'`` for LSBF or
        little-endian, ``1`` or ``'>'`` for MBF or big-endian. ``'='`` is the
        native byte order. Used to enforce the header byte order. Useful in
        some rare cases where the automatic byte order detection fails.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/two_channels.mseed")
    >>> print(st)  # doctest: +ELLIPSIS
    2 Trace(s) in Stream:
    BW.UH3..EHE | 2010-06-20T00:00:00.279999Z - ... | 200.0 Hz, 386 samples
    BW.UH3..EHZ | 2010-06-20T00:00:00.279999Z - ... | 200.0 Hz, 386 samples

    >>> from obspy import UTCDateTime
    >>> st = read("/path/to/two_channels.mseed",
    ...           starttime=UTCDateTime("2010-06-20T00:00:01"),
    ...           sourcename="*.?HZ")
    >>> print(st)  # doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    BW.UH3..EHZ | 2010-06-20T00:00:00.999999Z - ... | 200.0 Hz, 242 samples

    Read with ``details=True`` to read more details of the file if present.

    >>> st = read("/path/to/timingquality.mseed", details=True)
    >>> print(st[0].stats.mseed.blkt1001.timing_quality)
    55

    ``False`` means that the necessary information could not be found in the
    file.

    >>> print(st[0].stats.mseed.calibration_type)
    False

    Note that each change in timing quality from record to record may trigger
    a new Trace object to be created so the Stream object may contain many
    Trace objects if ``details=True`` is used.

    >>> print(len(st))
    101
    """
    # Parse the headonly and reclen flags into the integer forms the C
    # routine expects.
    if headonly is True:
        unpack_data = 0
    else:
        unpack_data = 1
    if reclen is None:
        reclen = -1
    elif reclen not in VALID_RECORD_LENGTHS:
        msg = 'Invalid record length. Autodetection will be used.'
        warnings.warn(msg)
        reclen = -1

    # Determine the byte order: normalize "="/None/0/"<"/1/">" user input to
    # the -1/0/1 convention passed down to libmseed.
    if header_byteorder == "=":
        header_byteorder = NATIVE_BYTEORDER

    if header_byteorder is None:
        header_byteorder = -1
    elif header_byteorder in [0, "0", "<"]:
        header_byteorder = 0
    elif header_byteorder in [1, "1", ">"]:
        header_byteorder = 1

    # Parse some information about the file; bo is the struct-style endian
    # character, or None when the byte order should be auto-detected.
    if header_byteorder == 0:
        bo = "<"
    elif header_byteorder > 0:
        bo = ">"
    else:
        bo = None

    # Determine total size. Either its a file-like object.
    if hasattr(mseed_object, "tell") and hasattr(mseed_object, "seek"):
        cur_pos = mseed_object.tell()
        mseed_object.seek(0, 2)
        length = mseed_object.tell() - cur_pos
        # Restore the original stream position after measuring.
        mseed_object.seek(cur_pos, 0)
    # Or a file name.
    else:
        length = os.path.getsize(mseed_object)

    if length < 128:
        msg = "The smallest possible mini-SEED record is made up of 128 " \
              "bytes. The passed buffer or file contains only %i." % length
        raise ObsPyMSEEDFilesizeTooSmallError(msg)
    elif length > 2 ** 31:
        msg = ("ObsPy can currently not directly read mini-SEED files that "
               "are larger than 2^31 bytes (2048 MiB). To still read it, "
               "please read the file in chunks as documented here: "
               "https://github.com/obspy/obspy/pull/1419"
               "#issuecomment-221582369")
        raise ObsPyMSEEDFilesizeTooLargeError(msg)

    info = util.get_record_information(mseed_object, endian=bo)

    # Map the encoding to a readable string value.
    if "encoding" not in info:
        # Hopefully detected by libmseed.
        info["encoding"] = None
    elif info["encoding"] in ENCODINGS:
        info['encoding'] = ENCODINGS[info['encoding']][0]
    elif info["encoding"] in UNSUPPORTED_ENCODINGS:
        msg = ("Encoding '%s' (%i) is not supported by ObsPy. Please send "
               "the file to the ObsPy developers so that we can add "
               "support for it.") % \
            (UNSUPPORTED_ENCODINGS[info['encoding']], info['encoding'])
        raise ValueError(msg)
    else:
        msg = "Encoding '%i' is not a valid MiniSEED encoding." % \
            info['encoding']
        raise ValueError(msg)

    record_length = info["record_length"]

    # Only keep information relevant for the whole file.
    info = {'filesize': info['filesize']}

    # If it's a file name just read it.
    if isinstance(mseed_object, (str, native_str)):
        # Read to NumPy array which is used as a buffer.
        bfr_np = np.fromfile(mseed_object, dtype=np.int8)
    elif hasattr(mseed_object, 'read'):
        bfr_np = from_buffer(mseed_object.read(), dtype=np.int8)

    # Search for data records and pass only the data part to the underlying C
    # routine: skip over any leading dataless/SEED control headers.
    offset = 0
    # 0 to 9 are defined in a row in the ASCII charset.
    min_ascii = ord('0')

    # Small function to check whether an array of ASCII values contains only
    # digits.
    def isdigit(x):
        return True if (x - min_ascii).max() <= 9 else False

    while True:
        # This should never happen
        if (isdigit(bfr_np[offset:offset + 6]) is False) or \
                (bfr_np[offset + 6] not in VALID_CONTROL_HEADERS):
            msg = 'Not a valid (Mini-)SEED file'
            raise Exception(msg)
        elif bfr_np[offset + 6] in SEED_CONTROL_HEADERS:
            offset += record_length
            continue
        break
    bfr_np = bfr_np[offset:]
    buflen = len(bfr_np)

    # If no selection is given pass None to the C function.
    if starttime is None and endtime is None and sourcename is None:
        selections = None
    else:
        select_time = SelectTime()
        selections = Selections()
        selections.timewindows.contents = select_time
        if starttime is not None:
            if not isinstance(starttime, UTCDateTime):
                msg = 'starttime needs to be a UTCDateTime object'
                raise ValueError(msg)
            selections.timewindows.contents.starttime = \
                util._convert_datetime_to_mstime(starttime)
        else:
            # HPTERROR results in no starttime.
            selections.timewindows.contents.starttime = HPTERROR
        if endtime is not None:
            if not isinstance(endtime, UTCDateTime):
                msg = 'endtime needs to be a UTCDateTime object'
                raise ValueError(msg)
            selections.timewindows.contents.endtime = \
                util._convert_datetime_to_mstime(endtime)
        else:
            # HPTERROR results in no endtime.
            selections.timewindows.contents.endtime = HPTERROR
        if sourcename is not None:
            if not isinstance(sourcename, (str, native_str)):
                msg = 'sourcename needs to be a string'
                raise ValueError(msg)
            # libmseed uses underscores as separators and allows filtering
            # after the dataquality which is disabled here to not confuse
            # users. (* == all data qualities)
            selections.srcname = (sourcename.replace('.', '_') + '_*').\
                encode('ascii', 'ignore')
        else:
            selections.srcname = b'*'
    all_data = []

    # Use a callback function to allocate the memory and keep track of the
    # data.
    def allocate_data(samplecount, sampletype):
        # Enhanced sanity checking for libmseed 2.10 can result in the
        # sampletype not being set. Just return an empty array in this case.
        if sampletype == b"\x00":
            data = np.empty(0)
        else:
            data = np.empty(samplecount, dtype=DATATYPES[sampletype])
        all_data.append(data)
        return data.ctypes.data
    # XXX: Do this properly!
    # Define Python callback function for use in C function. Return a long so
    # it hopefully works on 32 and 64 bit systems.
    alloc_data = C.CFUNCTYPE(C.c_longlong, C.c_int, C.c_char)(allocate_data)

    # Collapse verbose to an int; any non-convertible value means silent.
    try:
        verbose = int(verbose)
    except Exception:
        verbose = 0

    clibmseed.verbose = bool(verbose)
    try:
        lil = clibmseed.readMSEEDBuffer(
            bfr_np, buflen, selections, C.c_int8(unpack_data),
            reclen, C.c_int8(verbose), C.c_int8(details), header_byteorder,
            alloc_data)
    except InternalMSEEDError as e:
        msg = e.args[0]
        # NOTE(review): `offset` is an int, so `offset in str(e)` raises
        # TypeError ('in <string>' requires a string left operand) whenever
        # offset is non-zero — this probably meant `str(offset) in str(e)`.
        # Confirm against upstream before relying on this path.
        if offset and offset in str(e):
            # Append the offset of the full SEED header if necessary. That way
            # the C code does not have to deal with it.
            if offset and "offset" in msg:
                msg = ("%s\nThe file contains a %i byte dataless part at the "
                       "beginning. Make sure to add that to the reported "
                       "offset to get the actual location in the file." % (
                           msg, offset))
                raise InternalMSEEDError(msg)
            else:
                raise
        # NOTE(review): when the outer condition is false the exception is
        # silently swallowed and `lil` stays unbound, which would raise a
        # NameError below — verify this handler against the upstream source.
    finally:
        # Make sure to reset the verbosity.
        clibmseed.verbose = True

    del selections

    traces = []
    try:
        current_id = lil.contents
    # Return stream if not traces are found.
    except ValueError:
        clibmseed.lil_free(lil)
        del lil
        return Stream()

    # Walk the linked list of trace ids produced by the C routine; each id in
    # turn carries a linked list of contiguous segments.
    while True:
        # Init header with the essential information.
        header = {'network': current_id.network.strip(),
                  'station': current_id.station.strip(),
                  'location': current_id.location.strip(),
                  'channel': current_id.channel.strip(),
                  'mseed': {'dataquality': current_id.dataquality}}
        # Loop over segments.
        try:
            current_segment = current_id.firstSegment.contents
        except ValueError:
            break
        while True:
            header['sampling_rate'] = current_segment.samprate
            header['starttime'] = \
                util._convert_mstime_to_datetime(current_segment.starttime)
            header['mseed']['number_of_records'] = current_segment.recordcnt
            header['mseed']['encoding'] = \
                ENCODINGS[current_segment.encoding][0]
            header['mseed']['byteorder'] = \
                "<" if current_segment.byteorder == 0 else ">"
            header['mseed']['record_length'] = current_segment.reclen
            if details:
                timing_quality = current_segment.timing_quality
                if timing_quality == 0xFF:
                    # 0xFF is mask for not known timing
                    timing_quality = False
                header['mseed']['blkt1001'] = {}
                header['mseed']['blkt1001']['timing_quality'] = \
                    timing_quality
                header['mseed']['calibration_type'] = \
                    current_segment.calibration_type \
                    if current_segment.calibration_type != -1 else False

            if headonly is False:
                # The data always will be in sequential order.
                data = all_data.pop(0)
                header['npts'] = len(data)
            else:
                data = np.array([])
                header['npts'] = current_segment.samplecnt
            # Make sure to init the number of samples.
            # Py3k: convert to unicode
            header['mseed'] = dict((k, v.decode())
                                   if isinstance(v, bytes) else (k, v)
                                   for k, v in header['mseed'].items())
            header = dict((k, util._decode_header_field(k, v))
                          if isinstance(v, bytes) else (k, v)
                          for k, v in header.items())
            trace = Trace(header=header, data=data)
            # Append global information.
            for key, value in info.items():
                setattr(trace.stats.mseed, key, value)
            traces.append(trace)
            # A Null pointer access results in a ValueError
            try:
                current_segment = current_segment.next.contents
            except ValueError:
                break
        try:
            current_id = current_id.next.contents
        except ValueError:
            break

    clibmseed.lil_free(lil)  # NOQA
    del lil  # NOQA
    return Stream(traces=traces)
def ToInt8(v):
    """Coerce *v* to a signed 8-bit integer.

    *v* is first normalized through the module's ``_int`` helper, then
    wrapped into the int8 range [-128, 127] via two's-complement
    truncation (``ctypes.c_int8``).
    """
    wrapped = ctypes.c_int8(_int(v))
    return wrapped.value
def output_bank_opcodes(original_offset, max_byte_count=0x4000):
    """Disassemble Game Boy (GB-Z80) opcodes starting at original_offset.

    Walks the global `rom` byte string, rendering each opcode through
    `opt_table` and generating local labels for relative-jump targets.
    Reading stops at an unconditional jump/return (or after max_byte_count
    bytes) once no forward label references remain outstanding.

    Returns a 5-tuple:
        (output, offset, last_hl_address, last_a_address, used_3d97)
    where `output` is the assembly text, `offset` is the address after the
    last decoded byte, `last_hl_address`/`last_a_address` are the last
    values loaded into hl / a (or None), and `used_3d97` flags whether a
    call/jump to $3d97 was seen.

    NOTE: this is Python 2 code (print statements, integer division).
    """
    # Legacy variable-name key from the original C-style implementation:
    #fs = current_address
    #b = bank_byte
    #in = input_data -- rom
    #bank_size = byte_count
    #i = offset
    #ad = end_address
    #a, oa = current_byte_number

    # Map the offset to a 0x4000-byte ROM bank id (Py2 integer division).
    # NOTE(review): the threshold 0x8000 looks suspicious for a scheme where
    # banks are 0x4000 bytes each — offsets in 0x4000..0x7fff would get
    # bank_id 0 here; confirm intended banking behavior.
    bank_id = 0
    if original_offset > 0x8000:
        bank_id = original_offset / 0x4000
    print "bank id is: " + str(bank_id)

    last_hl_address = None #for when we're scanning the main map script
    last_a_address = None
    used_3d97 = False

    global rom
    offset = original_offset
    current_byte_number = 0 #start from the beginning

    #we don't actually have an end address, but we'll just say $4000
    end_address = original_offset + max_byte_count

    # byte_labels maps address -> {"name", "usage", "definition"}; "usage"
    # counts references so unused labels can be stripped at the end.
    byte_labels = {}

    output = ""
    keep_reading = True
    while offset <= end_address and keep_reading:
        current_byte = ord(rom[offset])
        is_data = False
        maybe_byte = current_byte

        #first check if this byte already has a label
        #if it does, use the label
        #if not, generate a new label
        if offset in byte_labels.keys():
            line_label = byte_labels[offset]["name"]
            byte_labels[offset]["usage"] += 1
        else:
            line_label = asm_label(offset)
            byte_labels[offset] = {}
            byte_labels[offset]["name"] = line_label
            byte_labels[offset]["usage"] = 0
        byte_labels[offset]["definition"] = True
        output += line_label.lower() + "\n" #" ; " + hex(offset) + "\n"

        #find out if there's a two byte key like this
        # (two-byte opcodes are keyed as first_byte | second_byte << 8)
        temp_maybe = maybe_byte
        temp_maybe += ( ord(rom[offset+1]) << 8)
        if temp_maybe in opt_table.keys() and ord(rom[offset+1])!=0:
            opstr = opt_table[temp_maybe][0].lower()

            # "x" placeholders take a one-byte immediate operand.
            if "x" in opstr:
                for x in range(0, opstr.count("x")):
                    insertion = ord(rom[offset + 1])
                    insertion = "$" + hex(insertion)[2:]

                    opstr = opstr[:opstr.find("x")].lower() + insertion + opstr[opstr.find("x")+1:].lower()
                    # NOTE(review): `current_byte += 1` mutates the opcode
                    # value, not the byte counter — likely meant
                    # `current_byte_number += 1`; confirm before changing.
                    current_byte += 1
                    offset += 1

            # "?" placeholders take a two-byte little-endian operand.
            if "?" in opstr:
                for y in range(0, opstr.count("?")):
                    byte1 = ord(rom[offset + 1])
                    byte2 = ord(rom[offset + 2])

                    number = byte1
                    number += byte2 << 8;

                    insertion = "$%.4x" % (number)

                    opstr = opstr[:opstr.find("?")].lower() + insertion + opstr[opstr.find("?")+1:].lower()
                    current_byte_number += 2
                    offset += 2

            output += spacing + opstr #+ " ; " + hex(offset)
            output += "\n"
            # Account for the two opcode bytes themselves.
            current_byte_number += 2
            offset += 2
        elif maybe_byte in opt_table.keys():
            op_code = opt_table[maybe_byte]
            op_code_type = op_code[1]
            op_code_byte = maybe_byte

            #type = -1 when it's the E op
            #if op_code_type != -1:
            # Type 0: no operand bytes.
            if op_code_type == 0 and ord(rom[offset]) == op_code_byte:
                op_str = op_code[0].lower()

                output += spacing + op_code[0].lower() #+ " ; " + hex(offset)
                output += "\n"

                offset += 1
                current_byte_number += 1
            # Type 1: one-byte operand (immediate or relative-jump target).
            elif op_code_type == 1 and ord(rom[offset]) == op_code_byte:
                oplen = len(op_code[0])
                opstr = copy(op_code[0])
                xes = op_code[0].count("x")
                include_comment = False
                for x in range(0, xes):
                    insertion = ord(rom[offset + 1])
                    insertion = "$" + hex(insertion)[2:]

                    if current_byte == 0x18 or current_byte==0x20 or current_byte in relative_jumps: #jr or jr nz
                        #generate a label for the byte we're jumping to
                        # (operand is a signed displacement from the byte
                        # after the 2-byte instruction)
                        target_address = offset + 2 + c_int8(ord(rom[offset + 1])).value
                        if target_address in byte_labels.keys():
                            byte_labels[target_address]["usage"] = 1 + byte_labels[target_address]["usage"]
                            line_label2 = byte_labels[target_address]["name"]
                        else:
                            line_label2 = asm_label(target_address)
                            byte_labels[target_address] = {}
                            byte_labels[target_address]["name"] = line_label2
                            byte_labels[target_address]["usage"] = 1
                            byte_labels[target_address]["definition"] = False

                        insertion = line_label2.lower()
                        include_comment = True
                    elif current_byte == 0x3e:
                        last_a_address = ord(rom[offset + 1])

                    opstr = opstr[:opstr.find("x")].lower() + insertion + opstr[opstr.find("x")+1:].lower()
                    output += spacing + opstr
                    if include_comment:
                        output += " ; " + hex(offset)
                        if current_byte in relative_jumps:
                            output += " $" + hex(ord(rom[offset + 1]))[2:]
                    output += "\n"

                    current_byte_number += 1
                    offset += 1
                    insertion = ""

                current_byte_number += 1
                offset += 1
                include_comment = False
            # Type 2: two-byte operand (absolute address / 16-bit immediate).
            elif op_code_type == 2 and ord(rom[offset]) == op_code_byte:
                oplen = len(op_code[0])
                opstr = copy(op_code[0])
                qes = op_code[0].count("?")
                for x in range(0, qes):
                    byte1 = ord(rom[offset + 1])
                    byte2 = ord(rom[offset + 2])

                    number = byte1
                    number += byte2 << 8;

                    insertion = "$%.4x" % (number)
                    # Substitute a known symbol for the raw address on
                    # calls/jumps when one exists in this bank.
                    if maybe_byte in call_commands or current_byte in relative_unconditional_jumps or current_byte in relative_jumps:
                        result = find_label(insertion, bank_id)
                        if result != None:
                            insertion = result

                    opstr = opstr[:opstr.find("?")].lower() + insertion + opstr[opstr.find("?")+1:].lower()
                    output += spacing + opstr #+ " ; " + hex(offset)
                    output += "\n"

                    current_byte_number += 2
                    offset += 2

                current_byte_number += 1
                offset += 1

                if current_byte == 0x21:
                    last_hl_address = byte1 + (byte2 << 8)
                if current_byte == 0xcd:
                    if number == 0x3d97:
                        used_3d97 = True

                #duck out if this is jp $24d7
                if current_byte == 0xc3 or current_byte in relative_unconditional_jumps:
                    if current_byte == 0xc3:
                        if number == 0x3d97:
                            used_3d97 = True
                    #if number == 0x24d7: #jp
                    # Stop only when no labels still point past this point;
                    # otherwise keep decoding so their targets get emitted.
                    if not has_outstanding_labels(byte_labels) or all_outstanding_labels_are_reverse(byte_labels, offset):
                        keep_reading = False
                        is_data = False
                        break
                    else:
                        is_data = True

            #stop reading at a jump, relative jump or return
            if current_byte in end_08_scripts_with:
                if not has_outstanding_labels(byte_labels) and all_outstanding_labels_are_reverse(byte_labels, offset):
                    keep_reading = False
                    is_data = False
                    #cleanup
                    break
                else:
                    is_data = False
                    keep_reading = True
            else:
                is_data = False
                keep_reading = True
        else:
            # Unknown opcode byte: emit it as raw data.
            #if is_data and keep_reading:
            output += spacing + "db $" + hex(ord(rom[offset]))[2:] #+ " ; " + hex(offset)
            output += "\n"
            offset += 1
            current_byte_number += 1
            #else the while loop would have spit out the opcode
            #these two are done prior
            #offset += 1
            #current_byte_number += 1

    #clean up unused labels
    for label_line in byte_labels.keys():
        address = label_line
        label_line = byte_labels[label_line]
        if label_line["usage"] == 0:
            output = output.replace((label_line["name"] + "\n").lower(), "")

    #add the offset of the final location
    output += "; " + hex(offset)

    return (output, offset, last_hl_address, last_a_address, used_3d97)
def initialize(self):
    """Initialize the EPOS motor controller over its DLL interface.

    Sequence (all via the `eposlib` VCS_* C API, node 0):
      1. set protocol stack settings (38400 baud, 100 ms timeout),
      2. clear faults and make sure the drive is disabled,
      3. set encoder parameters (512 counts/turn, sensor type 4),
      4. force "profile position mode" (operation mode 1),
      5. clamp the position profile if out of bounds,
      6. read the stored wavelength-calibration coefficients A-D from
         device object 8204 (subindices 1-4) and derive the wavelength
         range and current wavelength.

    Each VCS call reports errors through `buf`; a zero return raises
    ValueError. Returns True on success.

    NOTE: this is Python 2 code (print statement, long()).
    """
    nodeID = ctypes.wintypes.WORD(0)
    buf = ctypes.wintypes.DWORD(0)
    BaudRate = DWORD(38400)
    Timeout = DWORD(100)
    ret = eposlib.VCS_SetProtocolStackSettings(self._keyhandle,BaudRate,Timeout,ctypes.byref(buf))
    #print 'set protocol buf %s ret %s' % (buf, ret)
    if ret == 0:
        # NOTE(review): VCS_GetErrorInfo is commented out here, so the
        # ValueError is raised with an empty buffer — the error text will
        # always be blank for this failure.
        errbuf = ctypes.create_string_buffer(64)
        #eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
        raise ValueError(errbuf.value)

    buf = ctypes.wintypes.DWORD(0)
    ret = eposlib.VCS_ClearFault(self._keyhandle,nodeID,ctypes.byref(buf))
    #print 'clear fault buf %s, ret %s' % (buf, ret)
    if ret == 0:
        errbuf = ctypes.create_string_buffer(64)
        eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
        raise ValueError(errbuf.value)

    buf = ctypes.wintypes.DWORD(0)
    plsenabled = ctypes.wintypes.DWORD(0)
    ret = eposlib.VCS_GetEnableState(self._keyhandle,nodeID,ctypes.byref(plsenabled),ctypes.byref(buf))
    #print 'get enable state buf %s ret %s and en %s' % (buf, ret, plsenabled)
    if ret == 0:
        errbuf = ctypes.create_string_buffer(64)
        eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
        raise ValueError(errbuf.value)

    # Drive must be disabled before (re)configuring it.
    if int(plsenabled.value) != 0:
        logging.warning(__name__ + ' EPOS motor enabled, disabling before proceeding.')
        ret = eposlib.VCS_SetDisableState(self._keyhandle,nodeID,ctypes.byref(buf))
        if int(ret) != 0:
            logging.warning(__name__ + ' EPOS motor successfully disabled, proceeding')
        else:
            logging.error(__name__ + ' EPOS motor was not successfully disabled!')

    buf = ctypes.wintypes.DWORD(0)
    Counts = WORD(512) # incremental encoder counts in pulses per turn
    PositionSensorType = WORD(4)
    ret = eposlib.VCS_SetEncoderParameter(self._keyhandle,nodeID,Counts,PositionSensorType,ctypes.byref(buf))
##        if ret == int(0):
##            print 'errr'
##            errbuf = ctypes.create_string_buffer(64)
##            print 'sending'
##            eposlib.VCS_GetErrorInfo.restype = ctypes.wintypes.BOOL
##            print 'boolerrorinfo'
##            eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p, ctypes.wintypes.WORD]
##            print 'arg'
##
##            ret = eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64))
##            print 'err'
##            raise ValueError(errbuf.value)
    # For some reason, it appears normal in the LabVIEW code that this
    # function actually returns an error, i.e. the return value is zero
    # and the buffer has a non-zero error code in it; the LabVIEW code
    # doesn't check it.
    # Also, it appears that in the 2005 version of this DLL, the function
    # VCS_GetErrorInfo doesn't exist!

    # Get operation mode, check if it's 1 -- this is "profile position mode"
    buf = ctypes.wintypes.DWORD(0)
    pMode = ctypes.pointer(ctypes.c_int8())
    eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)]
    eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL
    ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID, pMode, ctypes.byref(buf))
    # if mode is not 1, make it 1
    if pMode.contents.value != 1:
        eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_int8, ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_SetOperationMode.restype = ctypes.wintypes.BOOL
        pMode_setting = ctypes.c_int8(1)
        ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf))

    eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
    eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL
    pProfileVelocity = ctypes.pointer(ctypes.wintypes.DWORD())
    pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD())
    pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD())
    ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration, pProfileDeceleration,ctypes.byref(buf))

    # Clamp the motion profile to a safe default when any parameter is out
    # of range (long() — Python 2).
    if (long(pProfileVelocity.contents.value) > long(11400) or
            long(pProfileAcceleration.contents.value) > long(60000) or
            long(pProfileDeceleration.contents.value) > long(60000)):
        # NOTE(review): these lines reassign VCS_GetPositionProfile's
        # argtypes/restype, but the function called below is
        # VCS_SetPositionProfile — presumably a copy-paste slip; confirm
        # before fixing.
        eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL
        pProfileVelocity = ctypes.wintypes.DWORD(429)
        pProfileAcceleration = ctypes.wintypes.DWORD(429)
        pProfileDeceleration = ctypes.wintypes.DWORD(429)
        logging.warning(__name__ + ' GetPositionProfile out of bounds, resetting...')
        ret = eposlib.VCS_SetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration, pProfileDeceleration,ctypes.byref(buf))

    # Now get the motor position (stored position offset)
    # from the device's "homposition" object
    self._offset = self.get_offset()

    # Now read the stored 'calculation parameters'
    eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
    eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL

    # More hardcoded values
    # Coefficient A: object 8204, subindex 1, 4 bytes.
    StoredPositionObject = ctypes.wintypes.WORD(8204)
    StoredPositionObjectSubindex = ctypes.c_uint8(1)
    StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
    ObjectData = ctypes.c_void_p()
    ObjectDataArray = (ctypes.c_uint32*1)()
    ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
    StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
    ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex, ObjectData,
                                StoredPositionNbBytesToRead, StoredPositionNbBytesRead, ctypes.byref(buf))
    # Cast the object data to uint32
    CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
    self._coefA = CastedObjectData[0]

    eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
    eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL

    # Get coefficient B: object 8204, subindex 2.
    StoredPositionObject = ctypes.wintypes.WORD(8204)
    StoredPositionObjectSubindex = ctypes.c_uint8(2)
    StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
    ObjectData = ctypes.c_void_p()
    ObjectDataArray = (ctypes.c_uint32*1)()
    ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
    StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
    ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex, ObjectData,
                                StoredPositionNbBytesToRead, StoredPositionNbBytesRead, ctypes.byref(buf))
    # Cast the object data to uint32
    CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
    self._coefB = CastedObjectData[0]

    eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
    eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL

    # These are hardcoded values I got from the LabVIEW program -- I don't think
    # any documentation exists on particular object indices
    # Coefficient C: object 8204, subindex 3.
    StoredPositionObject = ctypes.wintypes.WORD(8204)
    StoredPositionObjectSubindex = ctypes.c_uint8(3)
    StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
    ObjectData = ctypes.c_void_p()
    ObjectDataArray = (ctypes.c_uint32*1)()
    ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
    StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
    ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex, ObjectData,
                                StoredPositionNbBytesToRead, StoredPositionNbBytesRead, ctypes.byref(buf))
    # Cast the object data to uint32
    CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
    self._coefC = CastedObjectData[0]

    # Get coefficient D
    eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
    eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL

    # These are hardcoded values I got from the LabVIEW program -- I don't think
    # any documentation exists on particular object indices
    StoredPositionObject = ctypes.wintypes.WORD(8204)
    StoredPositionObjectSubindex = ctypes.c_uint8(4)
    StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
    ObjectData = ctypes.c_void_p()
    ObjectDataArray = (ctypes.c_uint32*1)()
    ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
    StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
    ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex, ObjectData,
                                StoredPositionNbBytesToRead, StoredPositionNbBytesRead, ctypes.byref(buf))
    # Cast the object data to uint32
    CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
    self._coefD = CastedObjectData[0]

    #print 'coefficients are %s %s %s %s' % (self._coefA, self._coefB, self._coefC, self._coefD)
    # Decode A-C from their packed u32 representation; D packs the
    # min/max wavelengths (in tenths of an Angstrom) into its two halves.
    self._doubleA = self._u32todouble(self._coefA)
    self._doubleB = self._u32todouble(self._coefB)
    self._doubleC = self._u32todouble(self._coefC)
    firstHalf = np.int16(self._coefD >> 16)
    secondHalf = np.int16(self._coefD & 0xffff)
    # Set the minimum and maximum wavelengths for the motor
    self._minwl = float(firstHalf)/10.0
    self._maxwl = float(secondHalf)/10.0
    # print 'first %s second %s' % (firstHalf, secondHalf)
    # This returns '10871' and '11859' for the Sacher, which are the correct
    # wavelength ranges in Angstroms

    #print 'Now calculate the current wavelength position:'
    # Wavelength is a quadratic in the stored position offset.
    self._currentwl = self._doubleA*(self._offset)**2.0 + self._doubleB*self._offset + self._doubleC
    print 'Current wavelength: %.3f nm' % self._currentwl
    return True
def nvpair_value_int8(nvp):
    """Extract the int8 value stored in an nvpair.

    Args:
        nvp: pointer to a ``DATA_TYPE_INT8`` nvpair.

    Returns:
        int: the pair's value as a plain Python integer.

    Raises:
        whatever ``_error`` maps the non-zero libnvpair return code to.
    """
    val = C.c_int8()
    ret = _nvpair_value_int8(nvp, C.byref(val))
    if ret != 0:
        raise _error(ret)
    # Return the plain Python int, not the ctypes wrapper: c_int8 objects
    # do not compare equal to ints, so leaking the wrapper breaks callers
    # doing e.g. `value == 5` (sibling accessors return .value as well).
    return val.value
def _read_mseed(mseed_object, starttime=None, endtime=None, headonly=False,
                sourcename=None, reclen=None, details=False,
                header_byteorder=None, verbose=None, **kwargs):
    """
    Reads a Mini-SEED file and returns a Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :param mseed_object: Filename or open file like object that contains the
        binary Mini-SEED data. Any object that provides a read() method will be
        considered to be a file like object.
    :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param starttime: Only read data samples after or at the start time.
    :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param endtime: Only read data samples before or at the end time.
    :type headonly: bool, optional
    :param headonly: Determines whether or not to unpack the data or just
        read the headers.
    :type sourcename: str
    :param sourcename: Source name has to have the structure
        'network.station.location.channel' and can contain globbing
        characters. Defaults to ``None``.
    :type reclen: int, optional
    :param reclen: If it is None, it will be automatically determined for
        every record. If it is known, just set it to the record length in
        bytes which will increase the reading speed slightly.
    :type details: bool, optional
    :param details: If ``True`` read additional information: timing quality
        and availability of calibration information.
        Note, that the traces are then also split on these additional
        information. Thus the number of traces in a stream will change.
        Details are stored in the mseed stats AttribDict of each trace.
        ``False`` specifies for both cases, that this information is not
        available. ``blkt1001.timing_quality`` specifies the timing quality
        from 0 to 100 [%]. ``calibration_type`` specifies the type of
        available calibration information blockettes:

        - ``1``: Step Calibration (Blockette 300)
        - ``2``: Sine Calibration (Blockette 310)
        - ``3``: Pseudo-random Calibration (Blockette 320)
        - ``4``: Generic Calibration (Blockette 390)
        - ``-2``: Calibration Abort (Blockette 395)

    :type header_byteorder: int or str, optional
    :param header_byteorder: Must be either ``0`` or ``'<'`` for LSBF or
        little-endian, ``1`` or ``'>'`` for MBF or big-endian. ``'='`` is the
        native byte order. Used to enforce the header byte order. Useful in
        some rare cases where the automatic byte order detection fails.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/two_channels.mseed")
    >>> print(st)  # doctest: +ELLIPSIS
    2 Trace(s) in Stream:
    BW.UH3..EHE | 2010-06-20T00:00:00.279999Z - ... | 200.0 Hz, 386 samples
    BW.UH3..EHZ | 2010-06-20T00:00:00.279999Z - ... | 200.0 Hz, 386 samples

    >>> from obspy import UTCDateTime
    >>> st = read("/path/to/test.mseed",
    ...           starttime=UTCDateTime("2003-05-29T02:16:00"),
    ...           sourcename="NL.*.*.?HZ")
    >>> print(st)  # doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    NL.HGN.00.BHZ | 2003-05-29T02:15:59.993400Z - ... | 40.0 Hz, 5629 samples

    Read with ``details=True`` to read more details of the file if present.

    >>> st = read("/path/to/timingquality.mseed", details=True)
    >>> print(st[0].stats.mseed.blkt1001.timing_quality)
    55

    ``False`` means that the necessary information could not be found in the
    file.

    >>> print(st[0].stats.mseed.calibration_type)
    False

    Note that each change in timing quality from record to record may trigger
    a new Trace object to be created so the Stream object may contain many
    Trace objects if ``details=True`` is used.

    >>> print(len(st))
    101
    """
    # Parse the headonly and reclen flags.
    # unpack_data is forwarded to the C reader: 1 -> decode samples,
    # 0 -> headers only.
    if headonly is True:
        unpack_data = 0
    else:
        unpack_data = 1
    if reclen is None:
        reclen = -1
    elif reclen not in VALID_RECORD_LENGTHS:
        msg = 'Invalid record length. Autodetection will be used.'
        warnings.warn(msg)
        reclen = -1

    # Determine the byte order.
    # -1 means: let the C routine autodetect the header byte order.
    if header_byteorder == "=":
        header_byteorder = NATIVE_BYTEORDER

    if header_byteorder is None:
        header_byteorder = -1
    elif header_byteorder in [0, "0", "<"]:
        header_byteorder = 0
    elif header_byteorder in [1, "1", ">"]:
        header_byteorder = 1

    # The quality flag is no more supported. Raise a warning.
    if 'quality' in kwargs:
        msg = 'The quality flag is no longer supported in this version of ' + \
            'obspy.io.mseed. obspy.io.mseed.util has some functions with ' \
            'similar behavior.'
        warnings.warn(msg, category=DeprecationWarning)

    # Parse some information about the file.
    if header_byteorder == 0:
        bo = "<"
    elif header_byteorder > 0:
        bo = ">"
    else:
        bo = None

    info = util.get_record_information(mseed_object, endian=bo)

    # Map the encoding to a readable string value.
    if info["encoding"] in ENCODINGS:
        info['encoding'] = ENCODINGS[info['encoding']][0]
    elif info["encoding"] in UNSUPPORTED_ENCODINGS:
        msg = ("Encoding '%s' (%i) is not supported by ObsPy. Please send "
               "the file to the ObsPy developers so that we can add "
               "support for it.") % \
            (UNSUPPORTED_ENCODINGS[info['encoding']], info['encoding'])
        raise ValueError(msg)
    else:
        msg = "Encoding '%i' is not a valid MiniSEED encoding." % \
            info['encoding']
        raise ValueError(msg)

    # Only keep information relevant for the whole file.
    info = {'encoding': info['encoding'],
            'filesize': info['filesize'],
            'record_length': info['record_length'],
            'byteorder': info['byteorder'],
            'number_of_records': info['number_of_records']}

    # If it's a file name just read it.
    if isinstance(mseed_object, (str, native_str)):
        # Read to NumPy array which is used as a buffer.
        bfrNp = np.fromfile(mseed_object, dtype=np.int8)
    elif hasattr(mseed_object, 'read'):
        bfrNp = np.fromstring(mseed_object.read(), dtype=np.int8)

    # Get the record length
    # The exponent appears to be stored as ASCII digits at bytes 19:21 of a
    # full SEED volume header; fall back to 4096 when absent/non-numeric.
    try:
        record_length = pow(2, int(''.join([chr(_i) for _i in bfrNp[19:21]])))
    except ValueError:
        record_length = 4096

    # Search for data records and pass only the data part to the underlying C
    # routine.
    offset = 0
    # 0 to 9 are defined in a row in the ASCII charset.
    min_ascii = ord('0')

    # Small function to check whether an array of ASCII values contains only
    # digits.
    def isdigit(x):
        return True if (x - min_ascii).max() <= 9 else False

    # Skip over leading (dataless) SEED control headers until the first data
    # record; the first six bytes of each record must be an ASCII sequence
    # number.
    while True:
        # This should never happen
        if (isdigit(bfrNp[offset:offset + 6]) is False) or \
                (bfrNp[offset + 6] not in VALID_CONTROL_HEADERS):
            msg = 'Not a valid (Mini-)SEED file'
            raise Exception(msg)
        elif bfrNp[offset + 6] in SEED_CONTROL_HEADERS:
            offset += record_length
            continue
        break
    bfrNp = bfrNp[offset:]
    buflen = len(bfrNp)

    # If no selection is given pass None to the C function.
    if starttime is None and endtime is None and sourcename is None:
        selections = None
    else:
        select_time = SelectTime()
        selections = Selections()
        selections.timewindows.contents = select_time
        if starttime is not None:
            if not isinstance(starttime, UTCDateTime):
                msg = 'starttime needs to be a UTCDateTime object'
                raise ValueError(msg)
            selections.timewindows.contents.starttime = \
                util._convert_datetime_to_MSTime(starttime)
        else:
            # HPTERROR results in no starttime.
            selections.timewindows.contents.starttime = HPTERROR
        if endtime is not None:
            if not isinstance(endtime, UTCDateTime):
                msg = 'endtime needs to be a UTCDateTime object'
                raise ValueError(msg)
            selections.timewindows.contents.endtime = \
                util._convert_datetime_to_MSTime(endtime)
        else:
            # HPTERROR results in no starttime.
            selections.timewindows.contents.endtime = HPTERROR
        if sourcename is not None:
            if not isinstance(sourcename, (str, native_str)):
                msg = 'sourcename needs to be a string'
                raise ValueError(msg)
            # libmseed uses underscores as separators and allows filtering
            # after the dataquality which is disabled here to not confuse
            # users. (* == all data qualities)
            selections.srcname = (sourcename.replace('.', '_') + '_*').\
                encode('ascii', 'ignore')
        else:
            selections.srcname = b'*'
    all_data = []

    # Use a callback function to allocate the memory and keep track of the
    # data.
    def allocate_data(samplecount, sampletype):
        # Enhanced sanity checking for libmseed 2.10 can result in the
        # sampletype not being set. Just return an empty array in this case.
        if sampletype == b"\x00":
            data = np.empty(0)
        else:
            data = np.empty(samplecount, dtype=DATATYPES[sampletype])
        all_data.append(data)
        return data.ctypes.data
    # XXX: Do this properly!
    # Define Python callback function for use in C function. Return a long so
    # it hopefully works on 32 and 64 bit systems.
    allocData = C.CFUNCTYPE(C.c_long, C.c_int, C.c_char)(allocate_data)

    # Messages from the C layer are routed back here: "ERROR: " is fatal,
    # "INFO: " becomes a Python warning.
    def log_error_or_warning(msg):
        msg = msg.decode()
        if msg.startswith("ERROR: "):
            raise InternalMSEEDReadingError(msg[7:].strip())
        if msg.startswith("INFO: "):
            msg = msg[6:].strip()
            # Append the offset of the full SEED header if necessary. That way
            # the C code does not have to deal with it.
            if offset and "offset" in msg:
                msg = ("%s The file contains a %i byte dataless part at the "
                       "beginning. Make sure to add that to the reported "
                       "offset to get the actual location in the file."
                       % (msg, offset))
            warnings.warn(msg, InternalMSEEDReadingWarning)
    diag_print = C.CFUNCTYPE(C.c_void_p, C.c_char_p)(log_error_or_warning)

    def log_message(msg):
        print(msg[6:].strip())
    log_print = C.CFUNCTYPE(C.c_void_p, C.c_char_p)(log_message)

    # NOTE(review): the bare except deliberately coerces any non-numeric
    # verbose value (including None) to 0 instead of crashing.
    try:
        verbose = int(verbose)
    except:
        verbose = 0

    lil = clibmseed.readMSEEDBuffer(
        bfrNp, buflen, selections, C.c_int8(unpack_data),
        reclen, C.c_int8(verbose), C.c_int8(details), header_byteorder,
        allocData, diag_print, log_print)

    # XXX: Check if the freeing works.
    del selections

    traces = []
    try:
        currentID = lil.contents
    # Return stream if not traces are found.
    except ValueError:
        clibmseed.lil_free(lil)
        del lil
        return Stream()

    # Walk the linked list returned by the C reader: one ID node per channel,
    # each with a linked list of contiguous segments.
    while True:
        # Init header with the essential information.
        header = {'network': currentID.network.strip(),
                  'station': currentID.station.strip(),
                  'location': currentID.location.strip(),
                  'channel': currentID.channel.strip(),
                  'mseed': {'dataquality': currentID.dataquality}}
        # Loop over segments.
        try:
            currentSegment = currentID.firstSegment.contents
        except ValueError:
            break
        while True:
            header['sampling_rate'] = currentSegment.samprate
            header['starttime'] = \
                util._convert_MSTime_to_datetime(currentSegment.starttime)
            if details:
                timing_quality = currentSegment.timing_quality
                if timing_quality == 0xFF:
                    # 0xFF is mask for not known timing
                    timing_quality = False
                header['mseed']['blkt1001'] = {}
                header['mseed']['blkt1001']['timing_quality'] = timing_quality
                header['mseed']['calibration_type'] = \
                    currentSegment.calibration_type \
                    if currentSegment.calibration_type != -1 else False

            if headonly is False:
                # The data always will be in sequential order.
                data = all_data.pop(0)
                header['npts'] = len(data)
            else:
                data = np.array([])
                header['npts'] = currentSegment.samplecnt
            # Make sure to init the number of samples.
            # Py3k: convert to unicode
            header['mseed'] = dict((k, v.decode())
                                   if isinstance(v, bytes) else (k, v)
                                   for k, v in header['mseed'].items())
            header = dict((k, v.decode()) if isinstance(v, bytes) else (k, v)
                          for k, v in header.items())
            trace = Trace(header=header, data=data)
            # Append information.
            for key, value in info.items():
                setattr(trace.stats.mseed, key, value)
            traces.append(trace)
            # A Null pointer access results in a ValueError
            try:
                currentSegment = currentSegment.next.contents
            except ValueError:
                break
        try:
            currentID = currentID.next.contents
        except ValueError:
            break

    clibmseed.lil_free(lil)  # NOQA
    del lil  # NOQA
    return Stream(traces=traces)
def writeInt8(self, n):
    """Serialize *n* as one big-endian signed byte and write it to the stream.

    Values outside [-128, 127] wrap around two's-complement style, exactly as
    ``ctypes.c_int8`` truncates them.
    """
    wrapped = ctypes.c_int8(n).value  # coerce into the int8 range
    self.writeFully(struct.pack('!b', wrapped))
import serial data_type_size_lut = { 'uint8_t': 1, 'int8_t': 1, 'uint16_t': 2, 'int16_t': 2, 'uint32_t': 4, 'int32_t': 4, 'uint64_t': 8, 'int64_t': 8 } conversion_lut = { 'uint8_t': lambda buf: ctypes.c_uint8(buf[0]).value, 'int8_t': lambda buf: ctypes.c_int8(buf[0]).value, 'uint16_t': lambda buf: ctypes.c_uint16((buf[0] << 8) | buf[1]).value, 'int16_t': lambda buf: ctypes.c_int16((buf[0] << 8) | buf[1]).value, 'uint32_t': lambda buf: ctypes.c_uint32((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]).value, 'int32_t': lambda buf: ctypes.c_int32((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]).value, 'uint64_t': lambda buf: ctypes.c_uint64((buf[0] << 56) | (buf[1] << 48) | (buf[2] << 40) | (buf[3] << 32) | (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7]).value, 'int64_t': lambda buf: ctypes.c_int64((buf[0] << 56) | (buf[1] << 48) | (buf[2] << 40) | (buf[3] << 32) | (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7]).value } with serial.Serial(sys.argv[1], 115200) as s: with open(os.path.join(os.getcwd(), sys.argv[2]), 'r') as config_file: messages = json.load(config_file) while True: try: message_code = ord(s.read(1))
def output_bank_opcodes(original_offset, max_byte_count=0x4000, include_last_address=True, stop_at=[], debug = False):
    """Disassemble GB z80 opcodes from the loaded ROM starting at original_offset.

    Walks the ROM byte by byte, emitting assembly text with auto-generated
    labels for relative-jump targets, until a terminating jump/return is hit
    with no forward label still outstanding, a stop_at address is reached, or
    max_byte_count bytes have been scanned.

    Returns a tuple:
        (output, offset, last_hl_address, last_a_address, used_3d97)
    where output is the disassembly text, offset the address after the last
    byte consumed, last_hl_address / last_a_address the most recent immediate
    values loaded into hl / a (or None), and used_3d97 whether a call/jp to
    $3d97 was seen.

    NOTE(review): the mutable default stop_at=[] is the classic Python
    pitfall, but it is harmless here because stop_at is only read, never
    mutated.
    """
    #fs = current_address
    #b = bank_byte
    #in = input_data -- rom
    #bank_size = byte_count
    #i = offset
    #ad = end_address
    #a, oa = current_byte_number

    # stop_at can be used to supply a list of addresses to not disassemble
    # over. This is useful if you know in advance that there are a lot of
    # fall-throughs.

    load_labels()
    load_rom()

    # Python 2 integer division: which 0x4000-byte bank the offset lives in.
    bank_id = original_offset / 0x4000
    if debug:
        print "bank id is: " + str(bank_id)

    last_hl_address = None #for when we're scanning the main map script
    last_a_address = None
    used_3d97 = False

    global rom
    offset = original_offset
    current_byte_number = 0 #start from the beginning

    #we don't actually have an end address, but we'll just say $4000
    end_address = original_offset + max_byte_count

    # byte_labels maps address -> {"name", "usage", "definition"}; "usage"
    # counts references so unused labels can be stripped afterwards.
    byte_labels = {}

    first_loop = True
    output = ""
    keep_reading = True
    while offset <= end_address and keep_reading:
        current_byte = rom[offset]
        is_data = False
        maybe_byte = current_byte

        # stop at any address
        if not first_loop and offset in stop_at:
            keep_reading = False
            break

        #first check if this byte already has a label
        #if it does, use the label
        #if not, generate a new label
        if offset in byte_labels.keys():
            line_label = byte_labels[offset]["name"]
            byte_labels[offset]["usage"] += 1
            output += "\n"
        else:
            line_label = asm_label(offset)
            byte_labels[offset] = {}
            byte_labels[offset]["name"] = line_label
            byte_labels[offset]["usage"] = 0
            byte_labels[offset]["definition"] = True
        output += line_label.lower() + "\n" #" ; " + hex(offset) + "\n"

        #find out if there's a two byte key like this
        temp_maybe = maybe_byte
        temp_maybe += ( rom[offset+1] << 8)
        if temp_maybe in opt_table.keys() and rom[offset+1]!=0:
            # Two-byte opcode (e.g. CB-prefixed); substitute "x" (one-byte
            # immediate) and "?" (two-byte little-endian immediate)
            # placeholders in the mnemonic template.
            opstr = opt_table[temp_maybe][0].lower()

            if "x" in opstr:
                for x in range(0, opstr.count("x")):
                    insertion = rom[offset + 1]
                    insertion = "$" + hex(insertion)[2:]

                    opstr = opstr[:opstr.find("x")].lower() + insertion + opstr[opstr.find("x")+1:].lower()

                    current_byte += 1
                    offset += 1
            if "?" in opstr:
                for y in range(0, opstr.count("?")):
                    byte1 = rom[offset + 1]
                    byte2 = rom[offset + 2]

                    number = byte1
                    number += byte2 << 8;

                    insertion = "$%.4x" % (number)

                    opstr = opstr[:opstr.find("?")].lower() + insertion + opstr[opstr.find("?")+1:].lower()
                    current_byte_number += 2
                    offset += 2

            output += spacing + opstr #+ " ; " + hex(offset)
            output += "\n"

            current_byte_number += 2
            offset += 2
        elif maybe_byte in opt_table.keys():
            op_code = opt_table[maybe_byte]
            op_code_type = op_code[1]
            op_code_byte = maybe_byte

            #type = -1 when it's the E op
            #if op_code_type != -1:
            if op_code_type == 0 and rom[offset] == op_code_byte:
                # Type 0: no operands.
                op_str = op_code[0].lower()

                output += spacing + op_code[0].lower() #+ " ; " + hex(offset)
                output += "\n"

                offset += 1
                current_byte_number += 1
            elif op_code_type == 1 and rom[offset] == op_code_byte:
                # Type 1: one-byte operand; relative jumps get a label for
                # their computed (signed-offset) target instead of a raw byte.
                oplen = len(op_code[0])
                opstr = copy(op_code[0])
                xes = op_code[0].count("x")
                include_comment = False
                for x in range(0, xes):
                    insertion = rom[offset + 1]
                    insertion = "$" + hex(insertion)[2:]

                    if current_byte == 0x18 or current_byte==0x20 or current_byte in relative_jumps: #jr or jr nz
                        #generate a label for the byte we're jumping to
                        target_address = offset + 2 + c_int8(rom[offset + 1]).value
                        if target_address in byte_labels.keys():
                            byte_labels[target_address]["usage"] = 1 + byte_labels[target_address]["usage"]
                            line_label2 = byte_labels[target_address]["name"]
                        else:
                            line_label2 = asm_label(target_address)
                            byte_labels[target_address] = {}
                            byte_labels[target_address]["name"] = line_label2
                            byte_labels[target_address]["usage"] = 1
                            byte_labels[target_address]["definition"] = False

                        insertion = line_label2.lower()
                        if has_outstanding_labels(byte_labels) and all_outstanding_labels_are_reverse(byte_labels, offset):
                            include_comment = True
                    elif current_byte == 0x3e:
                        # ld a, N -- remember the last immediate loaded into a.
                        last_a_address = rom[offset + 1]

                    opstr = opstr[:opstr.find("x")].lower() + insertion + opstr[opstr.find("x")+1:].lower()

                    # because the $ff00+$ff syntax is silly
                    if opstr.count("$") > 1 and "+" in opstr:
                        first_orig = opstr[opstr.find("$"):opstr.find("+")]
                        first_val = eval(first_orig.replace("$","0x"))

                        second_orig = opstr[opstr.find("+$")+1:opstr.find("]")]
                        second_val = eval(second_orig.replace("$","0x"))

                        combined_val = "$%.4x" % (first_val + second_val)
                        result = find_label(combined_val, bank_id)
                        if result != None:
                            combined_val = result

                        replacetron = "[%s+%s]" % (first_orig, second_orig)
                        opstr = opstr.replace(replacetron, "[%s]" % combined_val)

                    output += spacing + opstr
                    if include_comment:
                        output += " ; " + hex(offset)
                        if current_byte in relative_jumps:
                            output += " $" + hex(rom[offset + 1])[2:]
                    output += "\n"

                    current_byte_number += 1
                    offset += 1
                    insertion = ""

                current_byte_number += 1
                offset += 1
                include_comment = False
            elif op_code_type == 2 and rom[offset] == op_code_byte:
                # Type 2: two-byte (16-bit) operand; try to replace the raw
                # address with a known label.
                oplen = len(op_code[0])
                opstr = copy(op_code[0])
                qes = op_code[0].count("?")
                for x in range(0, qes):
                    byte1 = rom[offset + 1]
                    byte2 = rom[offset + 2]

                    number = byte1
                    number += byte2 << 8

                    insertion = "$%.4x" % (number)
                    result = find_label(insertion, bank_id)
                    if result != None:
                        insertion = result

                    opstr = opstr[:opstr.find("?")].lower() + insertion + opstr[opstr.find("?")+1:].lower()
                    output += spacing + opstr #+ " ; " + hex(offset)
                    output += "\n"

                    current_byte_number += 2
                    offset += 2

                current_byte_number += 1
                offset += 1

                if current_byte == 0x21:
                    # ld hl, N -- remember the last immediate loaded into hl.
                    last_hl_address = byte1 + (byte2 << 8)
                if current_byte == 0xcd:
                    # call N
                    if number == 0x3d97:
                        used_3d97 = True

                #duck out if this is jp $24d7
                if current_byte == 0xc3 or current_byte in relative_unconditional_jumps:
                    if current_byte == 0xc3:
                        if number == 0x3d97:
                            used_3d97 = True
                    #if number == 0x24d7: #jp
                    if not has_outstanding_labels(byte_labels) or all_outstanding_labels_are_reverse(byte_labels, offset):
                        keep_reading = False
                        is_data = False
                        break
            else:
                is_data = True

            #stop reading at a jump, relative jump or return
            if current_byte in end_08_scripts_with:
                if not has_outstanding_labels(byte_labels) or all_outstanding_labels_are_reverse(byte_labels, offset):
                    keep_reading = False
                    is_data = False

                    #cleanup
                    break
                else:
                    is_data = False
                    keep_reading = True
            else:
                is_data = False
                keep_reading = True
        else:
        #if is_data and keep_reading:
            # Unknown opcode byte: emit it as raw data.
            output += spacing + "db $" + hex(rom[offset])[2:] #+ " ; " + hex(offset)
            output += "\n"
            offset += 1
            current_byte_number += 1
        #else the while loop would have spit out the opcode

        #these two are done prior
        #offset += 1
        #current_byte_number += 1

        if current_byte in relative_unconditional_jumps + end_08_scripts_with:
            output += "\n"

        first_loop = False

    #clean up unused labels
    for label_line in byte_labels.keys():
        address = label_line
        label_line = byte_labels[label_line]
        if label_line["usage"] == 0:
            output = output.replace((label_line["name"] + "\n").lower(), "")

    #tone down excessive spacing
    output = output.replace("\n\n\n","\n\n")

    #add the offset of the final location
    if include_last_address:
        output += "; " + hex(offset)

    return (output, offset, last_hl_address, last_a_address, used_3d97)