def l0id_from_data_word(data_word):
    """Return the L0ID decoded from an event-header start word, or None.

    Only words flagged as the start of an event carry an EVT_HDR1 bitfield
    with an L0ID; any other word yields None.
    """
    if not data_word.is_start_of_event():
        return None
    header_word = DataFormat.BitFieldWordValue(DataFormat.EVT_HDR1, data_word.contents)
    return header_word.getField("L0ID")
def __init__(
    self, contents, is_metadata=False, timestamp=None, timestamp_units=None
):
    """Wrap a raw data word with its metadata flag and optional timestamp.

    For metadata words, the generic-metadata FLAG field is decoded up front
    and cached on the instance.
    """
    self._contents = contents
    self._is_metadata = is_metadata
    self._timestamp = timestamp
    self._timestamp_units = timestamp_units
    # FLAG is only meaningful (and decodable) for metadata words.
    self._flag = None
    if is_metadata:
        meta_word = DataFormat.BitFieldWordValue(DataFormat.GenMetadata, contents)
        self._flag = meta_word.getField("FLAG")
def _parse_footer(self):
    """Decode the three event-footer words into bitfields.

    Fills self._footer_fields with one BitFieldWordValue per footer word and
    self._footer_field_map with a field-name -> footer-word-index lookup.

    Raises:
        ValueError: if the event does not hold exactly the expected number
            of footer words (i.e. the event data was never loaded).
    """
    descriptors = [
        DataFormat.EVT_FTR1,
        DataFormat.EVT_FTR2,
        DataFormat.EVT_FTR3,
    ]
    if len(self.footer_words) != len(descriptors):
        raise ValueError(
            "ERROR Cannot parse footer if event data has not been loaded!"
        )
    for idx, (descriptor, data_word) in enumerate(
        zip(descriptors, self.footer_words)
    ):
        bitfield = DataFormat.BitFieldWordValue(descriptor, data_word.contents)
        self._footer_fields.append(bitfield)
        # Record which footer word each named field lives in.
        for field in bitfield.classobj.fields:
            self._footer_field_map[field.name] = idx
def file_event_generator(filename, endian="little", n_to_load=-1):
    """Lazily yield parsed DataEvent objects from a binary event file.

    The file is a sequence of 9-byte words: a 1-byte metadata flag followed
    by a 64-bit data word.

    Parameters:
        filename: path to the event data file.
        endian: byte order of the file, "little" or "big".
        n_to_load: maximum number of events to yield; <= 0 means no limit.

    Raises:
        Exception: if the file does not exist or ends mid-word.
    """
    path = Path(filename)
    if not (path.exists() and path.is_file()):
        # BUGFIX: the f-string previously had no placeholder, so the
        # offending filename was never reported.
        raise Exception(f"Cannot find provided file {filename}")
    # Hoisted loop-invariant: struct format for (bool flag, uint64 word).
    fmt = {"little": "<?Q", "big": ">?Q"}[endian]
    at_event_end = False
    n_events_loaded = 0
    with open(filename, "rb") as ifile:
        current_event = None
        filesize = os.stat(filename).st_size
        for _ in range(0, filesize, 9):
            data = ifile.read(9)
            if len(data) != 9:
                raise Exception(f"Malformed event data file {filename}")
            is_metadata, contents = struct.unpack(fmt, data)
            word = DataWord(contents, is_metadata)
            if word.is_event_header_start():
                # A new header closes out the previous (complete) event.
                if at_event_end and current_event is not None:
                    current_event.parse()
                    yield current_event
                    n_events_loaded += 1
                    if n_to_load > 0 and n_events_loaded >= n_to_load:
                        return
                header = DataFormat.BitFieldWordValue(DataFormat.EVT_HDR1, contents)
                current_event = DataEvent(header.getField("L0ID"))
            if current_event is not None:
                current_event.add_word(word)
            if word.is_event_footer_start():
                at_event_end = True
        # BUGFIX: yield the final event in the file. Previously events were
        # only emitted when the *next* header was seen, so the last event in
        # the file was silently dropped.
        if at_event_end and current_event is not None:
            current_event.parse()
            yield current_event
def load_events(data_words=None, endian="little", n_to_load=-1, l0id_request=-1):
    """
    Load data from a list of DataWord objects and fill events.

    Parameters:
        data_words: iterable of DataWord objects; None is treated as empty.
            (BUGFIX: was a mutable default argument ``[]``.)
        endian: unused here; kept for interface parity with the file loaders.
        n_to_load: maximum number of events to build; <= 0 means no limit.
        l0id_request: if > 0, stop scanning once this L0ID has been seen and
            return only the (first) matching event.

    Returns:
        list of parsed DataEvent objects.
    """
    if data_words is None:
        data_words = []
    current_event = None
    events = []
    l0ids_loaded = set()
    for word in data_words:
        if word.is_event_header_start():
            if n_to_load > 0 and len(events) >= n_to_load:
                break
            # Stop once the requested L0ID has already been collected.
            if l0id_request > 0 and int(l0id_request) in l0ids_loaded:
                break
            header = DataFormat.BitFieldWordValue(DataFormat.EVT_HDR1, word.contents)
            current_event = DataEvent(header.getField("L0ID"))
            l0ids_loaded.add(int(current_event.l0id))
        if current_event is not None:
            current_event.add_word(word)
        if word.is_event_footer_start():
            events.append(current_event)
    if int(l0id_request) > 0 and len(events) > 0:
        # Keep only the first event matching the requested L0ID.
        events = [
            event for event in events if int(event.l0id) == int(l0id_request)
        ][:1]
    for event in events:
        event.parse()
    return events
def load_events_from_file(
    filename, endian="little", n_to_load=-1, l0id_request=-1, load_timing_info=False
):
    """Load parsed DataEvent objects from a binary event file.

    Parameters:
        filename: path to the event data file (9 bytes per word).
        endian: byte order of the file, "little" or "big".
        n_to_load: maximum number of events to load; <= 0 means no limit.
        l0id_request: if > 0, load only the event with this L0ID.
        load_timing_info: if True, attach per-word timestamps from the
            companion timing file.

    Returns:
        list of parsed DataEvent objects.

    Raises:
        Exception: if the file is missing or malformed, if both n_to_load
            and l0id_request are given, or if the timing file runs short.
    """
    path = Path(filename)
    if not (path.exists() and path.is_file()):
        # BUGFIX: the f-string previously had no placeholder, so the
        # offending filename was never reported.
        raise Exception(f"Cannot find provided file {filename}")
    if n_to_load > 0 and l0id_request > 0:
        raise Exception(
            "ERROR Cannot request specific number of events AND a specific L0ID at the same time"
        )
    timing_gen = None
    time_units = None
    if load_timing_info:
        timing_gen = timing_info_gen(filename)
        time_units = next(timing_gen)  # first one returned is the time unit (str)
    # Hoisted loop-invariant: struct format for (bool flag, uint64 word).
    fmt = {"little": "<?Q", "big": ">?Q"}[endian]
    events = []
    l0ids_loaded = set()
    with open(filename, "rb") as ifile:
        current_event = None
        filesize = os.stat(filename).st_size
        for _ in range(0, filesize, 9):
            data = ifile.read(9)
            if len(data) != 9:
                raise Exception(f"Malformed event data file {filename}")
            is_metadata, contents = struct.unpack(fmt, data)
            word = DataWord(contents, is_metadata)
            ##
            ## timestamp
            ##
            if timing_gen:
                try:
                    word.set_timestamp(next(timing_gen), units=time_units)
                except StopIteration:
                    raise Exception(
                        f"ERROR Timing file for loaded data file (={filename}) has incorrect number of words in it!"
                    )
            if word.is_event_header_start():
                if n_to_load > 0 and len(events) >= n_to_load:
                    break
                if l0id_request > 0 and int(l0id_request) in l0ids_loaded:
                    break
                header = DataFormat.BitFieldWordValue(DataFormat.EVT_HDR1, contents)
                current_event = DataEvent(header.getField("L0ID"))
                l0ids_loaded.add(int(current_event.l0id))
            if current_event is not None:
                current_event.add_word(word)
            if word.is_event_footer_start():
                events.append(current_event)
    if int(l0id_request) > 0 and len(events) > 0:
        # Keep only the first event matching the requested L0ID.
        events = [
            event for event in events if int(event.l0id) == int(l0id_request)
        ][:1]
    for event in events:
        event.parse()
    return events
def _parse(self, words):
    """
    Decode a module data block into header bitfields, cluster data, and
    (for clustered data) a module footer.

    Parameters:
        words: list of DataWord objects; words[0] is the module header
            (M_HDR) and the remaining words carry the payload.

    Side effects: fills self._header, self._cluster_data and self._footer,
    then finishes by calling self._parse_header().
    """
    # First header word comes straight from words[0]; the second (M_HDR2)
    # is filled in from the start of the payload stream below.
    h0 = DataFormat.BitFieldWordValue(DataFormat.M_HDR, value=words[0].contents)
    h1 = DataFormat.BitFieldWordValue(DataFormat.M_HDR2)
    self._header = [h0, h1]
    data_type = self._header[0].getField("TYPE")
    data_type_raw = DataFormat.M_HDR_TYPE.RAW
    data_type_clus = DataFormat.M_HDR_TYPE.CLUSTERED
    det_type = self._header[0].getField("DET")
    det_type_pix = DataFormat.M_HDR_DET.PIXEL
    det_type_strip = DataFormat.M_HDR_DET.STRIP
    expectfooter = False
    for iword, word in enumerate(words[1:]):
        # Each payload word is consumed in variable-width sub-words.
        unpacker = SubwordUnpacker(word.contents, DataFormat.WORD_LENGTH)
        empty = False
        ##
        ## raw
        ##
        if data_type == data_type_raw:
            # Sub-word width depends on the detector type.
            raw_length = {
                det_type_pix: DataFormat.PIXEL_RAW_BITS,
                det_type_strip: DataFormat.HCC_CLUSTER.nbits,
            }[det_type]
            if iword == 0:
                # The first payload word begins with the M_HDR2 header.
                val, empty = unpacker.get(DataFormat.M_HDR2.nbits)
                h1.value = val
            while not empty:
                val, empty = unpacker.get(raw_length)
                self._cluster_data.append(val)
        ##
        ## clustered data
        ##
        if data_type == data_type_clus:
            clus_word_format = {
                det_type_pix: DataFormat.PIXEL_CLUSTER,
                det_type_strip: DataFormat.STRIP_CLUSTER,
            }[det_type]
            clus_word_length = clus_word_format.nbits
            clus_footer_format = {
                det_type_pix: DataFormat.PIXEL_CL_FTR,
                det_type_strip: DataFormat.STRIP_CL_FTR,
            }[det_type]
            clus_footer_length = clus_footer_format.nbits
            if iword == 0:
                val, empty = unpacker.get(DataFormat.M_HDR2.nbits)
                h1.value = val
            ##
            ## here we handle mis-shapened words
            ##
            # test for empty module
            if len(words[1:]) == 1:
                # Single payload word: unpack everything at cluster width
                # and classify each sub-word afterwards by its FLAG byte.
                tmp_words = []
                while not empty:
                    val, empty = unpacker.get(clus_word_length)
                    tmp_words.append(val)
                for tw in tmp_words:
                    # FLAG occupies the top 8 bits of the sub-word.
                    flag = (tw >> (clus_word_length - 8)) & 0xFF
                    if flag == DataFormat.CL_FTR_FLAG.FLAG:
                        self._footer = DataFormat.BitFieldWordValue(
                            clus_footer_format, tw
                        )
                    else:
                        self._cluster_data.append(
                            DataFormat.BitFieldWordValue(clus_word_format, tw)
                        )
            else:
                while not empty:
                    if (not expectfooter) and (self.footer is None):
                        val, empty = unpacker.get(clus_word_length)
                        cluster_val = DataFormat.BitFieldWordValue(
                            clus_word_format, val
                        )
                        self._cluster_data.append(cluster_val)
                        # A cluster with LAST set means the next sub-word
                        # should be the module footer.
                        expectfooter = cluster_val.getField("LAST") == 1
                    else:
                        val, empty = unpacker.get(clus_footer_length)
                        expectfooter = False
                        if self._footer is None:
                            self._footer = DataFormat.BitFieldWordValue(
                                clus_footer_format, val
                            )
                        break
    ##
    ## if not doing mis-shapened word handling, uncomment this
    ##
    # while not empty :
    #    if (not expectfooter) and (self.footer == None) :
    #        val, empty = unpacker.get(clus_word_length)
    #        cluster_val = DataFormat.BitFieldWordValue(clus_word_format, val)
    #        self._cluster_data.append(cluster_val)
    #        expectfooter = cluster_val.getField("LAST") == 1
    #    else :
    #        val, empty = unpacker.get(clus_footer_length)
    #        expectfooter = False
    #        if not self._footer :
    #            self._footer = DataFormat.BitFieldWordValue(clus_footer_format, val)
    #        break
    if data_type == data_type_clus and (
        self._footer is None or self._footer == 0x0
    ):
        print("WARNING Failed to find MODULE FOOTER for clustered data")
    self._parse_header()
def dump_evt_file(
    input_filename,
    n_events=0,
    l0id_to_load=[],
    do_boundary=False,
    do_parse=False,
    do_timestamp=False,
    do_total_word_count=False,
    do_event_word_count=False,
    list_l0id=False,
):
    """
    Loads the events from the input file "input_filename" and dumps the data
    words to standard out.

    Parameters:
        input_filename: path to the binary event data file.
        n_events: stop after this many events have been dumped (0 = all).
        l0id_to_load: if non-empty, only dump words of events whose L0ID is
            in this list.
        do_boundary: print separator lines at event/module/footer boundaries.
        do_parse: decode and print the named bitfields of each known word.
        do_timestamp: read per-word timestamps from the companion timing file
            and print them next to each word.
        do_total_word_count: print the running word index within the file.
        do_event_word_count: print the running word index within the event.
        list_l0id: suppress word dumping and only print the list of L0IDs.
    """
    # just to be sure that the list contains type int
    l0id_to_load = [int(l0id) for l0id in l0id_to_load]
    # State-machine flags: inside a module block / inside the event footer.
    s_in_module = False
    s_in_event_footer = False
    module_type = ""
    n_events_loaded = 0
    n_footer_count = 0
    n_module_count = 0
    dump_word = True
    n_word_read_total = -1
    n_word_read_event = -1
    l0ids_loaded = []
    # Word indices where the current header/footer/module block started;
    # -1 means "not currently in that block".
    header_word_start_idx = 0
    footer_word_start_idx = 0
    module_word_start_idx = 0
    ##
    ## timing
    ##
    timing_gen = None
    time_units = None
    if do_timestamp:
        timing_gen = timing_info_gen(input_filename)
        time_units = next(timing_gen)
    filesize = os.stat(input_filename).st_size
    with open(input_filename, "rb") as input_file:
        for _ in range(0, filesize, N_BYTES_PER_WORD):
            data = input_file.read(N_BYTES_PER_WORD)
            if len(data) != N_BYTES_PER_WORD:
                raise Exception(
                    f"ERROR Malformed event data file {input_filename}")
            fmt = {"little": "<?Q", "big": ">?Q"}[DATA_ENDIAN]
            is_metadata, contents = struct.unpack(fmt, data)
            data_word = DataWord(contents, is_metadata)
            dump_word = True
            n_word_read_total += 1
            n_word_read_event += 1
            # The very first word of the file must be an event header.
            if n_word_read_total == 0 and not data_word.is_start_of_event():
                raise Exception(
                    f"ERROR Malformed event data file (={input_filename}): first word is not start of event!"
                )
            if timing_gen:
                try:
                    data_word.set_timestamp(next(timing_gen), units=time_units)
                except StopIteration:
                    raise Exception(
                        f"ERROR Timing file for loaded data file (={input_filename}) has incorrect number of words in it!"
                    )
            ##
            ## TRANSITION TO EVENT (HEADER) STATE
            ##
            if data_word.is_start_of_event():
                header_word_start_idx = n_word_read_total
                footer_word_start_idx = -1
                module_word_start_idx = -1
                module_type = ""
                n_word_read_event = 0
                # If we arrive at a new header having seen a complete footer,
                # the previous event is finished.
                if s_in_event_footer and n_footer_count >= 3:
                    n_events_loaded += 1
                    # NOTE(review): condition uses literal 3 but the message
                    # quotes N_WORDS_FOR_EVENT_FOOTER — confirm they agree.
                    if n_footer_count > 3:
                        print(
                            f"WARNING Length of EVENT_FOOTER appears to be large (expect: {N_WORDS_FOR_EVENT_FOOTER} words, got: {n_footer_count} words)"
                        )
                    if n_events and n_events_loaded >= n_events:
                        break
                    # Reset the per-event state machine.
                    s_in_module = False
                    s_in_event_footer = False
                    n_footer_count = 0
                    n_module_count = 0
                    n_word_read_event = 0
            ##
            ## TRANSITION TO MODULE STATE
            ##
            # A metadata word that is neither an event header nor an event
            # footer start is a module header.
            if (data_word.is_metadata
                    and (not data_word.is_start_of_event())
                    and (not data_word.is_event_footer_start())):
                s_in_module = True
                s_in_event_footer = False
                seen_last_cluster = False
                module_footer = None
                module_word = DataFormat.BitFieldWordValue(
                    DataFormat.M_HDR, data_word.contents)
                module_type = {
                    0: "pix",
                    1: "strip"
                }[module_word.getField("DET")]
                module_word_start_idx = n_word_read_total
            ##
            ## TRANSITION TO EVENT FOOTER STATE
            ##
            if data_word.is_event_footer_start():
                s_in_module = False
                s_in_event_footer = True
                footer_word_start_idx = n_word_read_total
            ##
            ## keep count of long (many words) we have been in the header
            ## so that we know when we are finished with the current
            ## event (the footer is a fixed number of words)
            ##
            if s_in_event_footer:
                n_footer_count += 1
            ##
            ## get the current event's L0ID from the event header
            ##
            if data_word.is_start_of_event():
                header = DataFormat.BitFieldWordValue(DataFormat.EVT_HDR1,
                                                      data_word.contents)
                current_l0id = header.getField("L0ID")
                l0ids_loaded.append(current_l0id)
            ##
            ## determine if we need to dump the current word based on some
            ## input arguments
            ##
            if len(l0id_to_load):
                if int(current_l0id) not in l0id_to_load:
                    dump_word = False
                else:
                    dump_word = True
            if list_l0id:
                dump_word = False
            ##
            ## dump the word (this gets messy due to the different options of detail in the printout)
            ##
            if dump_word:
                extra_before = ""
                extra_after = ""
                boundary_string = ""
                ##
                ## print event boundary markers/separators
                ## (event header, module header, event footer)
                ##
                if do_boundary:
                    is_module = data_word.is_metadata and not (
                        data_word.is_start_of_event()
                        or data_word.is_event_footer_start())
                    # Width of the separator tracks the width of the full
                    # printed line, including any enabled extras.
                    word_width = len(str(data_word))
                    d_string = ""
                    if do_total_word_count:
                        word_width += 11
                    if do_event_word_count:
                        word_width += 7
                    if do_timestamp:
                        word_width += 15
                    if do_parse:
                        word_width += 55
                    sep = ""
                    if data_word.is_start_of_event():
                        sep = "="
                        b_string = word_width * sep
                        d_string = f"[EVENT {n_events_loaded:03}]"
                        boundary_string = f"{b_string} {d_string}"
                    elif is_module:
                        sep = "-"
                        b_string = word_width * sep
                        d_string = f"[MODULE {n_events_loaded:03}/{n_module_count:03}]"
                        boundary_string += f"{b_string} {d_string}"
                        n_module_count += 1
                    elif data_word.is_event_footer_start():
                        sep = "-"
                        b_string = word_width * sep
                        d_string = f"[FOOTER {n_events_loaded:03}]"
                        boundary_string = f"{b_string} {d_string}"
                    if boundary_string:
                        print(boundary_string)
                ##
                ## construct the string to print in front of (to the left of)
                ## the printed hex-formatted data word
                ##
                if do_total_word_count:
                    extra_before += f"{n_word_read_total:<9}"
                if do_timestamp:
                    extra_before += f"{data_word.timestamp:<13}" + " "
                ##
                ## construct the string to print after (to the right of) the
                ## printed hex-formatted data word
                ##
                ## this part gets kind of messy due to the decoding of the data words
                ## on the fly, especially for finding the module last cluster and footer
                field_str = ""
                if do_parse:
                    # Event header: one descriptor per header word.
                    if header_word_start_idx >= 0:
                        header_idx = n_word_read_total - header_word_start_idx
                        if header_idx < N_WORDS_FOR_EVENT_HEADER:
                            header_field_desc = [
                                DataFormat.EVT_HDR1,
                                DataFormat.EVT_HDR2,
                                DataFormat.EVT_HDR3,
                                DataFormat.EVT_HDR4,
                                DataFormat.EVT_HDR5,
                                DataFormat.EVT_HDR6,
                            ][header_idx]
                            header_word = DataFormat.BitFieldWordValue(
                                header_field_desc, data_word.contents)
                            field_str = []
                            for field in header_field_desc.fields:
                                if field.name.lower() == "spare":
                                    continue
                                field_str.append(
                                    f"{field.name.upper()}: {hex(header_word.getField(field.name))}"
                                )
                            field_str = ", ".join(field_str)
                        else:
                            header_word_start_idx = -1
                    # Module header: two words, the second packed in the
                    # upper half of the data word.
                    if module_word_start_idx >= 0:
                        module_idx = n_word_read_total - module_word_start_idx
                        if module_idx < N_WORDS_FOR_MODULE_HEADER:
                            module_field_desc = [
                                DataFormat.M_HDR, DataFormat.M_HDR2
                            ][module_idx]  # DataFormat breaks its own naming conventions
                            module_data = data_word.contents
                            if module_idx == 1:
                                module_data = 0xFFFFFFFF & (module_data >> 32)
                            module_word = DataFormat.BitFieldWordValue(
                                module_field_desc, module_data)
                            field_str = []
                            for field in module_field_desc.fields:
                                if field.name.lower() == "spare":
                                    continue
                                field_str.append(
                                    f"{field.name.upper()}: {hex(module_word.getField(field.name))}"
                                )
                            field_str = ", ".join(field_str)
                        else:
                            module_word_start_idx = -1
                    # Module payload: split the word into cluster sub-words
                    # and hunt for the LAST cluster and the module footer.
                    if s_in_module and (module_word_start_idx < 0):
                        word_desc = {
                            "pix": DataFormat.PIXEL_CLUSTER,
                            "strip": DataFormat.STRIP_CLUSTER,
                        }[module_type]
                        cluster_data_str = str(data_word).replace("0x", "")[
                            1:]  # remove '0x' string and skip metadata flag
                        cluster_words = []
                        if module_type == "pix":
                            cluster_words.append(int(cluster_data_str[:8], 16))
                            cluster_words.append(int(cluster_data_str[8:], 16))
                        elif module_type == "strip":
                            cluster_words.append(int(cluster_data_str[:4], 16))
                            cluster_words.append(int(cluster_data_str[4:8], 16))
                            cluster_words.append(
                                int(cluster_data_str[8:12], 16))
                            cluster_words.append(
                                int(cluster_data_str[12:16], 16))
                        cluster_words = [
                            DataFormat.BitFieldWordValue(word_desc, x)
                            for x in cluster_words
                        ]
                        if not seen_last_cluster and module_footer is None:
                            for i, clw in enumerate(cluster_words):
                                is_last = clw.getField("LAST") == 1
                                if is_last:
                                    seen_last_cluster = True
                                    continue  # skip to next
                                # The sub-word after the LAST cluster is the
                                # module footer.
                                if seen_last_cluster:
                                    module_footer = cluster_words[i]
                                    break
                            else:
                                module_footer = None
                        elif seen_last_cluster and module_footer is None:
                            # LAST was the final sub-word of the previous
                            # data word; the footer starts this word.
                            module_footer = cluster_words[0]
                        else:
                            raise Exception(
                                "ERROR Failed to parse module footer properly")
                        if seen_last_cluster and module_footer:
                            field_str = []
                            footer_desc = {
                                "pix": DataFormat.PIXEL_CL_FTR,
                                "strip": DataFormat.STRIP_CL_FTR,
                            }[module_type]
                            module_footer = DataFormat.BitFieldWordValue(
                                footer_desc, module_footer.value)
                            # NOTE(review): 0x77 appears to be the module
                            # footer FLAG value — confirm against DataFormat.
                            if module_footer.getField("FLAG") == 0x77:
                                for field in module_footer.classobj.fields:
                                    if field.name.lower() == "spare":
                                        continue
                                    field_str.append(
                                        f"{field.name.upper()}: {hex(module_footer.getField(field.name))}"
                                    )
                            field_str = ", ".join(field_str)
                    # Event footer: one descriptor per footer word.
                    if footer_word_start_idx >= 0:
                        module_type = ""
                        footer_idx = n_word_read_total - footer_word_start_idx
                        if footer_idx < N_WORDS_FOR_EVENT_FOOTER:
                            footer_field_desc = [
                                DataFormat.EVT_FTR1,
                                DataFormat.EVT_FTR2,
                                DataFormat.EVT_FTR3,
                            ][footer_idx]
                            footer_word = DataFormat.BitFieldWordValue(
                                footer_field_desc, data_word.contents)
                            field_str = []
                            for field in footer_field_desc.fields:
                                if field.name.lower() == "spare":
                                    continue
                                field_str.append(
                                    f"{field.name.upper()}: {hex(footer_word.getField(field.name))}"
                                )
                            field_str = ", ".join(field_str)
                        else:
                            footer_word_start_idx = -1
                ##
                ## build the final strings to pre-pend (append) to the left (right) of the
                ## hex-string formatted data word
                ##
                if do_event_word_count:
                    if len(extra_after):
                        extra_after += " "
                    extra_after += f"{n_word_read_event:<4}"
                if field_str:
                    if len(extra_after):
                        extra_after += " "
                    extra_after += f"{field_str}"
                if extra_before:
                    extra_before += " "
                if extra_after:
                    extra_after = 3 * " " + extra_after
                ##
                ## now print it
                ##
                word_string = f"{extra_before}{data_word}{extra_after}"
                print(word_string)
    # Summary mode: print only the list of L0IDs encountered.
    if list_l0id:
        print(20 * "-")
        print(f"Loaded {len(l0ids_loaded)} events:")
        for i, l in enumerate(l0ids_loaded):
            l_str = hex(l)
            print(f" {i:<4}: {l_str}")
        print(20 * "-")
#!/usr/bin/python
# -*- coding: utf-8 -*-
# For accessing the OKCOIN futures REST API
from RESTAPI import RESTAPI
import pandas as pd
import time
import datetime
from dateutil.parser import parse
from decimal import Decimal
from dateutil.relativedelta import relativedelta
import api_settings as settings
from DataFormat import DataFormat
import base

# Shared response formatter used by OKCoinFuture instances.
fmt = DataFormat()


class OKCoinFuture(RESTAPI):
    """REST client for the OKCoin/OKEX futures API."""

    def __init__(self, apiname='OKEX'):
        """Look up the API credentials for *apiname* and initialize the client."""
        key, secret = settings.APIKEY[apiname]
        super(OKCoinFuture, self).__init__(key, secret, apiname, use_proxy=False)
        # BUGFIX: _format() reads self.fmt, but no such attribute was ever
        # assigned (only the module-level `fmt`), so every call raised
        # AttributeError. Bind the shared formatter onto the instance.
        self.fmt = fmt

    def _format(self, res, *args, **kwargs):
        """Convert an API response to a DataFrame with upper-cased columns."""
        res = self.fmt.to_dataframe(res, *args, **kwargs)
        res.columns = [s.upper() for s in res.columns]
        return res