def handle_hk_pkt(self, data, process_id):
    """Decode a housekeeping packet from one of the managed processes and
    queue it for downlink.

    data       -- raw packet bytes received from the process
    process_id -- one of self.HK_PAT_ID / HK_FPGA_ID / HK_SYS_ID / HK_CH_ID

    PAT packets are only forwarded when their decoded tx_flag is set;
    all other known packet types are always forwarded. Unknown process
    IDs are dropped (previously they raised NameError because apid and
    payload were never assigned).
    """
    # TODO: Update packet handling if necessary
    tx_flag = False
    if (process_id == self.HK_PAT_ID):
        pat_pkt = PATHealthPacket()
        apid = TLM_HK_PAT
        # decode returns (payload, tx_flag, ...); remaining fields unused here
        payload, tx_flag, _, _, _, _ = pat_pkt.decode(data)
        # Prefix a millisecond-resolution timestamp for readability
        payload = "(" + str(datetime.datetime.now())[0:23] + ") " + payload
    elif (process_id == self.HK_FPGA_ID):
        apid = TLM_HK_FPGA_MAP
        payload = data  # data is already a packed byte string
    elif (process_id == self.HK_SYS_ID):
        apid = TLM_HK_SYS
        payload = data  # data is already a packed byte string
    elif (process_id == self.HK_CH_ID):
        apid = TLM_HK_CH
        ch_pkt = HKControlPacket()
        origin, _, data = ch_pkt.decode(data)
        # TODO: Maybe format this better
        payload = "(" + str(datetime.datetime.now())[0:23] + ") " + str(origin) + ": " + data
    else:
        # Unknown process id: drop the packet instead of raising NameError
        # on the unbound apid/payload below.
        return
    # PAT packets are rate-limited by their tx_flag; everything else goes out.
    if ((process_id != self.HK_PAT_ID) or tx_flag):
        pkt = TxPacket()
        # payload needs to be a single packed byte string e.g. '\x00\x01'
        raw_pkt = pkt.encode(apid, payload)
        self.packet_buf.append(raw_pkt)
def validate_file(rx_pkt_payload, socket_tx_packets):
    """Verify a file on disk against the MD5 digest carried in the request.

    Request layout (network byte order): 16-byte MD5 digest, 2-byte name
    length, then the file-name field. Returns True when the recomputed
    digest matches; on mismatch an ERR_FL_FILE_INVALID packet carrying
    both digests and the file name is sent and False is returned. Any
    unexpected error is reported via send_exception and yields False.
    """
    name_field_len = len(rx_pkt_payload) - 18
    expected_hash, name_len, name_field = struct.unpack(
        '!%dsH%ds' % (16, name_field_len), rx_pkt_payload)
    target_path = name_field[0:name_len]
    try:
        digest = hashlib.md5()
        with open(target_path, 'rb') as fh:
            # Hash block size is 1024, change if necessary
            chunk = fh.read(1024)
            while len(chunk) > 0:
                digest.update(chunk)
                chunk = fh.read(1024)
        actual_hash = digest.digest()
        if actual_hash == expected_hash:
            return True
        # Mismatch: report computed digest, expected digest, and the name.
        reply = TxPacket()
        pkt_payload = ''
        pkt_payload += struct.pack('!16s', actual_hash)
        pkt_payload += struct.pack('!16s', expected_hash)
        pkt_payload += struct.pack('!H', name_len)
        pkt_payload += struct.pack('!%ds' % name_len, target_path)
        socket_tx_packets.send(reply.encode(ERR_FL_FILE_INVALID, pkt_payload))
        return False
    except Exception as e:
        send_exception(socket_tx_packets, e)
        return False
def restart_process(self, process_id, instance_num):
    """Restart the systemd user service for the given process and re-key its
    heartbeat watchdog(s) to the new PID.

    process_id   -- self.HK_CH_ID / HK_PAT_ID / HK_LB_ID / HK_FPGA_ID
    instance_num -- commandhandler instance index (only used for HK_CH_ID)

    Each restart only happens when the matching *_restart_enable flag is
    set. After handling the restart, an ERR_HK_RESTART notification packet
    (process_id, instance_num) is queued for downlink. Exceptions are
    reported over self.tx_socket rather than raised.
    """
    # print("Restart process %x" % process_id)
    try:
        if (process_id == self.HK_CH_ID and self.ch_restart_enable):
            print("Restart ch %d" % instance_num)
            # TODO: switch this for multiple commandhandlers
            os.system("systemctl --user restart commandhandler@%d" % instance_num)
            ch_pid = self.get_service_pid('commandhandler@%d' % instance_num)
            # os.system("systemctl --user restart commandhandler")
            # ch_pid = self.get_service_pid('commandhandler')
            # Update the instance/pid list: move the watchdog that was keyed
            # on the old PID over to the new PID and restart it.
            old_ch_pid = self.ch_pids[instance_num]
            self.ch_pids[instance_num] = ch_pid
            self.ch_heartbeat_wds[old_ch_pid].cancel()
            old_wd = self.ch_heartbeat_wds.pop(old_ch_pid)
            self.ch_heartbeat_wds[ch_pid] = old_wd
            self.ch_heartbeat_wds[ch_pid].start()
        if (process_id == self.HK_PAT_ID and self.pat_restart_enable):
            print("Restart pat")
            os.system("systemctl --user restart pat.service")
            self.pat_health_wd.cancel()
            self.pat_health_wd.start()
        if (process_id == self.HK_LB_ID and self.lb_restart_enable):
            print("Restart load balancer")
            os.system("systemctl --user restart loadbalancer.service")
            self.lb_heartbeat_wd.cancel()
            self.lb_heartbeat_wd.start()
            # Restarting the load balancer also restarts every command
            # handler, so all CH watchdogs must be re-keyed as well.
            os.system("systemctl --user restart commandhandlers.target")
            for i in range(COMMAND_HANDLERS_COUNT):
                # TODO: switch this for multiple commandhandlers
                ch_pid = self.get_service_pid('commandhandler@%d' % i)
                # ch_pid = self.get_service_pid('commandhandler')
                old_ch_pid = self.ch_pids[i]
                self.ch_pids[i] = ch_pid
                self.ch_heartbeat_wds[old_ch_pid].cancel()
                old_wd = self.ch_heartbeat_wds.pop(old_ch_pid)
                self.ch_heartbeat_wds[ch_pid] = old_wd
                self.ch_heartbeat_wds[ch_pid].start()
        if (process_id == self.HK_FPGA_ID and self.fpga_restart_enable):
            print("Restart fpga")
            os.system("systemctl --user restart fpga.service")
        # Telemetry notification that a restart command was processed.
        err_pkt = TxPacket()
        raw_err_pkt = err_pkt.encode(ERR_HK_RESTART, struct.pack('!BB', process_id, instance_num))
        self.packet_buf.append(raw_err_pkt)
    except Exception as e:
        send_exception(self.tx_socket, e)
def acquire_bus_pkt(self):
    """Read one full bus packet (sync marker stripped) from the SPI device.

    Scans byte-by-byte for the CCSDS sync marker, reads the 6-byte CCSDS
    header plus the payload+CRC, and verifies the CRC-16. Valid packets
    are appended to self.bus_pkts_buffer; on CRC failure the raw bytes
    are sent in an ERR_DPKT_CRC_INVALID packet instead.
    """
    # Read 1 byte at a time until the full sync marker is matched.
    sync_index = 0
    while (sync_index < len(self.ccsds_sync)):
        b = self.read_data(1)
        if (b[0] == self.ccsds_sync[sync_index]):
            sync_index += 1
        elif (b[0] == self.ccsds_sync[0]):
            # Mismatch, but this byte could start a new marker.
            sync_index = 1
        else:
            sync_index = 0
    # Read 6 CCSDS header bytes
    buf = self.read_data(CCSDS_HEADER_LEN)
    # BUG FIX: '+' binds tighter than '|', so the original
    # (hi << 8) | lo + 1 computed (hi << 8) | (lo + 1), which corrupts
    # the length whenever the low byte is 0xFF. Parenthesize the OR
    # before adding the CCSDS "length - 1" correction.
    pkt_len = ((buf[PKT_LEN_INDEX] << 8) | buf[PKT_LEN_INDEX + 1]) + 1
    # Read payload data bytes and crc bytes (crc is included in pkt_len).
    pkt = self.read_data(pkt_len)
    buf.extend(pkt)
    # CRC is the last two bytes; it covers everything before it.
    crc_index = CCSDS_HEADER_LEN + pkt_len - 2
    crc = (buf[crc_index] << 8) | buf[crc_index + 1]
    crc_check = crc16.calc(buf[:crc_index])
    if (crc == crc_check):
        self.bus_pkts_buffer.append(buf)
    else:
        # Report the invalid packet (length-prefixed raw bytes) downstream.
        err_pkt = TxPacket()
        err_pkt_pl = struct.pack('!H%dB' % len(buf), len(buf), *buf)
        raw_err_pkt = err_pkt.encode(ERR_DPKT_CRC_INVALID, err_pkt_pl)
        self.tx_socket.send(raw_err_pkt)
def _build_bus_frame(self, apid, seq_flag, seq_cnt, chunk):
    """Frame one CCSDS space packet: sync marker + 6-byte header +
    data chunk + CRC-16. Returns the frame as a list of byte values.
    The CRC is computed over header+data only (sync excluded)."""
    pkt = []
    pkt.append((apid >> 8) & 0b00000111)
    pkt.append(apid & 0xFF)
    pkt.append((seq_flag << 6) | ((seq_cnt >> 8) & 0b00111111))
    pkt.append(seq_cnt & 0xFF)
    # CCSDS length field = data bytes + CRC(2) - 1 => len(chunk) + 1
    pkt.append(((len(chunk) + 1) >> 8) & 0xFF)
    pkt.append((len(chunk) + 1) & 0xFF)
    pkt.extend(bytearray(chunk))
    crc = crc16.calc(pkt)
    pkt.extend([crc >> 8, crc & 0xFF])
    frame = [0x35, 0x2E, 0xF8, 0x53]  # sync marker
    frame.extend(pkt)
    return frame

def handle_tx_pkts(self):
    """Pop one IPC packet and segment it into bus frames.

    Packets larger than BUS_DATA_LEN are split into a CCSDS sequence
    (seq flags: 0b01 first, 0b00 continuation, 0b10 last, 0b11
    standalone). Frames are appended to self.bus_pkts_buffer.
    Previously the frame-building code was duplicated for the loop and
    the final packet; both paths now share _build_bus_frame.
    """
    try:
        raw_ipc_pkt = self.ipc_pkts_buffer.pop(0)
    except IndexError:
        # Empty buffer, but that's ok
        return
    ipc_pkt = TxPacket()
    apid, pkt_data = ipc_pkt.decode(raw_ipc_pkt)
    seq_cnt = 0
    seq_flag = 0b01  # first of group (until we know it's standalone)
    while (len(pkt_data) > BUS_DATA_LEN):
        frame = self._build_bus_frame(apid, seq_flag, seq_cnt, pkt_data[:BUS_DATA_LEN])
        self.bus_pkts_buffer.append(frame)
        del pkt_data[:BUS_DATA_LEN]
        seq_cnt += 1
        seq_flag = 0b00  # continuation
    # Last or only packet
    if (seq_flag == 0b01):
        seq_flag = 0b11  # never split: standalone
    else:
        seq_flag = 0b10  # last of group
    self.bus_pkts_buffer.append(
        self._build_bus_frame(apid, seq_flag, seq_cnt, pkt_data))
#from zmqTxRx import recv_zmq #send_zmq context = zmq.Context() socket_tx_packets = context.socket(zmq.SUB) socket_tx_packets.bind("tcp://127.0.0.1:%s" % TX_PACKETS_PORT) socket_tx_packets.subscribe("") # socket.setsockopt(zmq.SUBSCRIBE, topicfilter) # subscribe to ALL incoming FPGA_map_requests # ~ socket_PAT_control = context.socket(zmq.PUB) # ~ socket_PAT_control.bind("tcp://*:%s" % PAT_CONTROL_PORT) # socket needs some time to set up. give it a second - else the first message will be lost time.sleep(1) print("\n") while True: #Continuously read telemetry print('RECEIVING on %s with TIMEOUT %d' % (socket_tx_packets.get_string( zmq.LAST_ENDPOINT), socket_tx_packets.get(zmq.RCVTIMEO))) message = socket_tx_packets.recv() ipc_txPacket = TxPacket() apid, payload = ipc_txPacket.decode(message) #decode the package print(apid) #print(payload) data = struct.unpack('!3f', payload) print(data) time.sleep(1)
def assemble_file(rx_pkt_payload, socket_tx_packets):
    """Reassemble an uploaded file from its chunk files in
    /root/file_staging/<transfer_id>/.

    Request layout (network order): 2-byte transfer_id, 2-byte name
    length, then the destination file-name field.

    Validates that chunks exist, are well-named (<seq>_<len>.chunk),
    agree on the sequence length, and that none are missing; then
    concatenates them (sorted by sequence number) into file_name.
    Always sends a TLM_ASSEMBLE_FILE response: a success, error
    (with missing-chunk list), or null response. Returns True on
    success, False otherwise.
    """
    req_raw_size = len(rx_pkt_payload) - 4
    transfer_id, file_name_len, file_name_payload = struct.unpack(
        '!HH%ds' % req_raw_size, rx_pkt_payload)
    file_name = file_name_payload[0:file_name_len]
    missing_chunks = []
    received_chunks = []
    pkt_data = ''
    success = True
    try:
        staging_dir = '/root/file_staging/' + str(transfer_id) + '/'
        all_files = [f for f in os.listdir(staging_dir) if f.endswith('.chunk')]
        # Sort numerically by the leading sequence number.
        chunk_files = sorted(all_files, key=lambda s: int(s.split('_')[0]))
        num_chunk_files = len(chunk_files)

        if not chunk_files:
            # Dir is empty -> no chunks are received yet
            raise FileError(FL_ERR_EMPTY_DIR)

        # Assume the first chunk has the correct sequence count.
        _, seq_len = chunk_files[0].split('_')
        seq_len_all = int(seq_len[:-6])  # strip the '.chunk' suffix

        # FIX: raw string (the original '\d' is an invalid escape on
        # modern Python) and escaped dot (the bare '.' let malformed
        # names such as '1_3xchunk.chunk' pass the name check).
        chunk_name_pattern = re.compile(r'\d{1,5}_\d{1,5}\.chunk')

        # Verify chunk file names
        for i in range(num_chunk_files):
            if chunk_name_pattern.match(chunk_files[i]) is None:
                raise FileError(FL_ERR_FILE_NAME)
            # Check file names are consistent
            seq_num, seq_len = chunk_files[i].split('_')
            seq_num = int(seq_num)
            seq_len = int(seq_len[:-6])
            if seq_len != seq_len_all:
                # Sequence count doesn't match
                raise FileError(FL_ERR_SEQ_LEN)
            received_chunks.append(seq_num)

        # Collect ALL missing sequence numbers before raising so the
        # error response can report the complete list.
        if (seq_len_all != num_chunk_files):
            for i in range(seq_len_all):
                seq_num_check = i + 1
                if (seq_num_check not in received_chunks):
                    missing_chunks.append(seq_num_check)
            raise FileError(FL_ERR_MISSING_CHUNK)

        # Write the chunks, in order, to the destination file.
        # (Removed unused os.stat chunk-size locals from the original.)
        with safe_open_w(file_name) as out_file:
            for i in range(num_chunk_files):
                with open(staging_dir + chunk_files[i], 'rb') as curr_chunk:
                    # Read the entire chunk, may be better to buffer?
                    out_file.write(curr_chunk.read())
    except FileError as e:
        pkt_data = format_err_response(transfer_id, e, missing_chunks)
        success = False
    except Exception as e:
        send_exception(socket_tx_packets, e)
        pkt_data = format_null_response(transfer_id)
        success = False
    else:
        pkt_data = format_success_response(transfer_id)
    finally:
        # pkt_data is a single byte string (e.g. the output of struct.pack)
        tx_pkt = TxPacket()
        raw_tx_pkt = tx_pkt.encode(TLM_ASSEMBLE_FILE, pkt_data)
        socket_tx_packets.send(raw_tx_pkt)
    return success
def request_file(rx_pkt_payload, socket_tx_packets):
    """Downlink chunk files for a transfer from
    /root/file_staging/<transfer_id>/.

    Request layout (network order): 2-byte transfer_id, 1-byte all_flag,
    2-byte chunk_start_index, 2-byte num_chunks. all_flag == 0xFF means
    "send every chunk" and overrides the start/count fields.

    Each TLM_DL_FILE packet carries: transfer_id, 16-byte file hash,
    chunk index, sequence length, chunk size, chunk bytes. Returns True
    on success; on any error (including FileError) the exception is
    reported via send_exception and False is returned.
    """
    transfer_id, all_flag, chunk_start_index, num_chunks = struct.unpack(
        '!HBHH', rx_pkt_payload)
    try:
        staging_dir = '/root/file_staging/' + str(transfer_id) + '/'
        all_files = [f for f in os.listdir(staging_dir) if f.endswith('.chunk')]
        chunk_files = sorted(all_files, key=lambda s: int(s.split('_')[0]))

        # Dir is empty -> error
        if not chunk_files:
            raise FileError(FL_ERR_EMPTY_DIR)

        # Get the full sequence length from the first chunk's name.
        # FIX: raw string (the original '\d' is an invalid escape on
        # modern Python) and escaped dot to reject malformed names.
        chunk_name_pattern = re.compile(r'\d{1,5}_\d{1,5}\.chunk')
        if chunk_name_pattern.match(chunk_files[0]) is None:
            raise FileError(FL_ERR_FILE_NAME)
        _, raw_seq_len = chunk_files[0].split('_')
        seq_len = int(raw_seq_len[:-6])  # strip the '.chunk' suffix

        if (all_flag == 0xFF):
            chunk_start_index = 1
            num_chunks = seq_len

        # Check that you're not requesting out of bounds
        if (chunk_start_index + num_chunks - 1 > seq_len):
            raise FileError(FL_ERR_OUT_OF_BOUNDS)

        # Retrieve the stored whole-file hash written by disassemble_file.
        hash_file_name = staging_dir + 'md5.hash'
        with open(hash_file_name, 'rb') as hash_file:
            file_hash = hash_file.read()

        # Chunk sequence numbers are 1-based.
        for i in range(chunk_start_index, chunk_start_index + num_chunks):
            file_name = staging_dir + str(i) + '_' + str(seq_len) + '.chunk'
            with open(file_name, 'rb') as chunk_file:
                chunk_size = os.stat(file_name).st_size
                packet_payload = chunk_file.read()
                packet = struct.pack('!H%dsHHH%ds' % (16, chunk_size),
                                     transfer_id, file_hash, i, seq_len,
                                     chunk_size, packet_payload)
                txpacket = TxPacket()
                raw_packet = txpacket.encode(APID=TLM_DL_FILE, payload=packet)
                socket_tx_packets.send(raw_packet)
        return True
    except Exception as e:
        send_exception(socket_tx_packets, e)
        return False
def disassemble_file(rx_pkt_payload, socket_tx_packets):
    """Split a file into fixed-size chunk files in
    /root/file_staging/<transfer_id>/ and store its MD5 hash.

    Request layout (network order): 2-byte transfer_id, 2-byte
    chunk_size, 2-byte name length, then the file-name field.

    Writes <seq>_<seq_len>.chunk files (1-based seq) plus md5.hash, then
    sends a TLM_DISASSEMBLE_FILE packet with (transfer_id, chunk count).
    Returns True on success; on any error the exception is reported via
    send_exception and False is returned.
    """
    req_raw_size = len(rx_pkt_payload) - 6
    transfer_id, chunk_size, file_name_len, file_name_payload = struct.unpack(
        '!HHH%ds' % req_raw_size, rx_pkt_payload)
    file_name = file_name_payload[0:file_name_len]
    try:
        # First pass: hash the whole file in 1024-byte blocks.
        hash_func = hashlib.md5()
        with open(file_name, "rb") as source_file:
            buf = source_file.read(1024)  # Hash block size is 1024, change if necessary
            while (len(buf) > 0):
                hash_func.update(buf)
                buf = source_file.read(1024)
        file_hash = hash_func.digest()

        # Persist the hash alongside the chunks for later validation.
        hash_file_name = '/root/file_staging/' + str(transfer_id) + '/' + 'md5.hash'
        # hash_file_name = 'test_file_staging/'+str(transfer_id)+'/'+'md5.hash'
        with safe_open_w(hash_file_name) as hash_file:
            hash_file.write(file_hash)

        # Second pass: write full-size chunks, then the remainder (if any).
        with open(file_name, 'rb') as source_file:
            file_len = os.stat(file_name).st_size
            seq_len = int(math.ceil(float(file_len) / chunk_size))
            seq_num = 1
            # Full chunks: loop while at least one more full chunk fits
            # strictly inside the file.
            while (seq_num * chunk_size) < file_len:
                chunk_data = source_file.read(chunk_size)
                chunk_file_name = '/root/file_staging/' + str(
                    transfer_id) + '/' + str(seq_num) + '_' + str(
                        seq_len) + '.chunk'
                # chunk_file_name = 'test_file_staging/'+str(transfer_id)+'/'+str(seq_num)+'_'+str(seq_len)+'.chunk'
                with safe_open_w(chunk_file_name) as chunk_file:
                    chunk_file.write(chunk_data)
                seq_num += 1
            # Final (possibly partial) chunk; also covers the case where
            # the file fits in exactly one chunk.
            if (((seq_num - 1) * chunk_size) < file_len):
                chunk_data_len = file_len - ((seq_num - 1) * chunk_size)
                chunk_data = source_file.read(chunk_data_len)
                chunk_file_name = '/root/file_staging/' + str(
                    transfer_id) + '/' + str(seq_num) + '_' + str(
                        seq_len) + '.chunk'
                # chunk_file_name = 'test_file_staging/'+str(transfer_id)+'/'+str(seq_num)+'_'+str(seq_len)+'.chunk'
                with safe_open_w(chunk_file_name) as chunk_file:
                    chunk_file.write(chunk_data)

        # Send telemetry: seq_num here equals the number of chunks written.
        txpacket = TxPacket()
        raw_packet = txpacket.encode(APID=TLM_DISASSEMBLE_FILE,
                                     payload=struct.pack(
                                         '!HH', transfer_id, seq_num))
        socket_tx_packets.send(raw_packet)

        # Memory Management
        manage_file_staging()
        return True
    except Exception as e:
        send_exception(socket_tx_packets, e)
        return False