def test_parser_speed(self):
    dc = DataClient(host, port, storage_sender, gui_data_sender,
                    active_channels)
    dc.start_parser_thread()

    input_length = 10000
    chunk_size = 20
    with dc.expected_readings_parsed_lock:
        dc.expected_readings_parsed = input_length

    normal_bytearray_chunk = bytearray(0)
    for i in range(chunk_size):
        normal_bytearray_chunk += normal_bytearray

    start = time.time()
    for i in range(input_length / chunk_size):
        dc.pipeline_sender.send(normal_bytearray_chunk)
        with dc.reading_available_to_parse_cond:
            dc.reading_available_to_parse_cond.notify()
    with dc.parser_done_cond:
        dc.parser_done_cond.wait()
    elapsed = time.time() - start

    speed = 1 / (elapsed / input_length)
    print '\n\nParser Stage: effective frequency over %d samples is %d Hz\n' % (
        input_length, speed)
def test_recovery_missing_byte(self):
    dc = DataClient(host, port, storage_sender, gui_data_sender,
                    active_channels)
    dc.start_sync_recovery_thread()

    # play the normal_sequence twice so we get in sync, but skip the last byte
    for x in normal_sequence:
        dc.incoming_queue.put(x)
    for i in range(len(normal_sequence) - 1):
        dc.incoming_queue.put(normal_sequence[i])

    # play the normal_sequence once again
    for x in normal_sequence:
        dc.incoming_queue.put(x)
    time.sleep(0.2)
    assert not dc.synchronized

    # play the normal_sequence once again, this is where we recover
    for x in normal_sequence:
        dc.incoming_queue.put(x)
    time.sleep(0.2)
    assert dc.synchronized
def test_recovery_random_dead(self):
    dc = DataClient(host, port, storage_sender, gui_data_sender,
                    active_channels)
    dc.start_sync_recovery_thread()

    # play the normal_sequence twice so we get in sync
    for i in range(2):
        for x in normal_sequence:
            dc.incoming_queue.put(x)

    # play the dead_sequence (normal_sequence with a random DEAD word) to break sync
    for x in dead_sequence:
        dc.incoming_queue.put(x)
    time.sleep(0.2)
    assert not dc.synchronized

    # play the normal_sequence twice again, this is where we recover
    for i in range(2):
        for x in normal_sequence:
            dc.incoming_queue.put(x)
    time.sleep(0.2)
    assert dc.synchronized
def test_recovery_added_byte(self):
    dc = DataClient(host, port, storage_sender, gui_data_sender,
                    active_channels)
    dc.start_sync_recovery_thread()

    # play the normal_sequence twice so we get in sync
    for i in range(2):
        for x in normal_sequence:
            dc.incoming_queue.put(x)

    # throw a wrench in the pipeline
    dc.incoming_queue.put('c')

    # play the normal_sequence once again
    for x in normal_sequence:
        dc.incoming_queue.put(x)
    time.sleep(0.2)
    assert not dc.synchronized

    # play the normal_sequence once again, this is where we recover
    for x in normal_sequence:
        dc.incoming_queue.put(x)
    time.sleep(0.2)
    assert dc.synchronized
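# The recovery tests above all feed byte sequences onto dc.incoming_queue one
# element at a time. The hypothetical helper below is a sketch, not part of the
# original suite; it assumes only the incoming_queue.put() interface used above
# and could factor out that repeated pattern.
def feed_sequence(dc, sequence, repeat=1):
    """Push every element of `sequence` onto the DataClient's incoming queue."""
    for _ in range(repeat):
        for x in sequence:
            dc.incoming_queue.put(x)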
def test_initial_sync(self):
    dc = DataClient(host, port, storage_sender, gui_data_sender,
                    active_channels)
    dc.start_sync_verification_thread()

    # play the normal_sequence twice so we get in sync
    for i in range(2):
        for x in normal_sequence:
            dc.incoming_queue.put(x)
    time.sleep(0.2)
    assert dc.synchronized
def test_sync_verify_speed(self):
    dc = DataClient(host, port, storage_sender, gui_data_sender,
                    active_channels)
    dc.start_sync_verification_thread()
    dc.start_parser_thread()
    dc.synchronized = True

    input_length = 10000
    chunk_size = 20
    with dc.expected_readings_verified_lock:
        dc.expected_readings_verified = input_length / chunk_size

    normal_bytearray_chunk = bytearray(0)
    for i in range(chunk_size):
        normal_bytearray_chunk += normal_bytearray

    start = time.time()
    for i in range(input_length / chunk_size):
        dc.fast_path_sender.send(normal_bytearray_chunk)
        with dc.frame_to_be_verified_cond:
            dc.frame_to_be_verified_cond.notify()
    with dc.sync_filter_done_cond:
        dc.sync_filter_done_cond.wait()
    elapsed = time.time() - start

    speed = 1 / (elapsed / input_length)
    print '\n\nSync Verification Stage: effective frequency over %d samples is %d Hz\n' % (
        input_length, speed)
def test_sync_to_parser_handoff(self):
    dc = DataClient(host, port, storage_sender, gui_data_sender,
                    active_channels)
    dc.start_sync_recovery_thread()
    dc.start_parser_thread()

    # play the normal_sequence three times so we get in sync and hand readings to the parser
    for i in range(3):
        for x in normal_sequence:
            dc.incoming_queue.put(x)
    time.sleep(0.2)
    assert not dc.storage_queue.empty()
    assert not dc.gui_queue.empty()
def test_receive_recovery_speed(self):
    dc = DataClient(storage_sender, gui_data_sender,
                    reading_to_be_stored_cond, readings_to_be_plotted_cond)
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.bind(('localhost', 10002))
    server_sock.listen(1)
    print 'listening on %s:%d' % ('localhost', 10002)
    dc.connect_data_port('localhost', 10002)
    conn, addr = server_sock.accept()
    print 'accepted connection from %s:%d' % (addr[0], addr[1])

    input_length = 1000
    bytes_sent = 0
    start = time.time()
    for i in range(input_length):
        for j in range(len(normal_reading)):
            bytes_sent += conn.send(np.uint16(normal_reading[j]))
    with dc.expected_bytes_sent_lock:
        dc.expected_bytes_sent = bytes_sent
    dc.receiver_done_event.wait()
    elapsed = time.time() - start

    speed = 1 / (elapsed / input_length)
    print '\n\nReceive Recovery Stage: effective frequency over %d samples is %d Hz\n' % (
        input_length, speed)

    dc.close_data_port()
    conn.close()
    server_sock.close()
def test_recv_and_verify_speed(self):
    dc = DataClient(host, port, storage_sender, gui_data_sender,
                    active_channels)
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.bind(('localhost', 10002))
    server_sock.listen(1)
    print 'listening on %s:%d' % ('localhost', 10002)
    dc.connect_data_port()
    conn, addr = server_sock.accept()
    print 'accepted connection from %s:%d' % (addr[0], addr[1])

    input_length = 1000
    with dc.expected_readings_verified_lock:
        dc.expected_readings_verified = input_length - 2

    start = time.time()
    for i in range(input_length):
        for j in range(len(normal_reading)):
            conn.send(np.uint16(normal_reading[j]))
    with dc.sync_filter_done_cond:
        dc.sync_filter_done_cond.wait()
    elapsed = time.time() - start

    speed = 1 / (elapsed / input_length)
    print '\n\nReceive and Verify Stages: effective frequency over %d samples is %d Hz\n' % (
        input_length, speed)

    dc.close_data_port()
    conn.close()
    server_sock.close()
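# The socket-based tests in this file repeat the same bind/listen/connect/accept
# dance on localhost. The hypothetical helper below is a sketch, not part of the
# original suite; it mirrors the no-argument connect_data_port() call used in
# test_recv_and_verify_speed and test_receive_and_sync_verification (note that
# test_receive_recovery_speed passes host and port explicitly instead).
def open_loopback_data_port(dc, port=10002):
    """Listen on localhost, let the DataClient connect, and return the
    listening socket plus the accepted connection and peer address."""
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.bind(('localhost', port))
    server_sock.listen(1)
    dc.connect_data_port()
    conn, addr = server_sock.accept()
    return server_sock, conn, addr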
class NetworkController(mp.Process):
    def __init__(self, storage_sender, gui_control_conn, gui_data_queue,
                 file_header_sender, file_header_available_event,
                 reading_to_be_stored_event, readings_to_be_plotted_event,
                 control_msg_from_gui_event, control_msg_from_nc_event):
        super(NetworkController, self).__init__()
        # mp.Connection for sending readings from DataClient to StorageController
        self.storage_sender = storage_sender
        # mp.Connection for sending and receiving control messages (protobufs) back and forth to GUI
        # Note: full duplex Pipe
        self.gui_control_conn = gui_control_conn
        # mp.Connection for sending ADC readings to GUI for plotting
        self.gui_data_queue = gui_data_queue
        # mp.Connection for sending start_time, channel_bitmask, and chunk_size to SC
        self.file_header_sender = file_header_sender
        # IPC condition variables
        self.file_header_available_event = file_header_available_event
        self.reading_to_be_stored_event = reading_to_be_stored_event
        self.readings_to_be_plotted_event = readings_to_be_plotted_event
        # mp.Condition variable for wait/notify on duplex control message connection GUI <--> NC
        self.control_msg_from_gui_event = control_msg_from_gui_event
        self.control_msg_from_nc_event = control_msg_from_nc_event
        # used to stop listener threads and terminate the process gracefully
        self.stop_event = mp.Event()
        # mp.Event variable for ControlClient to notify NC that an ACK is available
        self.ack_msg_from_cc_event = mp.Event()
        # threading.Event variable to wait on for async client to connect
        self.control_client_connected_event = threading.Event()
        self.control_client_disconnected_event = threading.Event()
        # shared with control client, sends request messages to be sent over TCP
        # receives ACK messages
        self.nc_control_conn, self.cc_control_conn = mp.Pipe(duplex=True)
        # control client will write ACK'd requests here
        self.ack_queue = mp.Queue()
        # default to all channels being active
        # NOTE: this needs to match up with the default state of the channel checkboxes on GUI
        # and needs to be propagated to DataClient upon any change
        self.active_channels = [
            '0.0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6', '0.7',
            '1.0', '1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '1.7',
            '2.0', '2.1', '2.2', '2.3', '2.4', '2.5', '2.6', '2.7',
            '3.0', '3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7'
        ]
        # host and port will be extracted from GUI connect message
        self.host = ''
        self.port = 0
        # used to keep track of messages that have been sent to ControlClient but not yet ACKed
        self.sent_dict = {}
        self.control_client = ControlClient(
            control_protobuf_conn=self.cc_control_conn,
            ack_msg_from_cc_event=self.ack_msg_from_cc_event,
            connected_event=self.control_client_connected_event,
            disconnected_event=self.control_client_disconnected_event)
        self.data_client = DataClient(
            gui_data_queue=self.gui_data_queue,
            storage_sender=self.storage_sender,
            reading_to_be_stored_event=self.reading_to_be_stored_event,
            readings_to_be_plotted_event=self.readings_to_be_plotted_event)
        self.stop_listener_thread = threading.Thread(
            target=self.listen_for_stop_event)
        # receives request protobuf messages triggered by GUI events
        self.gui_receiver_thread = threading.Thread(target=self.recv_from_gui)
        self.gui_receiver_thread.daemon = True
        # listens for ACK messages being passed back from control client
        self.ack_listener_thread = threading.Thread(
            target=self.read_ack_messages)
        self.ack_listener_thread.daemon = True
        # handle asyncore blocking loop in a separate thread
        # NOTE: lambda needed so loop() doesn't get called right away and block
        # timeout=1.0 sets the polling frequency (default=30.0)
        # use_poll=True is a workaround to avoid "bad file descriptor" upon closing
        # for python 2.7.X according to GitHub Issue...but it still gives the error
        self.loop_thread = threading.Thread(target=self.asyncore_loop)
        self.loop_thread.daemon = True

    def run(self):
        self.stop_listener_thread.start()
        self.gui_receiver_thread.start()
        self.ack_listener_thread.start()
        self.gui_receiver_thread.join()
        logging.debug('NetworkController: gui_receiver thread joined')
        self.ack_listener_thread.join()
        logging.debug('NetworkController: ack_listener thread joined')
        logging.info('NetworkController finished running')

    def listen_for_stop_event(self):
        # block until stop_event gets set externally
        self.stop_event.wait()
        self.close_data_port()
        self.close_control_port()

    def connect_control_port(self, host, port):
        logging.debug('NetworkController: connect_control_port() entered')
        if self.control_client is not None and not self.control_client.connected:
            success, serr = self.control_client.connect_control_port(host, port)
            self.loop_thread.start()
            return (success, serr)

    def connect_data_port(self, host, port, chunk_size, active_channels):
        logging.debug('NetworkController: connect_data_port() entered')
        if self.data_client is not None and not self.data_client.connected:
            return self.data_client.connect_data_port(host, port, chunk_size,
                                                      active_channels)

    def close_control_port(self):
        logging.debug('NetworkController: attempting to close control port')
        if self.data_client.connected:
            self.data_client.close_data_port()
        self.control_client.close_control_port()
        if self.loop_thread.is_alive():
            self.loop_thread.join()
        logging.info('NetworkController: control and data ports closed')

    def close_data_port(self):
        logging.debug('NetworkController: attempting to close data port')
        return self.data_client.close_data_port()

    def asyncore_loop(self):
        while not self.stop_event.is_set():
            asyncore.loop(timeout=1.0, count=1, use_poll=True)
        logging.debug('NetworkController: asyncore loop thread finished')

    def get_channels_from_bitmask(self, bitmask):
        active_channels = []
        num_ADCs = 4
        num_channels_per_ADC = 8
        for adc in range(num_ADCs):
            for channel in range(num_channels_per_ADC):
                active = np.bitwise_and(
                    np.left_shift(0x01, adc * num_channels_per_ADC + channel),
                    bitmask)
                if active > 0:
                    active_channels.append(str(adc) + '.' + str(channel))
        return active_channels

    def recv_from_gui(self):
        while not self.stop_event.is_set():
            if self.gui_control_conn.poll():
                msg = self.gui_control_conn.recv()
                logging.info(
                    'NetworkController: received control message: \n%s', msg)
                if msg['type'] == 'CONNECT':
                    # TODO: input validation
                    self.host = msg['host']
                    self.port = msg['port']
                    success, serr = self.connect_control_port(
                        self.host, self.port)
                    if not success:
                        # ControlClient connect failed, notify GUI
                        reply_msg = msg
                        reply_msg['success'] = False
                        reply_msg['message'] = 'Failed to connect ControlClient to %s:%d, error is %s' % \
                            (self.host, self.port, serr)
                        self.gui_control_conn.send(reply_msg)
                        self.control_msg_from_nc_event.set()
                    else:
                        # construct a StartRequest protobuf message
                        startRequest = control_signals_pb2.StartRequest()
                        startRequest.port = self.port + 1
                        startRequest.channels = msg['channels']
                        startRequest.rate = msg['rate']
                        # wrap it up and copy sequence number
                        requestWrapper = control_signals_pb2.RequestWrapper()
                        requestWrapper.sequence = msg['seq']
                        requestWrapper.start.MergeFrom(startRequest)
                        # serialize wrapper for sending over Pipe
                        serialized = requestWrapper.SerializeToString()
                        self.nc_control_conn.send(serialized)
                        logging.debug(
                            'NetworkController: sent serialized requestWrapper to CC')
                        # ControlClient uses asyncore so we don't need to notify it
                        if msg['seq'] not in self.sent_dict.keys():
                            self.sent_dict[msg['seq']] = msg
                        else:
                            raise RuntimeWarning(
                                'NetworkController: control msg with sequence %d already in sent_dict'
                                % msg['seq'])
                        # asyncore client doesn't connect until it tries to recv/send,
                        # so we need to be notified asynchronously
                        control_client_connected = self.control_client_connected_event.wait(
                            timeout=5.0)
                        if not control_client_connected:
                            # ControlClient connect timed out, notify GUI
                            reply_msg = msg
                            reply_msg['success'] = False
                            reply_msg['message'] = 'Timed out while trying to connect ControlClient to %s:%d' % \
                                (self.host, self.port)
                            self.gui_control_conn.send(reply_msg)
                            self.control_msg_from_nc_event.set()
                elif msg['type'] == 'DISCONNECT':
                    # construct a StopRequest protobuf message
                    stopRequest = control_signals_pb2.StopRequest()
                    stopRequest.port = self.port + 1
                    stopRequest.channels = 0xffff
                    # wrap it up and copy sequence number
                    requestWrapper = control_signals_pb2.RequestWrapper()
                    requestWrapper.sequence = msg['seq']
                    requestWrapper.stop.MergeFrom(stopRequest)
                    # serialize wrapper for sending over Pipe
                    serialized = requestWrapper.SerializeToString()
                    self.nc_control_conn.send(serialized)
                    logging.debug(
                        'NetworkController: sent serialized requestWrapper to CC')
                    # ControlClient uses asyncore so we don't need to notify it
                    if msg['seq'] not in self.sent_dict.keys():
                        self.sent_dict[msg['seq']] = msg
                    else:
                        raise RuntimeWarning(
                            'NetworkController: control msg with sequence %d already in sent_dict'
                            % msg['seq'])
            else:
                while not self.stop_event.is_set():
                    if self.control_msg_from_gui_event.wait(1.0):
                        self.control_msg_from_gui_event.clear()
                        break

    def read_ack_messages(self):
        while not self.stop_event.is_set():
            if self.nc_control_conn.poll():
                ack = self.nc_control_conn.recv()
                ack_wrapper = control_signals_pb2.RequestWrapper()
                ack_wrapper.ParseFromString(ack)
                logging.info('NetworkController: received ACK message %s',
                             ack_wrapper)
                if ack_wrapper.sequence in self.sent_dict.keys():
                    msg = self.sent_dict.pop(ack_wrapper.sequence)
                else:
                    msg = {}
                    raise RuntimeWarning(
                        'NetworkController: received unexpected ACK from ControlClient')
                if ack_wrapper.HasField('start') and not self.data_client.connected:
                    logging.info(
                        'NetworkController: received start ACK, starting data client')
                    start_request = control_signals_pb2.StartRequest()
                    start_request.MergeFrom(ack_wrapper.start)
                    # make sure that we're getting the channels we expect
                    if start_request.channels != msg['channels']:
                        raise RuntimeWarning(
                            'NetworkController: active channels in ACK differ from requested')
                    # send header info to SC and notify
                    active_channels = self.get_channels_from_bitmask(
                        start_request.channels)
                    bytes_per_sample = (len(active_channels) + 4) * 2
                    chunk_size = min(
                        113, int(msg['rate'] * bytes_per_sample * 0.00001))
                    print 'received timestamp %d' % start_request.timestamp
                    header = (start_request.timestamp, start_request.channels,
                              chunk_size, start_request.rate)
                    self.file_header_sender.send(header)
                    self.file_header_available_event.set()
                    data_connect_success, data_serr = self.connect_data_port(
                        self.host, start_request.port, chunk_size,
                        active_channels)
                    logging.debug(
                        'NetworkController: data_connect_success = %s',
                        data_connect_success)
                    if data_connect_success:
                        # construct a success reply message
                        reply_msg = msg
                        reply_msg['success'] = True
                        reply_msg['message'] = 'Successfully connected control and data ports to host %s' % self.host
                        reply_msg['timestamp'] = start_request.timestamp
                        reply_msg['chunk'] = chunk_size
                        # send an ACK message to GUI and notify its receiver
                        self.gui_control_conn.send(reply_msg)
                        self.control_msg_from_nc_event.set()
                    else:
                        # construct a failure reply message
                        reply_msg = msg
                        reply_msg['success'] = False
                        reply_msg['message'] = 'Failed to connect DataClient to %s:%d, error is %s' % \
                            (self.host, start_request.port, data_serr)
                        self.gui_control_conn.send(reply_msg)
                        self.control_msg_from_nc_event.set()
                elif ack_wrapper.HasField('stop') and self.data_client.connected:
                    data_port_disconnected = self.close_data_port()
                    self.close_control_port()
                    control_port_disconnected = self.control_client_disconnected_event.wait(
                        timeout=5.0)
                    if data_port_disconnected and control_port_disconnected:
                        reply_msg = msg
                        reply_msg['success'] = True
                        reply_msg['message'] = 'Control and Data clients disconnected successfully'
                        self.gui_control_conn.send(reply_msg)
                        self.control_msg_from_nc_event.set()
                    else:
                        reply_msg = msg
                        reply_msg['success'] = False
                        reply_msg['message'] = 'Unable to disconnect properly or control client disconnect timed out'
                        self.gui_control_conn.send(reply_msg)
                        self.control_msg_from_nc_event.set()
                else:
                    logging.warning(
                        'NetworkController: received an unexpected ACK type %s',
                        ack_wrapper)
            else:
                while not self.stop_event.is_set():
                    if self.ack_msg_from_cc_event.wait(1.0):
                        self.ack_msg_from_cc_event.clear()
                        break
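# A minimal sketch (not from the original project) of how a parent process
# might wire up and launch the NetworkController above. Only the constructor
# parameter names and stop_event come from the class itself; the pipe/queue
# wiring, the local names, and the helper function are assumptions about the
# surrounding application (it also assumes the module-level
# `import multiprocessing as mp` already used by the class).
def launch_network_controller():
    # one-way pipes: NC sends, StorageController receives
    storage_receiver, storage_sender = mp.Pipe(duplex=False)
    file_header_receiver, file_header_sender = mp.Pipe(duplex=False)
    # duplex pipe for GUI <--> NC control messages
    gui_end, nc_end = mp.Pipe(duplex=True)
    gui_data_queue = mp.Queue()

    nc = NetworkController(
        storage_sender=storage_sender,
        gui_control_conn=nc_end,
        gui_data_queue=gui_data_queue,
        file_header_sender=file_header_sender,
        file_header_available_event=mp.Event(),
        reading_to_be_stored_event=mp.Event(),
        readings_to_be_plotted_event=mp.Event(),
        control_msg_from_gui_event=mp.Event(),
        control_msg_from_nc_event=mp.Event())
    nc.start()
    # later, to shut the process down gracefully: nc.stop_event.set(); nc.join()
    return nc, gui_end, gui_data_queue, storage_receiver, file_header_receiver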
class LightClient(object):
    def __init__(self, ip):
        self.log = Logger(MAIN_CLIENT_LOG_FILE, D_VERB)
        self.log.info('[MAIN THREAD] Instantiated client')
        self.receiving = False
        self.define_headers()
        self.targets = {}
        self.transmit = Queue.Queue()
        self.data_client = DataClient(self.transmit, ip)
        self.data_processor = DataProcessor(self.transmit, self.headers,
                                            self.targets)
        self.connect(ip)

    def connect(self, ip):
        self.soc_ctrl = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.soc_ctrl.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        my_ip = socket.gethostbyname('')
        self.log.debug('[MAIN THREAD] connecting...')
        self.soc_ctrl.connect((ip, SOC_PORT_CTRL))
        self.log.info('[MAIN THREAD] Client connected to server')

    def disconnect(self):
        ### data processor should not be here
        self.data_processor.stop()
        self.soc_ctrl.close()

    def define_headers(self):
        head = {}
        head['process'] = PROC_CPU_DATA + PROC_MEM_DATA + TIMESTAMPS
        head['system'] = SYS_CPU_OTHER + LOAD_AVG + SYS_CPU_DATA + SYS_MEM_DATA + TIMESTAMPS
        self.headers = head

    def add_target(self, target, name):
        if target in self.targets:
            self.targets[target].append(name)
        else:
            self.targets[target] = [name]

    def remove_target(self, target, name):
        if target in self.targets:
            if name in self.targets[target]:
                self.targets[target].remove(name)
                self.log.info('[MAIN THREAD] Removed {} named {}'.format(
                    target, name))
            else:
                self.log.error(
                    '[MAIN THREAD] Asked to remove {} named {} while not recorded'
                    .format(target, name))
        else:
            self.log.error(
                '[MAIN THREAD] Asked to remove {} named {} while not recorded'
                .format(target, name))

    def start_record(self, target, name):
        self.log.debug('[MAIN THREAD] Asking server to start recording')
        msg = MSG_SEP.join([START_RECORD, target, name])
        answer = send_data(self.soc_ctrl, msg)
        self.log.info('[MAIN THREAD] Server asked to start recording')
        if answer == SYNC:
            self.add_target(target, name)
            self.log.info('[MAIN THREAD] Added {} named {}'.format(
                target, name))
        else:
            self.log.warn(
                '[MAIN THREAD] Could not add {} named {} because of server answer'
                .format(target, name))

    def stop_record(self, target, name):
        self.log.debug('[MAIN THREAD] Asking server to stop recording')
        msg = MSG_SEP.join([STOP_RECORD, target, name])
        answer = send_data(self.soc_ctrl, msg)
        self.log.info(
            '[MAIN THREAD] Server asked to stop recording {}'.format(name))
        if answer == SYNC:
            self.remove_target(target, name)
        else:
            self.log.warn(
                '[MAIN THREAD] Could not remove {} named {} because of server answer'
                .format(target, name))

    def start_receive(self):
        if not self.receiving:
            self.receiving = True
            self.log.debug('[MAIN THREAD] Asking server to start sending')
            status = send_data(self.soc_ctrl, START_SEND)
            self.log.info('[MAIN THREAD] Server asked to start sending')
            if status == FAIL:
                self.log.error(
                    '[MAIN THREAD] Client tried to receive but server denied it')
            else:
                print status
                self.data_client.start()
                self.log.info('[MAIN THREAD] Client is receiving')
                self.log.debug("[MAIN THREAD] DATA THREAD started")
        else:
            self.log.warn(
                "[MAIN THREAD] Asked to start receiving while already receiving")

    def stop_receive(self):
        if self.receiving:
            self.log.debug(
                '[MAIN THREAD] Closing data channel. Exiting data client thread')
            self.data_client.stop()
            self.log.info("[MAIN THREAD] Asked server to stop receiving")
            self.receiving = False
            send_data(self.soc_ctrl, STOP_SEND)
        else:
            self.log.warn(
                "[MAIN THREAD] Asked to stop receiving while not receiving")

    def start_store(self, dirname='easy_client'):
        return self.data_processor.start_store(dirname)

    def stop_store(self):
        self.data_processor.stop_store()

    def start_print(self):
        self.data_processor.start_print()

    def stop_print(self):
        self.printing = self.data_processor.stop_print()

    def stop_process(self):
        self.stop_print()
        self.stop_store()
        self.data_processor.stop()
        self.stop_receive()
        self.soc_ctrl.close()

    def stop_all(self):
        self.stop_process()
        send_data(self.soc_ctrl, STOP_ALL)
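# A minimal usage sketch for LightClient, not taken from the original code.
# The server IP and the ('process', 'firefox') target are made-up examples;
# only the method names come from the class above.
def example_light_client_session():
    client = LightClient('192.168.0.10')        # assumed server IP
    client.start_record('process', 'firefox')   # ask the server to record one process
    client.start_receive()                      # start the DataClient thread
    client.start_store('easy_client')           # store processed data to disk
    time.sleep(10)                              # collect for a while
    client.stop_all()                           # stop processing, receiving, and the server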
class RemoteClient(object):
    def __init__(self, ip, transmit):
        if not os.path.isdir(DATA_DIR):
            os.makedirs(DATA_DIR)
        # Logging
        self.log = Logger(CLIENT_LOG_FILE, D_VERB)
        self.log.info('[MAIN THREAD] Instantiated client')
        # Central data
        self.receiving = False
        self.training = False
        self.paused = False
        self.define_headers()
        self.targets = {}
        # Workers
        self.data_client = DataClient(transmit, ip)
        # Connection
        self.connect(ip)

    def connect(self, ip):
        self.soc_ctrl = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.soc_ctrl.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        my_ip = socket.gethostbyname('')
        self.log.debug('[MAIN THREAD] connecting...')
        self.soc_ctrl.connect((ip, SOC_PORT_CTRL))
        self.log.info('[MAIN THREAD] Client connected to server')

    def define_headers(self):
        head = {}
        self.headers = head

    def add_target(self, target, name):
        if target in self.targets:
            self.targets[target].append(name)
        else:
            self.targets[target] = [name]

    def remove_target(self, target, name):
        if target in self.targets:
            if name in self.targets[target]:
                self.targets[target].remove(name)
                self.log.info('[MAIN THREAD] Removed {} named {}'.format(target, name))
            else:
                self.log.error('[MAIN THREAD] Asked to remove {} named {} while not recorded'.format(target, name))
        else:
            self.log.error('[MAIN THREAD] Asked to remove {} named {} while not recorded'.format(target, name))

    def start_record(self, target, name):
        self.log.debug('[MAIN THREAD] Asking server to start recording')
        msg = MSG_SEP.join([START_RECORD, target, name])
        answer = send_data(self.soc_ctrl, msg)
        self.log.info('[MAIN THREAD] Server asked to start recording')
        if answer == SYNC:
            self.add_target(target, name)
            self.log.info('[MAIN THREAD] Added {} named {}'.format(target, name))
        else:
            self.log.warn('[MAIN THREAD] Could not add {} named {} because of server answer'.format(target, name))

    def stop_record(self, target, name):
        self.log.debug('[MAIN THREAD] Asking server to stop recording')
        msg = MSG_SEP.join([STOP_RECORD, target, name])
        answer = send_data(self.soc_ctrl, msg)
        self.log.info('[MAIN THREAD] Server asked to stop recording')
        if answer == SYNC:
            self.remove_target(target, name)
        else:
            self.log.warn('[MAIN THREAD] Could not remove {} named {} because of server answer'.format(target, name))

    def start_receive(self):
        if not self.receiving:
            self.receiving = True
            self.log.debug('[MAIN THREAD] Asking server to start sending')
            status = send_data(self.soc_ctrl, START_SEND)
            self.log.info('[MAIN THREAD] Server asked to start sending')
            if status == FAIL:
                self.log.error('[MAIN THREAD] Client tried to receive but server denied it')
            else:
                self.data_client.start()
                self.log.info('[MAIN THREAD] Client is receiving')
                self.log.debug("[MAIN THREAD] DATA THREAD started")
        else:
            self.log.warn("[MAIN THREAD] Asked to start receiving while already receiving")

    def start_training(self):
        if not self.training:
            self.training = True
            self.log.debug('[MAIN THREAD] Asking server to start training')
            status = send_data(self.soc_ctrl, START_TRAIN)
            self.log.info('[MAIN THREAD] Server asked to start training')
            if status == FAIL:
                self.log.error('[MAIN THREAD] Server refused to start training')
            else:
                self.log.info('[MAIN THREAD] Server is training')
        else:
            self.log.warn("[MAIN THREAD] Asked to start training while already training")

    def stop_training(self):
        if self.training:
            self.training = False
            self.log.debug('[MAIN THREAD] Asking server to stop training')
            status = send_data(self.soc_ctrl, STOP_TRAIN)
            self.log.info('[MAIN THREAD] Server asked to stop training')
            if status == FAIL:
                self.log.error('[MAIN THREAD] Server refused to stop training')
            else:
                self.log.info('[MAIN THREAD] Server has stopped training')
        else:
            self.log.warn("[MAIN THREAD] Asked to stop training while not training")

    def pause_training(self):
        if not self.paused:
            self.paused = True
            self.log.debug('[MAIN THREAD] Asking server to pause training')
            status = send_data(self.soc_ctrl, PAUSE_TRAIN)
            self.log.info('[MAIN THREAD] Server asked to pause training')
            if status == FAIL:
                self.log.error('[MAIN THREAD] Server refused to pause training')
            else:
                self.log.info('[MAIN THREAD] Server is paused')
        else:
            self.log.warn("[MAIN THREAD] Asked to pause training while already paused")

    def resume_training(self):
        if self.paused:
            self.paused = False
            self.log.debug('[MAIN THREAD] Asking server to resume training')
            status = send_data(self.soc_ctrl, RESUME_TRAIN)
            self.log.info('[MAIN THREAD] Server asked to resume training')
            if status == FAIL:
                self.log.error('[MAIN THREAD] Server refused to resume training')
            else:
                self.log.info('[MAIN THREAD] Server has resumed training')
        else:
            self.log.warn("[MAIN THREAD] Asked to resume training while not paused")

    def stop_receive(self):
        if self.receiving:
            self.log.debug('[MAIN THREAD] Closing data channel. Exiting data client thread')
            self.data_client.stop()
            self.log.info("[MAIN THREAD] Asked server to stop receiving")
            self.receiving = False
        else:
            self.log.warn("[MAIN THREAD] Asked to stop receiving while not receiving")

    #def start_store(self, dirname='easy_client'):
    #    return self.data_processor.start_store(dirname)

    #def stop_store(self):
    #    self.data_processor.stop_store()

    #def start_print(self):
    #    self.data_processor.start_print()

    #def stop_print(self):
    #    self.printing = self.data_processor.stop_print()

    def stop_process(self):
        # stop_print()/stop_store() are commented out above because RemoteClient
        # has no data_processor, so they cannot be called here either
        #self.stop_print()
        #self.stop_store()
        #self.data_processor.stop()
        self.stop_receive()
        self.soc_ctrl.close()

    def stop_all(self):
        self.stop_process()
        send_data(self.soc_ctrl, STOP_ALL)
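# A minimal usage sketch for RemoteClient, not from the original code. The
# transmit queue, server IP, and sleep duration are assumptions; only the
# constructor signature and method names come from the class above.
def example_remote_client_session():
    transmit = Queue.Queue()                    # queue shared with a consumer thread
    client = RemoteClient('192.168.0.10', transmit)
    client.start_receive()                      # server starts streaming, DataClient starts
    client.start_training()                     # ask the server to start training
    time.sleep(10)
    client.pause_training()
    client.resume_training()
    client.stop_training()
    client.stop_all()                           # stop receiving and close the control socket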
def test_receive_and_sync_verification(self):
    dc = DataClient(host, port, storage_sender, gui_data_sender,
                    active_channels)
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.bind(('localhost', 10002))
    server_sock.listen(1)
    print 'listening on %s:%d' % ('localhost', 10002)
    dc.connect_data_port()
    conn, addr = server_sock.accept()
    print 'accepted connection from %s:%d' % (addr[0], addr[1])

    input_length = 4
    bytes_sent = 0
    with dc.expected_bytes_sent_lock:
        dc.expected_bytes_sent = 99999999
    with dc.expected_readings_verified_lock:
        dc.expected_readings_verified = 99999999

    for i in range(input_length):
        for j in range(len(normal_reading)):
            bytes_sent += conn.send(np.uint16(normal_reading[j]))
    with dc.expected_bytes_sent_lock:
        dc.expected_bytes_sent = bytes_sent
    print 'Test: finished sending part 1, bytes_sent = %d' % bytes_sent
    # first two readings will get dropped by sync. recovery filter
    with dc.expected_readings_verified_lock:
        dc.expected_readings_verified = input_length - 2
    # with dc.receiver_done_cond:
    #     dc.receiver_done_cond.wait()
    # print 'Receiver finished task 1'
    with dc.sync_filter_done_cond:
        dc.sync_filter_done_cond.wait()
    print 'Sync filter finished task 1'
    assert dc.synchronized
    print 'Part 1 passed, synchronization achieved'

    with dc.expected_bytes_sent_lock:
        dc.expected_bytes_sent = 99999999
    for j in range(len(corrupt_reading)):
        bytes_sent += conn.send(np.uint16(corrupt_reading[j]))
    print 'Test: finished sending part 2, bytes_sent = %d' % bytes_sent
    with dc.expected_bytes_sent_lock:
        dc.expected_bytes_sent = bytes_sent
    with dc.receiver_done_cond:
        dc.receiver_done_cond.wait()
    print 'Receiver finished task 2'
    assert not dc.synchronized
    print 'Part 2 passed, synchronization lost as expected'

    with dc.expected_bytes_sent_lock:
        dc.expected_bytes_sent = 99999999
    with dc.expected_readings_verified_lock:
        dc.expected_readings_verified = 99999999
    for i in range(input_length):
        for j in range(len(normal_reading)):
            bytes_sent += conn.send(np.uint16(normal_reading[j]))
    print 'Test: finished sending part 3, bytes_sent = %d' % bytes_sent
    with dc.expected_bytes_sent_lock:
        dc.expected_bytes_sent = bytes_sent
    with dc.expected_readings_verified_lock:
        dc.expected_readings_verified = (input_length - 2) * 2
    with dc.sync_filter_done_cond:
        dc.sync_filter_done_cond.wait()
    print 'Sync filter finished task 3'
    assert dc.synchronized

    dc.close_data_port()
    conn.close()
    server_sock.close()