def start():
    # create config dict
    config = config_init(argv)
    q_from_main_to_listener = Queue()

    # Debug mode uses LSL_Generator for debugging
    if config['general'].getboolean('debug_mode'):
        print('Debug Mode!!!')
        debug_time = config['general'].getint('debug_time')
        lsl_stream_generator_path = config['paths']['lsl_stream_generator_path']
        import importlib.util
        spec = importlib.util.spec_from_file_location(
            "lsl_stream_generator", lsl_stream_generator_path)
        lsl_stream_generator = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(lsl_stream_generator)
        lsl_stream_debug = lsl_stream_generator.LSL_Generator(
            debug_time, 69, 2048, q_from_main_to_listener)
        lsl_stream_debug.start()
        time.sleep(2)

    # Queue is used to pass state flags from the main thread to the LSL_Listener
    q_from_main_to_listener.put(('lsl_stream_listener_state', True))
    q_from_main_to_listener.put(('patient_state', 1))

    lsl_listener = LSL_Listener(config, 2048, q_from_main_to_listener)
    time.sleep(0.5)
    record_time = 20

    # initialise thread for display
    thread = Thread(target=update, args=(q_from_main_to_listener, record_time))
    thread.start()

    lsl_listener.record_using_buffer()

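# Minimal self-contained sketch of the importlib idiom used in the debug branch
# above: loading a module from an explicit file path. The path below is
# illustrative, not the project's real location.
import importlib.util

module_path = '/path/to/lsl_stream_generator.py'  # hypothetical path
spec = importlib.util.spec_from_file_location('lsl_stream_generator', module_path)
lsl_stream_generator = importlib.util.module_from_spec(spec)
spec.loader.exec_module(lsl_stream_generator)
# lsl_stream_generator.LSL_Generator is now usable as if it had been imported normally.
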
def main():
    setpriority(pid=None, priority=5)
    config = config_init(sys.argv)

    # Debug mode uses LSL_Generator for debugging
    if config['general'].getboolean('debug_mode'):
        print('Debug Mode!!!')
        sys.path.append(config['paths']['lsl_stream_generator_path'])
        sys.path.append(config['paths']['lsl_stream_generator_path'] + '/pynfb')
        from generators import run_eeg_sim
        freq = config['amp_config'].getint('fs_amp')
        name = config['amp_config']['lsl_stream_name_amp']
        labels = [
            'channel{}'.format(i)
            for i in range(config['amp_config'].getint('n_channels_amp'))
        ]
        lsl_stream_debug = lambda: run_eeg_sim(freq, name=name, labels=labels)
        lsl_stream_debug_thread = Thread(target=lsl_stream_debug, args=())
        lsl_stream_debug_thread.daemon = True
        lsl_stream_debug_thread.start()
        print("generators.run_eeg_sim started DEBUG LSL stream \"{}\"".format(
            config['amp_config']['lsl_stream_name_amp']))

    print('Running application, please check:\n 1) PN connection\n 2) Amplifier connection')

    # initiate streams of PN and Amp data
    pnhandler = PNHandler(config)
    pnhandler.start()
    inlet_amp = get_inlet_amp(config)
    time.sleep(1.5)

    # record train data
    if config['general'].getboolean('record_enable'):
        experiment_record = ExperimentRecord(config, pnhandler, inlet_amp)
        experiment_record.record_data()
        input("Data is recorded, press Enter to continue...")

    # start realtime experiment
    if not config['general'].getboolean('realtime_enable'):
        return

    # stimulate during realtime
    stimulator = Stimulator(config)
    stimulator.connect()
    try:
        experiment_realtime = ExperimentRealtime(config, pnhandler, inlet_amp, stimulator)
        experiment_realtime.fit()
        experiment_realtime.decode()
    finally:
        # experiment_realtime.stop()
        stimulator.close_connection()

def main():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a %d %b %Y %H:%M:%S')

    if not config.config_init():
        logger.fatal("config init failed, please check the log above and rerun")
        time.sleep(10)
        return -1

    # keep retrying the DNS update until it succeeds
    while True:
        try:
            result = update_domain_A_record()
            if result:
                break
            else:
                time.sleep(10)
        except Exception as ex:
            logger.fatal(ex)
            time.sleep(10)

    user_alive_time = time.time()
    str_user_alive_time = time.asctime()

    # poll logged-in users; stop the instance once MAX_IDLE_TIME passes with no user
    while True:
        try:
            cnt = get_current_login_user()
            if cnt is None or cnt > 0:
                user_alive_time = time.time()
                str_user_alive_time = time.asctime()
            if user_alive_time + config.MAX_IDLE_TIME < time.time():
                logger.info(
                    f"idle time ({config.MAX_IDLE_TIME}) reached since last user login time "
                    f"({str_user_alive_time}). We'll stop the instance named ({config.INSTANCE_NAME})")
                ali_instance.stop_instance_by_name(config.INSTANCE_NAME)
                break
            else:
                time.sleep(60)
        except Exception as ex:
            logger.fatal(ex)
            time.sleep(60)

    while True:
        logger.info("waiting for the server to shut down...")
        time.sleep(60)

def start():
    # create config dict
    config = config_init(argv)

    # Queue is used to pass arguments from display thread to main thread (to LSL_Listener)
    q_from_display_to_recorder = Queue()

    # show modes do not use data from LSL, they only display pictures
    if config['general'].getboolean('show_objects_mode') or config[
            'general'].getboolean('show_actions_mode'):
        patient_display = Display(config, q_from_display_to_recorder)
        patient_display.start()
        return

    # Debug mode uses LSL_Generator for debugging
    if config['general'].getboolean('debug_mode'):
        print('Debug Mode!!!')
        debug_time = config['general'].getint('debug_time')
        lsl_stream_generator_path = config['paths']['lsl_stream_generator_path']
        import importlib.util
        spec = importlib.util.spec_from_file_location(
            "lsl_stream_generator", lsl_stream_generator_path)
        lsl_stream_generator = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(lsl_stream_generator)
        lsl_stream_debug = lsl_stream_generator.LSL_Generator(
            debug_time, 69, 2048, q_from_display_to_recorder)
        lsl_stream_debug.start()
        time.sleep(2)

    # create Recorder object
    recorder = Recorder(config, q_from_display_to_recorder)

    # Activate Display if not debugging or if debugging with stream from LSL_Generator
    # if not config['general'].getboolean('debug_mode') or config['general'].getboolean('lsl_outlet_random'):
    patient_display = Display(config, q_from_display_to_recorder)
    patient_display.start()
    time.sleep(0.5)

    # record data
    recorder.record()

    # process data and plot results
    decoder = Decoder(config)
    decoder.process_current_file()

# Suppress warnings
import warnings
warnings.filterwarnings("ignore")

# For developer mode only
if __name__ == '__main__':
    # Set base directory
    config.set_base_dir(os.path.dirname(os.path.realpath(__file__)))
    # Set resource directory
    config.set_resource_dir()
    # Initialize configurations
    config.config_init()

from utils import bgcolors

GLOBAL_HOST = config.get_config('camera_database', 'GLOBAL_HOST')
GLOBAL_USER = config.get_config('camera_database', 'GLOBAL_USER')
GLOBAL_PWD = config.get_config('camera_database', 'GLOBAL_PWD')


class CameraDatabase():
    def __init__(self,
                 db_name,
                 db_table,
                 host=GLOBAL_HOST,
                 user=GLOBAL_USER,
                 password=GLOBAL_PWD,

class Saver():
    def __init__(self, config):
        self.config = config
        # create Path to experiment_data file
        self.experiment_data_path = Path(self.config['paths']['experiment_data_path'])
        self.dataset_width = self.config['saver'].getint('dataset_width')
        # create h5 experiment_data file with empty groups of non-fixed length and a group with fs
        self.groups = self.config['saver']['group_names'].split(' ')
        if not self.experiment_data_path.is_file():
            with h5py.File(self.experiment_data_path, 'a') as file:
                for group in self.groups:
                    file.create_dataset(group + '/raw_data',
                                        (0, self.dataset_width),
                                        maxshape=(None, self.dataset_width))
                file.create_dataset('fs', data=np.array(self.config['general'].getint('fs')))

    # save chunk of data into a new raw_data# dataset
    def save_data_buffer(self, data, data_type):
        with h5py.File(self.experiment_data_path, "a") as file:
            keys = file[data_type].keys()
            dataset_name = '{}/raw_data{}'.format(data_type, len(keys))
            file.create_dataset(dataset_name, shape=data.shape, data=data)

    def save(self):
        pass

    # after saving into the file as raw_data# chunks, merge them back into a single raw_data dataset
    def reforge_into_raw_data(self):
        with h5py.File(self.experiment_data_path, "a") as file:
            for group in self.groups:
                dataset_raw_data = file[group]['raw_data']
                datasets = []
                if len(file[group].keys()) > 1:
                    for i in range(1, len(file[group].keys())):
                        dataset = file[group]['raw_data{}'.format(i)][()]
                        datasets.append(dataset)
                if datasets:
                    dataset_stacked = np.vstack(datasets)
                    new_length = dataset_raw_data.shape[0] + dataset_stacked.shape[0]
                    dataset_raw_data.resize(new_length, axis=0)
                    dataset_raw_data[-dataset_stacked.shape[0]:] = dataset_stacked
                    for i in range(1, len(file[group].keys())):
                        del file[group]['raw_data{}'.format(i)]


if __name__ == '__main__':
    config = config_init([''])
    recorder = Recorder(config)
    recorder.reforge_into_raw_data()
    add = 1
    if add:
        a = np.random.random(size=(3, 72))
        for i in range(2):
            recorder.save_data_rest(a)
        for i in range(6):
            recorder.save_data_actions(a)
    with h5py.File(recorder.experiment_data_path, "r") as file:
        keys = file.keys()
        print(keys)
        for key in keys:
            if key != 'fs':
                print(key)
                print(file[key].keys())
                print(key, file[key]['raw_data'].shape)
                for i in range(1, len(file[key].keys())):
                    print(key, file[key]['raw_data{}'.format(i)].shape)

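# A minimal, self-contained sketch (file name, group name and shapes are
# illustrative) of the h5py resizable-dataset idiom that reforge_into_raw_data
# relies on: create the dataset with an unbounded first axis, then grow it with
# resize() and write each new chunk into the freshly added rows.
import h5py
import numpy as np

with h5py.File('append_demo.h5', 'w') as f:
    f.create_dataset('demo/raw_data', shape=(0, 4), maxshape=(None, 4))

chunk = np.random.random(size=(3, 4))
with h5py.File('append_demo.h5', 'a') as f:
    dset = f['demo/raw_data']
    dset.resize(dset.shape[0] + chunk.shape[0], axis=0)
    dset[-chunk.shape[0]:] = chunk
    print(dset.shape)  # (3, 4) after appending one chunk
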
    def prepare(self, X, Y, rotate):
        if rotate:
            self._rotate_resize(X, Y)
        self._pad(X, Y)

    def get_img(self):
        return self.img

    def get_file_name(self):
        return self.file_name

    def get_number(self):
        return int(self.file_name[:-4])

    def get_type(self):
        return self.picture_type

    def shape(self):
        return self.img.shape


if __name__ == '__main__':
    from queue import Queue
    q = Queue()
    from config import config_init
    argv = []
    config = config_init(argv)
    d = Display(config, q)
    d.start()
    # d._show_image(d.image_button_any, 5000)