Example #1
def update_result_to_new_iterator(basename):
    exp = create_experiment(basename + '.yaml')
    model = load_model(basename + '.pkl')
    exp.final_layer = model
    exp.setup()
    datasets = exp.dataset_provider.get_train_merged_valid_test(exp.dataset)
    exp.create_monitors(datasets)
    exp.monitor_manager.monitor_epoch(exp.monitor_chans, datasets,
                                      exp.iterator)

    result = np.load(basename + '.result.pkl')

    for set_name in ['train', 'valid', 'test']:
        for chan_name in ['loss', 'sample_misclass']:
            full_chan_name = set_name + '_' + chan_name

            assert np.allclose(
                result.monitor_channels[full_chan_name][-1],
                exp.monitor_chans[full_chan_name][-1],
                rtol=1e-3,
                atol=1e-3), ("Not close: old {:f}, new: {:f}".format(
                    result.monitor_channels[full_chan_name][-1],
                    exp.monitor_chans[full_chan_name][-1]))

    for set_name in ['train', 'valid', 'test']:
        full_chan_name = set_name + '_' + 'misclass'
        result.monitor_channels[full_chan_name][-1] = exp.monitor_chans[
            full_chan_name][-1]

    result_filename = basename + '.result.pkl'
    with open(result_filename, 'wb') as result_file:
        pickle.dump(result, result_file)
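A minimal usage sketch: the function only needs the shared basename of the <basename>.yaml, <basename>.pkl and <basename>.result.pkl files. The path below is hypothetical, reusing the model directory that appears in later examples.

# Hypothetical basename; expects <basename>.yaml, <basename>.pkl
# and <basename>.result.pkl to exist next to each other.
update_result_to_new_iterator('data/models/online/cnt/shallow-uneven-trials/9')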
Example #3
def load_model(basename):
    """Load model with params from .yaml and .npy files."""
    exp = create_experiment(basename + '.yaml')
    params = np.load(basename + '.npy')
    model = exp.final_layer
    set_param_values_backwards_compatible(model, params)

    return model
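For illustration, a hedged call using the same basename convention (a <basename>.yaml experiment definition next to a <basename>.npy parameter file); the path is hypothetical.

# Hypothetical basename; expects <basename>.yaml and <basename>.npy.
model = load_model('data/models/online/cnt/shallow-uneven-trials/9')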
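Example #5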
def send_file_data():
    print("Loading Experiment...")
    # Use model to get cnt preprocessors
    base_name = 'data/models/online/cnt/shallow-uneven-trials/9'
    exp = create_experiment(base_name + '.yaml')

    print("Loading File...")
    offline_execution_set = BBCIDataset('data/four-sec-dry-32-sensors/cabin/'
        'MaVo2_sahara32_realMovementS001R02_ds10_1-5.BBCI.mat')
    cnt = offline_execution_set.load()
    print("Running preprocessings...")
    cnt_preprocs = exp.dataset.cnt_preprocessors
    assert cnt_preprocs[-1][0].__name__ == 'exponential_standardize_cnt'
    # Do not standardize here; the coordinator will do it
    for preproc, kwargs in cnt_preprocs[:-1]:
        cnt = preproc(cnt, **kwargs)
    cnt_data = cnt.data.astype(np.float32)
    assert not np.any(np.isnan(cnt_data))
    assert not np.any(np.isinf(cnt_data))
    assert not np.any(np.isneginf(cnt_data))
    print("max possible block", np.ceil(len(cnt_data) / 50.0))
    y_labels = create_y_labels(cnt).astype(np.float32)
    assert np.array_equal(np.unique(y_labels), range(5)), ("Should only have "
        "labels 0-4")
    print("Done.")
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("127.0.0.1", 1234))
    
    chan_names = ['Fp1', 'Fpz', 'Fp2', 'AF7', 'AF3',
            'AFz', 'AF4', 'AF8', 'F5', 'F3', 'F1', 'Fz', 'F2', 'F4', 'F6',
            'FC1', 'FCz', 'FC2', 'C3', 'C1', 'Cz', 'C2', 'C4', 'CP3', 'CP1',
             'CPz', 'CP2', 'CP4', 'P1', 'Pz', 'P2', 'POz', 'marker']
    
    chan_line = " ".join(chan_names) + "\n"
    s.sendall(chan_line.encode('ascii'))
    n_chans = 33
    n_samples = 50
    s.sendall(np.array([n_chans], dtype=np.int32).tobytes())
    s.sendall(np.array([n_samples], dtype=np.int32).tobytes())
    print("Sending data...")
    i_block = 0  # if set higher, printed results will be incorrect
    max_stop_block = np.ceil(len(cnt_data) / float(n_samples))
    stop_block = 800
    assert stop_block < max_stop_block
    while i_block < stop_block:
        arr = cnt_data[i_block * n_samples:i_block * n_samples + n_samples, :].T
        this_y = y_labels[i_block * n_samples:i_block * n_samples + n_samples]
        # chan x time
        arr = np.concatenate((arr, this_y[np.newaxis, :]), axis=0).astype(np.float32)
        assert arr.shape == (n_chans, n_samples)
        s.sendall(arr.tobytes(order='F'))
        i_block += 1
        gevent.sleep(0.01)
    print("Done.")
    return cnt
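The wire format is implicit in the sends above: a newline-terminated, space-separated channel-name line, two little-endian int32 values (n_chans, n_samples), then float32 blocks of shape n_chans x n_samples flattened in Fortran order, with the marker/label row as the last channel. A minimal receiver sketch under those assumptions; the port matches the sender above, but the helper names and buffer handling are illustrative, not part of the original code.

import socket

import numpy as np


def recv_exactly(sock, n_bytes):
    # send() peers may fragment, so loop until all bytes have arrived
    chunks = []
    while n_bytes > 0:
        chunk = sock.recv(n_bytes)
        if not chunk:
            raise EOFError("Socket closed before all bytes arrived")
        chunks.append(chunk)
        n_bytes -= len(chunk)
    return b"".join(chunks)


server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("127.0.0.1", 1234))  # port taken from the sender above
server.listen(1)
conn, _ = server.accept()

# Channel-name header: space-separated names, newline-terminated
header = b""
while not header.endswith(b"\n"):
    byte = conn.recv(1)
    if not byte:
        raise EOFError("Socket closed during header")
    header += byte
chan_names = header.decode("ascii").split()
print("Receiving channels:", chan_names)

n_chans = int(np.frombuffer(recv_exactly(conn, 4), dtype=np.int32)[0])
n_samples = int(np.frombuffer(recv_exactly(conn, 4), dtype=np.int32)[0])

block_bytes = n_chans * n_samples * 4  # float32
while True:
    try:
        raw = recv_exactly(conn, block_bytes)
    except EOFError:
        break
    # Sender used tobytes(order='F'), so reshape in Fortran order
    block = np.frombuffer(raw, dtype=np.float32).reshape(
        (n_chans, n_samples), order="F")
    data, markers = block[:-1], block[-1]  # last row is the label channel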
Example #6
def load_exp_and_model(basename, set_invalid_to_NaN=True, seed=9859295):
    """ Loads experiment and model for analysis, sets invalid fillv alues to NaN."""
    model = load_model(basename)
    exp = create_experiment(basename + '.yaml', seed=seed)
    if set_invalid_to_NaN:
        all_layers = lasagne.layers.get_all_layers(model)
        # mark nans to be sure you are doing correct transformations
        # also necessary for transformations to cnt and time activations
        for layer in all_layers:
            if hasattr(layer, 'invalid_fill_value'):
                layer.invalid_fill_value = np.nan
    return exp, model
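Example #10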
def create_unit_output_class_corrs(basename, i_all_layers):
    exp, model = load_exp_and_model(basename)
    exp.dataset.load()
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    rand_model = create_experiment(basename + '.yaml').final_layer
    for i_layer in i_all_layers:
        trained_corrs = unit_output_class_corrs(model, exp.iterator, train_set,
                                                i_layer)
        untrained_corrs = unit_output_class_corrs(rand_model, exp.iterator,
                                                  train_set, i_layer)
        file_name_end = '{:d}.npy'.format(i_layer)
        trained_filename = '{:s}.unit_class_corrs.{:s}'.format(
            basename, file_name_end)
        untrained_filename = '{:s}.rand_unit_class_corrs.{:s}'.format(
            basename, file_name_end)
        log.info("Saving to {:s} and {:s}".format(trained_filename,
                                                  untrained_filename))
        np.save(trained_filename, trained_corrs)
        np.save(untrained_filename, untrained_corrs)
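For illustration, a hedged invocation; the basename and layer indices below are hypothetical (layer 26 is described elsewhere in these examples as the last max-pool layer).

# Hypothetical basename and layer indices.
create_unit_output_class_corrs('data/models/online/cnt/shallow-uneven-trials/9',
                               i_all_layers=[8, 14, 20, 26])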
Example #11
def create_env_class_corr_files_after_load(base_name, with_square, exp, model):
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']

    trial_env = load_trial_env(
        base_name + '.env.npy',
        model,
        i_layer=26,  # 26 is the last max-pool, I think
        train_set=train_set,
        n_inputs_per_trial=2,
        square_before_mean=with_square)
    topo_corrs = compute_env_class_corr(exp, trial_env)
    rand_model = create_experiment(base_name + '.yaml').final_layer

    rand_topo_corrs = compute_rand_preds_topo_corr(exp, rand_model, trial_env)
    file_name_end = 'class.npy'
    if with_square:
        file_name_end = 'square.' + file_name_end
    np.save('{:s}.env_corrs.{:s}'.format(base_name, file_name_end), topo_corrs)
    np.save('{:s}.env_rand_corrs.{:s}'.format(base_name, file_name_end),
            rand_topo_corrs)
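Example #12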
def create_topo_env_corrs_files(base_name, i_all_layers, with_square):
    # Load env first to make sure env is actually there.
    result = np.load(base_name + '.result.pkl')
    env_file_name = dataset_to_env_file(result.parameters['dataset_filename'])
    exp, model = load_exp_and_model(base_name)
    exp.dataset.load()
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    rand_model = create_experiment(base_name + '.yaml').final_layer
    for i_layer in i_all_layers:
        log.info("Layer {:d}".format(i_layer))
        trial_env = load_trial_env(env_file_name, model, i_layer, train_set,
                                   n_inputs_per_trial=2,
                                   square_before_mean=with_square)
        topo_corrs = compute_trial_topo_corrs(model, i_layer, train_set,
                                              exp.iterator, trial_env)

        rand_topo_corrs = compute_trial_topo_corrs(rand_model, i_layer,
                                                   train_set, exp.iterator,
                                                   trial_env)
        file_name_end = '{:d}.npy'.format(i_layer)
        if with_square:
            file_name_end = 'square.' + file_name_end
        np.save('{:s}.labelsplitted.env_corrs.{:s}'.format(
            base_name, file_name_end), topo_corrs)
        np.save('{:s}.labelsplitted.env_rand_corrs.{:s}'.format(
            base_name, file_name_end), rand_topo_corrs)
    return
Example #13
def create_topo_env_corrs_files(base_name, i_all_layers, with_square):
    # Load env first to make sure env is actually there.
    result = np.load(base_name + '.result.pkl')
    env_file_name = dataset_to_env_file(result.parameters['dataset_filename'])
    exp, model = load_exp_and_model(base_name)
    exp.dataset.load()
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    rand_model = create_experiment(base_name + '.yaml').final_layer
    for i_layer in i_all_layers:
        log.info("Layer {:d}".format(i_layer))
        trial_env = load_trial_env(env_file_name, model, i_layer, train_set,
                                   n_inputs_per_trial=2,
                                   square_before_mean=with_square)
        topo_corrs = compute_trial_topo_corrs(model, i_layer, train_set,
                                              exp.iterator, trial_env,
                                              split_per_class=True)

        rand_topo_corrs = compute_trial_topo_corrs(rand_model, i_layer,
                                                   train_set, exp.iterator,
                                                   trial_env,
                                                   split_per_class=True)
        file_name_end = '{:d}.npy'.format(i_layer)
        if with_square:
            file_name_end = 'square.' + file_name_end
        np.save('{:s}.labelsplitted.env_corrs.{:s}'.format(
            base_name, file_name_end), topo_corrs)
        np.save('{:s}.labelsplitted.env_rand_corrs.{:s}'.format(
            base_name, file_name_end), rand_topo_corrs)
    return
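The saved arrays can be read back with np.load; a sketch assuming with_square=True and i_layer=26 (both hypothetical values), mirroring the save paths built above.

# Hypothetical base_name and layer index; mirrors the save paths above.
base_name = 'data/models/online/cnt/shallow-uneven-trials/9'
topo_corrs = np.load(
    '{:s}.labelsplitted.env_corrs.square.26.npy'.format(base_name))
rand_topo_corrs = np.load(
    '{:s}.labelsplitted.env_rand_corrs.square.26.npy'.format(base_name))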
Example #14
def send_file_data():
    print("Loading Experiment...")
    # Use model to get cnt preprocessors
    base_name = 'data/models/online/cnt/start-end-mrk/125'
    exp = create_experiment(base_name + '.yaml')

    print("Loading File...")
    offline_execution_set = BBCIDataset(
        'data/robot-hall/NiRiNBD15_cursor_250Hz.BBCI.mat',
        load_sensor_names=get_nico_sensors())
    cnt = offline_execution_set.load()
    log.info("Preprocessing...")
    cnt = resample_cnt(cnt, newfs=100)
    cnt = lowpass_cnt(cnt, high_cut_off_hz=40, filt_order=10)
    log.info("Done.")
    cnt_data = cnt.data.astype(np.float32)
    assert not np.any(np.isnan(cnt_data))
    assert not np.any(np.isinf(cnt_data))
    assert not np.any(np.isneginf(cnt_data))
    print("max possible block", np.ceil(len(cnt_data) / 50.0))
    segmenter = MarkerSegmenter(
        segment_ival=(500, 0),
        marker_def={
            'Right Hand': [1],
            'Feet': [2],
            'Rotation': [3],
            'Words': [4],
            'Rest': [5]
        },
        trial_classes=['Right Hand', 'Feet', 'Rotation', 'Words', 'Rest'],
        end_marker_def={
            'Right Hand': [10],
            'Feet': [20],
            'Rotation': [30],
            'Words': [40],
            'Rest': [50],
        },
    )
    cnt_y, class_names = segmenter.segment(cnt)
    has_marker = np.sum(cnt_y, axis=1) > 0
    new_y = np.zeros(cnt_y.shape[0], dtype=np.int32)
    new_y[has_marker] = (np.argmax(cnt_y[has_marker], axis=1) + 1)
    print("Done.")
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("127.0.0.1", 7987))

    chan_names = [
        'Fp1', 'Fpz', 'Fp2', 'AF7', 'AF3', 'AF4', 'AF8', 'F7', 'F5', 'F3',
        'F1', 'Fz', 'F2', 'F4', 'F6', 'F8', 'FT7', 'FC5', 'FC3', 'FC1', 'FCz',
        'FC2', 'FC4', 'FC6', 'FT8', 'M1', 'T7', 'C5', 'C3', 'C1', 'Cz', 'C2',
        'C4', 'C6', 'T8', 'M2', 'TP7', 'CP5', 'CP3', 'CP1', 'CPz', 'CP2',
        'CP4', 'CP6', 'TP8', 'P7', 'P5', 'P3', 'P1', 'Pz', 'P2', 'P4', 'P6',
        'P8', 'PO7', 'PO5', 'PO3', 'POz', 'PO4', 'PO6', 'PO8', 'O1', 'Oz',
        'O2', 'marker'
    ]

    chan_line = " ".join(chan_names) + "\n"
    s.sendall(chan_line.encode('ascii'))
    n_chans = 65
    n_samples = 50
    s.sendall(np.array([n_chans], dtype=np.int32).tobytes())
    s.sendall(np.array([n_samples], dtype=np.int32).tobytes())
    print("Sending data...")
    i_block = 0  # if set higher, printed results will be incorrect
    max_stop_block = np.ceil(len(cnt_data) / float(n_samples))
    stop_block = 800
    assert stop_block < max_stop_block
    while i_block < stop_block:
        arr = cnt_data[i_block * n_samples:i_block * n_samples +
                       n_samples, :].T
        this_y = new_y[i_block * n_samples:i_block * n_samples + n_samples]
        # chan x time
        arr = np.concatenate((arr, this_y[np.newaxis, :]),
                             axis=0).astype(np.float32)
        assert arr.shape == (n_chans, n_samples)
        s.sendall(arr.tobytes(order='F'))
        i_block += 1
        gevent.sleep(0.03)
    print("Done.")
    return cnt
Example #15
def main(ui_hostname, ui_port, base_name, params_filename, plot_sensors,
         use_ui_server, adapt_model, save_data, n_updates_per_break,
         batch_size, learning_rate, n_min_trials, trial_start_offset,
         break_start_offset, break_stop_offset, pred_freq, incoming_port,
         load_old_data, use_new_adam_params, input_time_length):
    setup_logging()
    assert np.little_endian, "Should be in little endian"
    train_params = None # for trainer, e.g. adam params
    if params_filename is not None:
        if params_filename == 'newest':
            # sort will already sort temporally with our time string format
            all_params_files = sorted(glob(base_name + ".*.model_params.npy"))
            assert len(all_params_files) > 0, ("Expect at least one params "
                "file if 'newest' is given as argument")
            params_filename = all_params_files[-1]
        log.info("Loading model params from {:s}".format(params_filename))
        params = np.load(params_filename)
        train_params_filename = params_filename.replace('model_params.npy',
            'trainer_params.npy')
        if os.path.isfile(train_params_filename):
            if use_new_adam_params:
                log.info("Loading trainer params from {:s}".format(train_params_filename))
                train_params = np.load(train_params_filename)
        else:
            log.warn("No train/adam params found, starting optimization params "
                "from scratch (model params will be loaded anyways).")
    else:
        params = np.load(base_name + '.npy')
    exp = create_experiment(base_name + '.yaml')
    
    # Possibly change the input time length, for example if it was
    # very long during training and should be shorter for online use
    if input_time_length is not None:
        log.info("Change input time length to {:d}".format(input_time_length))
        set_input_window_length(exp.final_layer, input_time_length)
        # probably unnecessary, just for safety
        exp.iterator.input_time_length = input_time_length
    # Have to set for both exp final layer and actually used model
    # as exp final layer might be used for adaptation
    # maybe check this all for correctness?
    cnt_model = exp.final_layer
    lasagne.layers.set_all_param_values(cnt_model, params)
    prediction_model = transform_to_normal_net(cnt_model)
    lasagne.layers.set_all_param_values(prediction_model, params)
    
    data_processor = StandardizeProcessor(factor_new=1e-3)
    online_model = OnlineModel(prediction_model)
    if adapt_model:
        online_trainer = BatchWiseCntTrainer(exp, n_updates_per_break, 
            batch_size, learning_rate, n_min_trials, trial_start_offset,
            break_start_offset=break_start_offset,
            break_stop_offset=break_stop_offset,
            train_param_values=train_params)
    else:
        log.info("Not adapting model...")
        online_trainer = NoTrainer()
    coordinator = OnlineCoordinator(data_processor, online_model, online_trainer,
        pred_freq=pred_freq)
    hostname = ''
    server = PredictionServer((hostname, incoming_port), coordinator=coordinator,
        ui_hostname=ui_hostname, ui_port=ui_port, plot_sensors=plot_sensors,
        use_ui_server=use_ui_server, save_data=save_data,
        model_base_name=base_name, adapt_model=adapt_model)
    # Compilation takes some time so initialize trainer already
    # before waiting in connection in server
    online_trainer.initialize()
    if adapt_model and load_old_data:
        online_trainer.add_data_from_today(data_processor)
    log.info("Starting server on port {:d}".format(incoming_port))
    server.start()
    log.info("Started server")
    server.serve_forever()
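Every argument is required, so a call has to supply all of them. A hedged sketch with placeholder values only; the ports, offsets, and rates below are illustrative and not recommendations from the original code.

# All values below are hypothetical placeholders.
main(ui_hostname='127.0.0.1', ui_port=9999,
     base_name='data/models/online/cnt/start-end-mrk/125',
     params_filename='newest', plot_sensors=False, use_ui_server=False,
     adapt_model=True, save_data=True, n_updates_per_break=5, batch_size=45,
     learning_rate=1e-4, n_min_trials=10, trial_start_offset=0,
     break_start_offset=0, break_stop_offset=0, pred_freq=100,
     incoming_port=7987, load_old_data=False, use_new_adam_params=True,
     input_time_length=None)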
Example #16
def main(ui_hostname, ui_port, base_name, params_filename, plot_sensors,
         use_ui_server, adapt_model, save_data, n_updates_per_break,
         batch_size, learning_rate, n_min_trials, trial_start_offset,
         break_start_offset, break_stop_offset, pred_gap, incoming_port,
         load_old_data, use_new_adam_params, input_time_length,
         train_on_breaks, min_break_samples, min_trial_samples):
    setup_logging()
    assert np.little_endian, "Should be in little endian"
    train_params = None # for trainer, e.g. adam params
    if params_filename is not None:
        if params_filename == 'newest':
            # sort will already sort temporally with our time string format
            all_params_files = sorted(glob(base_name + ".*.model_params.npy"))
            assert len(all_params_files) > 0, ("Expect at least one params "
                "file if 'newest' is given as argument")
            params_filename = all_params_files[-1]
        log.info("Loading model params from {:s}".format(params_filename))
        params = np.load(params_filename)
        train_params_filename = params_filename.replace('model_params.npy',
            'trainer_params.npy')
        if os.path.isfile(train_params_filename):
            if use_new_adam_params:
                log.info("Loading trainer params from {:s}".format(train_params_filename))
                train_params = np.load(train_params_filename)
        else:
            log.warn("No train/adam params found, starting optimization params "
                "from scratch (model params will be loaded anyways).")
    else:
        params = np.load(base_name + '.npy')
    exp = create_experiment(base_name + '.yaml')
    
    # Possibly change the input time length, for example if it was
    # very long during training and should be shorter for online use
    if input_time_length is not None:
        log.info("Change input time length to {:d}".format(input_time_length))
        set_input_window_length(exp.final_layer, input_time_length)
        # probably unnecessary, just for safety
        exp.iterator.input_time_length = input_time_length
    # Have to set for both exp final layer and actually used model
    # as exp final layer might be used for adaptation
    # maybe check this all for correctness?
    cnt_model = exp.final_layer
    set_param_values_backwards_compatible(cnt_model, params)
    prediction_model = transform_to_normal_net(cnt_model)
    set_param_values_backwards_compatible(prediction_model, params)
    
    data_processor = StandardizeProcessor(factor_new=1e-3)
    online_model = OnlineModel(prediction_model)
    if adapt_model:
        online_trainer = BatchWiseCntTrainer(exp, n_updates_per_break, 
            batch_size, learning_rate, n_min_trials, trial_start_offset,
            break_start_offset=break_start_offset,
            break_stop_offset=break_stop_offset,
            train_param_values=train_params,
            add_breaks=train_on_breaks,
            min_break_samples=min_break_samples,
            min_trial_samples=min_trial_samples)
    else:
        log.info("Not adapting model...")
        online_trainer = NoTrainer()
    coordinator = OnlineCoordinator(data_processor, online_model, online_trainer,
        pred_gap=pred_gap)
    hostname = ''
    server = PredictionServer((hostname, incoming_port), coordinator=coordinator,
        ui_hostname=ui_hostname, ui_port=ui_port, plot_sensors=plot_sensors,
        use_ui_server=use_ui_server, save_data=save_data,
        model_base_name=base_name, adapt_model=adapt_model)
    # Compilation takes some time so initialize trainer already
    # before waiting in connection in server
    online_trainer.initialize()
    if adapt_model and load_old_data:
        online_trainer.add_data_from_today(data_processor)
    log.info("Starting server on port {:d}".format(incoming_port))
    server.start()
    log.info("Started server")
    server.serve_forever()
Example #17
def _load_experiment(experiment_file_name):
    exp = create_experiment(experiment_file_name)
    exp.dataset.load()
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    return exp.iterator, train_set
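A hedged usage sketch; the get_batches call is an assumption about the iterator interface, not something the snippet above confirms.

# Hypothetical path; any experiment .yaml used in the examples above works.
iterator, train_set = _load_experiment(
    'data/models/online/cnt/start-end-mrk/125.yaml')
# Assumed iterator API: yields (inputs, targets) batches.
for inputs, targets in iterator.get_batches(train_set, shuffle=False):
    print(inputs.shape, targets.shape)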