def get_layers(self):
    layers = get_layers(self.layers)
    final_layer = layers[-1]
    # Layers reachable from the final layer; anything declared in self.layers
    # but not in this set was never wired into the network.
    unused = np.setdiff1d(layers, lasagne.layers.get_all_layers(final_layer))
    assert len(unused) == 0, (
        "All layers should be used, unused: {:s}".format(str(unused)))
    transformed = transform_to_normal_net(final_layer)
    return lasagne.layers.get_all_layers(transformed)
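For context, a minimal sketch of the "all layers used" check above, on a small hypothetical Lasagne network (get_layers, self.layers and transform_to_normal_net are project-specific helpers not shown here). The original uses np.setdiff1d on layer objects; a plain set difference expresses the same membership test:

import lasagne

# Hypothetical two-layer network, just to illustrate the check.
input_layer = lasagne.layers.InputLayer(shape=(None, 10))
hidden = lasagne.layers.DenseLayer(input_layer, num_units=20)
final_layer = lasagne.layers.DenseLayer(hidden, num_units=2)

# get_all_layers walks backwards from the given layer and returns every
# layer reachable from it, in topological order.
reachable = lasagne.layers.get_all_layers(final_layer)

# A layer that never feeds into final_layer would be flagged as unused.
orphan = lasagne.layers.DenseLayer(input_layer, num_units=5)
unused = set([input_layer, hidden, final_layer, orphan]) - set(reachable)
print(unused)  # {orphan}: declared, but not wired into the network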
 
 
     
Example #3
def main(ui_hostname, ui_port, base_name, params_filename, plot_sensors,
        use_ui_server, adapt_model, save_data, n_updates_per_break, batch_size,
        learning_rate, n_min_trials, trial_start_offset, break_start_offset,
        break_stop_offset, pred_gap, incoming_port, load_old_data,
        use_new_adam_params, input_time_length, train_on_breaks,
        min_break_samples, min_trial_samples):
    setup_logging()
    assert np.little_endian, "Machine should be little-endian"
    train_params = None # for trainer, e.g. adam params
    if params_filename is not None:
        if params_filename == 'newest':
            # sort will already sort temporally with our time string format
            all_params_files = sorted(glob(base_name + ".*.model_params.npy"))
            assert len(all_params_files) > 0, ("Expect at least one params file "
                "if 'newest' is given as argument")
            params_filename = all_params_files[-1]
        log.info("Loading model params from {:s}".format(params_filename))
        params = np.load(params_filename)
        train_params_filename = params_filename.replace('model_params.npy',
            'trainer_params.npy')
        if os.path.isfile(train_params_filename):
            if use_new_adam_params:
                log.info("Loading trainer params from {:s}".format(train_params_filename))
                train_params = np.load(train_params_filename)
        else:
            log.warn("No train/adam params found, starting optimization params "
                "from scratch (model params will be loaded anyway).")
    else:
        params = np.load(base_name + '.npy')
    exp = create_experiment(base_name + '.yaml')
    
    # Possibly change the input time length, for example if the input time
    # length was very long during training and should be shorter for online
    # prediction
    if input_time_length is not None:
        log.info("Change input time length to {:d}".format(input_time_length))
        set_input_window_length(exp.final_layer, input_time_length)
        # probably unnecessary, just for safety
        exp.iterator.input_time_length = input_time_length
    # Parameters have to be set both for the exp final layer and for the
    # actually used model, as the exp final layer might be used for adaptation
    # (maybe check this all for correctness?)
    cnt_model = exp.final_layer
    set_param_values_backwards_compatible(cnt_model, params)
    prediction_model = transform_to_normal_net(cnt_model)
    set_param_values_backwards_compatible(prediction_model, params)
    
    data_processor = StandardizeProcessor(factor_new=1e-3)
    online_model = OnlineModel(prediction_model)
    if adapt_model:
        online_trainer = BatchWiseCntTrainer(exp, n_updates_per_break, 
            batch_size, learning_rate, n_min_trials, trial_start_offset,
            break_start_offset=break_start_offset,
            break_stop_offset=break_stop_offset,
            train_param_values=train_params,
            add_breaks=train_on_breaks,
            min_break_samples=min_break_samples,
            min_trial_samples=min_trial_samples)
    else:
        log.info("Not adapting model...")
        online_trainer = NoTrainer()
    coordinator = OnlineCoordinator(data_processor, online_model, online_trainer,
        pred_gap=pred_gap)
    hostname = ''
    server = PredictionServer((hostname, incoming_port), coordinator=coordinator,
        ui_hostname=ui_hostname, ui_port=ui_port, plot_sensors=plot_sensors,
        use_ui_server=use_ui_server, save_data=save_data,
        model_base_name=base_name, adapt_model=adapt_model)
    # Compilation takes some time, so initialize the trainer already
    # here, before waiting for a connection in the server
    online_trainer.initialize()
    if adapt_model and load_old_data:
        online_trainer.add_data_from_today(data_processor)
    log.info("Starting server on port {:d}".format(incoming_port))
    server.start()
    log.info("Started server")
    server.serve_forever()
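The 'newest' branch above relies on time-stamped file names, where lexicographic order is also chronological order, so the last entry of the sorted glob is the most recent file. A minimal sketch with hypothetical file names:

# With names like '<base>.<YYYY-MM-DD_HH-MM-SS>.model_params.npy',
# sorted() already orders the files from oldest to newest.
all_params_files = sorted([
    'net.2016-05-03_09-00-00.model_params.npy',
    'net.2016-05-04_18-45-12.model_params.npy',
    'net.2016-05-04_10-20-30.model_params.npy',
])
params_filename = all_params_files[-1]
print(params_filename)  # net.2016-05-04_18-45-12.model_params.npy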
Example #4
def main(ui_hostname, ui_port, base_name, params_filename, plot_sensors,
        use_ui_server, adapt_model, save_data, n_updates_per_break, batch_size,
        learning_rate, n_min_trials, trial_start_offset, break_start_offset,
        break_stop_offset, pred_freq, incoming_port, load_old_data,
        use_new_adam_params, input_time_length):
    setup_logging()
    assert np.little_endian, "Machine should be little-endian"
    train_params = None # for trainer, e.g. adam params
    if params_filename is not None:
        if params_filename == 'newest':
            # sort will already sort temporally with our time string format
            all_params_files = sorted(glob(base_name + ".*.model_params.npy"))
            assert len(all_params_files) > 0, ("Expect at least one params file "
                "if 'newest' is given as argument")
            params_filename = all_params_files[-1]
        log.info("Loading model params from {:s}".format(params_filename))
        params = np.load(params_filename)
        train_params_filename = params_filename.replace('model_params.npy',
            'trainer_params.npy')
        if os.path.isfile(train_params_filename):
            if use_new_adam_params:
                log.info("Loading trainer params from {:s}".format(train_params_filename))
                train_params = np.load(train_params_filename)
        else:
            log.warn("No train/adam params found, starting optimization params "
                "from scratch (model params will be loaded anyway).")
    else:
        params = np.load(base_name + '.npy')
    exp = create_experiment(base_name + '.yaml')
    
    # Possibly change the input time length, for example if the input time
    # length was very long during training and should be shorter for online
    # prediction
    if input_time_length is not None:
        log.info("Change input time length to {:d}".format(input_time_length))
        set_input_window_length(exp.final_layer, input_time_length)
        # probably unnecessary, just for safety
        exp.iterator.input_time_length = input_time_length
    # Parameters have to be set both for the exp final layer and for the
    # actually used model, as the exp final layer might be used for adaptation
    # (maybe check this all for correctness?)
    cnt_model = exp.final_layer
    lasagne.layers.set_all_param_values(cnt_model, params)
    prediction_model = transform_to_normal_net(cnt_model)
    lasagne.layers.set_all_param_values(prediction_model, params)
    
    data_processor = StandardizeProcessor(factor_new=1e-3)
    online_model = OnlineModel(prediction_model)
    if adapt_model:
        online_trainer = BatchWiseCntTrainer(exp, n_updates_per_break, 
            batch_size, learning_rate, n_min_trials, trial_start_offset,
            break_start_offset=break_start_offset,
            break_stop_offset=break_stop_offset,
            train_param_values=train_params)
    else:
        log.info("Not adapting model...")
        online_trainer = NoTrainer()
    coordinator = OnlineCoordinator(data_processor, online_model, online_trainer,
        pred_freq=pred_freq)
    hostname = ''
    server = PredictionServer((hostname, incoming_port), coordinator=coordinator,
        ui_hostname=ui_hostname, ui_port=ui_port, plot_sensors=plot_sensors,
        use_ui_server=use_ui_server, save_data=save_data,
        model_base_name=base_name, adapt_model=adapt_model)
    # Compilation takes some time, so initialize the trainer already
    # here, before waiting for a connection in the server
    online_trainer.initialize()
    if adapt_model and load_old_data:
        online_trainer.add_data_from_today(data_processor)
    log.info("Starting server on port {:d}".format(incoming_port))
    server.start()
    log.info("Started server")
    server.serve_forever()
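For context, a minimal sketch of the parameter round trip that the loading code above assumes, on a hypothetical two-layer network; note that newer numpy versions require allow_pickle=True when loading object arrays:

import numpy as np
import lasagne

inp = lasagne.layers.InputLayer(shape=(None, 10))
out = lasagne.layers.DenseLayer(inp, num_units=2)

# get_all_param_values returns a list of numpy arrays (here [W, b]) that can
# be stored with np.save and fed back into set_all_param_values.
param_values = lasagne.layers.get_all_param_values(out)
np.save('model_params.npy', np.array(param_values, dtype=object))

loaded = np.load('model_params.npy', allow_pickle=True)
lasagne.layers.set_all_param_values(out, list(loaded))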