class TestHomeassistant(unittest.TestCase):

    def setUp(self):
        # set of observations
        self.ctrl = Controller()
        self.ctrl.set_dataset(Dataset.HASS)
        self.ctrl.load_dataset()
        self.hass_obj = self.ctrl._dataset  # type: DatasetHomeassistant
        # algo = self.get_sel_algorithm()
        # dataset = self.get_dataset_by_name(dataset_name)
        # ctrl, dk, hmm_model = self._init_model_on_dataset(algo, dataset)
        # ctrl.set_dataset(dk)
        #
        # if dataset_name == DATASET_NAME_HASS:
        #     # get activities
        #     act_list = []
        #     for act in Activity.objects.all():
        #         act_list.append(act.name)
        #     dev_list = []
        #     for dev in Device.objects.all():
        #         if dev.location is not None:
        #             dev_list.append(dev.component.name + "." + dev.name)
        #     print('*'*100)
        #     print(act_list)
        #     print(dev_list)
        #     print('*'*100)
        #     ctrl.set_custom_state_list(act_list)
        #     ctrl.set_custom_obs_list(dev_list)
        #
        # ctrl.load_dataset()
        # ctrl.register_model(hmm_model)

    def tearDown(self):
        pass

    def test_load_data(self):
        pass

    def test_print_hass_df(self):
        df = self.hass_obj._df
        print(DatasetHomeassistant.format_mat_full(df))

    def test_hashmaps(self):
        print(self.hass_obj.get_state_lbl_hashmap())
        print(self.hass_obj.get_obs_lbl_hashmap())

    def test_get_train_seqs(self):
        tr_seqs = self.hass_obj.get_train_seq()
        print(tr_seqs)
        print(self.hass_obj.decode_obs_seq(tr_seqs))

    def test_get_test_seq(self):
        test_seqs = self.hass_obj._test_seqs
        print(test_seqs)
        lbl_seqs, obs_seqs = self.hass_obj.get_test_labels_and_seq()
        print('-' * 20)
        print('lbl_seqs: ', lbl_seqs)
        print('obs_seqs: ', obs_seqs)
def main():
    ctrl = Controller()
    ctrl.load_dataset_from_file(DATASET_FILE_PATH)

    # load model
    model_name = 'bhmm'
    ctrl.load_model(MODEL_FILE_PATH, model_name)
    ctrl.create_model_agnostics(model_name)

    file_path = MODEL_FOLDER_PATH + '/' + MODEL_NAME + '.feature_importance.png'
    ctrl.save_plot_feature_importance(model_name, file_path)
def main():
    ctrl = Controller()
    ctrl.load_dataset_from_file(DATASET_FILE_PATH)
    print()

    # load model
    ctrl.load_model(MODEL_FILE_PATH, 'bhmm')
    ctrl.load_model(MODEL_FILE_PATH2, 'bhsmm')

    state_sel = ['dental_care', 'outside_activity', 'learning']
    #state_sel = ['dental_care', 'enter_home', 'kitchen_social_activity']
    img_file_path = MODEL_FOLDER_PATH + '/' + 'comparision.png'
    ctrl.compare_dur_dists_and_save('bhmm', 'bhsmm', state_sel, img_file_path)
def _create_dataset(self, person, datainstance):
    db_path = self._create_hass_db_path()
    activity_file_path = self._create_activity_file_path(person)
    ctrl = Controller(
        path_to_config=settings.HASSBRAIN_ALGO_CONFIG,
        config={'datasets': {datainstance.name: db_path}})
    params = {
        'repr': DataRep(datainstance.data_rep),
        'data_format': 'bernoulli',
        'test_selection': datainstance.test_sel,
        'freq': datainstance.timeslicelength
    }
    ctrl.set_dataset(data_name=datainstance.name,
                     data_type=Datasettype.HASS,
                     params=params)
def _create_ctrl_for_normal_dataset(self, algorithm, dataset):
    """
    generates an instance of controller and model
    :param algorithm:
    :param dataset:
    :return:
    """
    from hassbrain_algorithm.controller import Controller

    ctrl = Controller(settings.HASSBRAIN_ALGO_CONFIG)  # type: Controller
    dk = self.train_get_ds_type_by_name(dataset.class_name)
    ctrl.set_dataset(dk)
    model_object = self._create_class_from_name(ctrl, algorithm)
    ctrl.load_dataset()
    ctrl.register_model(model_object)
    return ctrl
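# Usage sketch (an assumption for illustration, not part of the original module):
# the controller returned above is typically driven with the same unnamed-model
# calls that the test suites further below use. The method name _train_and_report
# is hypothetical.
def _train_and_report(self, algorithm, dataset):
    # build a fully wired controller with the helper above
    ctrl = self._create_ctrl_for_normal_dataset(algorithm, dataset)
    ctrl.init_model_on_dataset()
    ctrl.register_benchmark()
    ctrl.train_model()
    # same report flags the test suites use
    return ctrl.create_report(conf_matrix=True, accuracy=True,
                              precision=True, recall=True, f1=True)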
def main():
    dk = DK
    data_name = DATA_NAME

    # set of observations
    ctrl_config = {
        'datasets': {
            'kasteren': {
                'path_to_config': '/home/cmeier/code/data/hassbrain/datasets/kasteren/config.yaml'
            },
            'hass_testing': {
                'path_to_config': '/home/cmeier/code/data/hassbrain/datasets/hass_testing/config.yaml'
            },
            'hass_testing2': {
                'path_to_config': '/home/cmeier/code/data/hassbrain/datasets/hass_testing2/config.yaml'
            },
            'hass_chris': {
                'path_to_config': '/home/cmeier/code/data/hassbrain/datasets/hass_chris/config.yaml'
            },
            'hass_chris_final': {
                'path_to_config': '/home/cmeier/code/data/hassbrain/datasets/hass_chris_final/config.yaml'
            }
        }
    }
    ctrl = Controller(config=ctrl_config)

    params = {
        'repr': DATA_TYPE,
        'data_format': 'bernoulli',
        'test_selection': TEST_SEL,
        'test_day': TEST_DAY,
        'freq': TIME_DIFF,
        'include_idle': False
    }
    ctrl.set_dataset(data_name=data_name, data_type=dk, params=params)

    dataset = ctrl._dataset  # type: _Dataset
    join_two_seperate_databses(dataset)
    load_rest(dataset)

    #act_stats = dataset.get_act_stats()
    #dev_stats = dataset.get_dev_stats()

    ctrl.save_dataset(DATASET_FILE_PATH)
    #print('filepath: ', DATASET_FILE_PATH)
    print('*' * 10)
def main():
    dk = DK
    data_name = DATA_NAME

    # set of observations
    ctrl_config = {
        'datasets': {
            'kasteren': {
                'path_to_config': '/home/cmeier/code/data/hassbrain/datasets/kasteren/config.yaml'
            },
            'hass_testing': {
                'path_to_config': '/home/cmeier/code/data/hassbrain/datasets/hass_testing/config.yaml'
            },
            'hass_testing2': {
                'path_to_config': '/home/cmeier/code/data/hassbrain/datasets/hass_testing2/config.yaml'
            },
            'hass_chris': {
                'path_to_config': '/home/cmeier/code/data/hassbrain/datasets/hass_chris/config.yaml'
            }
        }
    }
    ctrl = Controller(config=ctrl_config)

    params = {
        'repr': DATA_TYPE,
        'data_format': 'bernoulli',
        'test_selection': TEST_SEL,
        'test_day': TEST_DAY,
        'freq': TIME_DIFF
    }
    ctrl.set_dataset(data_name=data_name, data_type=dk, params=params)
    ctrl.load_dataset()

    tmp1 = ctrl._dataset.get_dev_stats()
    tmp2 = ctrl._dataset.get_act_stats()

    #ctrl.save_dataset(DATASET_FILE_PATH)
    #print('filepath: ', DATASET_FILE_PATH)
    print('*' * 10)
def _create_ctrl_for_hass_instance(self, algorithm, person, datainstance, model_name):
    """ the case where the data should be loaded from the active
    homeassistant instance """
    from hassbrain_algorithm.controller import Controller

    ctrl = Controller(path_to_config=settings.HASSBRAIN_ALGO_CONFIG)
    ctrl.load_dataset_from_file(datainstance.data_file)
    model_object = self._create_class_from_name(ctrl, algorithm)
    self._set_custom_act_n_dev(ctrl)
    ctrl.register_model(model_object, model_name)

    #if algorithm.location:
    #    loc_data = AlgorithmView.get_location_data()
    #    ctrl.register_location_info(loc_data)

    #if algorithm.synthetic_activities:
    #    act_data = AlgorithmView.get_activity_data(algorithm.selected_person)
    #    ctrl.register_activity_info(act_data)

    return ctrl
def main():
    ctrl = Controller()
    ctrl.load_dataset_from_file(DATASET_FILE_PATH)

    #hmm_model = BHMMTestModel(ctrl)
    #hmm_model = BHSMMTestModel(ctrl)
    from hassbrain_algorithm.models.hmm.bhmm_hp import BernoulliHMM_HandcraftedPriors
    hmm_model = BernoulliHMM_HandcraftedPriors(ctrl)
    ctrl.register_model(hmm_model, MODEL_NAME)

    # load domain knowledge
    path = '/home/cmeier/code/data/hassbrain/datasets/hass_chris_final/data/domain_knowledge.json'
    act_data, loc_data = load_domain_knowledge(path)
    ctrl.register_location_info(MODEL_NAME, loc_data)
    ctrl.register_activity_info(MODEL_NAME, act_data)

    # load model
    #ctrl.load_model(MODEL_FILE_PATH, MODEL_NAME)
    from scripts.test_model import BHSMMTestModel

    ctrl.register_benchmark(MODEL_NAME)
    ctrl.init_model_on_dataset(MODEL_NAME)
    ctrl.register_loss_file_path(MD_LOSS_FILE_PATH, MODEL_NAME)
    ctrl.train_model(MODEL_NAME)

    # bench the model
    reports = ctrl.bench_models()

    # save metrics
    ctrl.save_df_metrics_to_file(MODEL_NAME, MD_METRICS_FILE_PATH)
    ctrl.save_df_confusion(MODEL_NAME, MD_CONF_MAT_FILE_PATH)
    ctrl.save_df_act_dur_dists(MODEL_NAME, MD_ACT_DUR_DISTS_DF_FILE_PATH,
                               DATA_ACT_DUR_DISTS_DF_FILE_PATH)

    # plots
    ctrl.save_plot_trainloss(MD_LOSS_IMG_FILE_PATH, MODEL_NAME)
    ctrl.plot_and_save_inferred_states(MD_INFST_IMG_FILE_PATH, MODEL_NAME)
    ctrl.save_plot_act_dur_dists(MODEL_NAME, MD_ACT_DUR_DISTS_IMG_FILE_PATH,
                                 DATA_ACT_DUR_DISTS_IMG_FILE_PATH)
class TestPendigitsModel(unittest.TestCase):

    def setUp(self):
        self.ctrl = Controller()
        self.ctrl.load_dataset(Dataset.PENDIGITS)
        pd_model = ModelPendigits(self.ctrl, "test")
        self.ctrl.register_model(pd_model)
        self.dset = self.ctrl._dataset  # type: DatasetPendigits
        self.pd_model = self.ctrl._model

    def test_init_hmms(self):
        self.ctrl.init_model_on_dataset()

    def test_save_hmms(self):
        self.ctrl.init_model_on_dataset()
        self.ctrl.save_model()

    def test_load_hmms(self):
        self.ctrl.load_model()
        # attention: manually refresh reference
        self.pd_model = self.ctrl._model

    def test_train_hmms(self):
        self.ctrl.init_model_on_dataset()
        self.ctrl.train_model()

    def test_train_n_save_hmms(self):
        self.ctrl.init_model_on_dataset()
        self.ctrl.train_model()
        self.ctrl.save_model()

    def test_pre_bench_hmms(self):
        self.ctrl.load_model()
        self.pd_model = self.ctrl._model
        y_true, y_pred = self.pd_model.create_pred_act_seqs(self.dset)

    def test_bench_hmms(self):
        self.ctrl.load_model()
        self.pd_model = self.ctrl._model
        self.ctrl.register_benchmark()
        rep = self.ctrl.create_report(conf_matrix=True, accuracy=True,
                                      precision=True, recall=True, f1=True)
        print(rep)

    def test_generate_obs(self):
        self.ctrl.load_model()
        self.pd_model = self.ctrl._model
        self.ctrl.generate_observations()

    def test_generate_obs_n_plot(self):
        # numbers that were good in benchmark (desc):
        # [0, 8, 4, 9, 3, 7]
        self.ctrl.load_model()
        ds = self.ctrl._dataset  # type: DatasetPendigits
        pdmod = self.ctrl._model  # type: ModelPendigits
        num = 1
        zero_start = [8, 1, 1]
        one_start = [8, 2, 1, 1, 1]
        start_seq = one_start
        hmm = pdmod._model_dict[num]
        hmm.set_format_full(True)
        print(hmm)
        pdmod.select_number(num)
        seq = pdmod.generate_observations(start_seq)
        ds.plot_obs_seq(seq, num)

    def tearDown(self):
        pass
class TestHomeassistantModelHMMLogScaled(unittest.TestCase):
    # Model part

    def setUp(self):
        # set of observations
        self.ctrl = Controller()
        self.ctrl.set_dataset(Dataset.HASS_TESTING)
        self.hass_obj = self.ctrl._dataset  # type: DatasetHomeassistant
        self.hmm_model = PreConfHMM(self.ctrl)

    def tearDown(self):
        pass

    def test_load_custom_lists_modelHMM(self):
        custom_state_list = ['sleeping', 'cooking']
        custom_obs_list = [
            'binary_sensor.motion_bed',
            'binary_sensor.motion_mirror',
            'binary_sensor.motion_pc',
            'switch.test_switch_1',
            'light.test_light'
        ]
        hmm_model = self.hmm_model
        self.ctrl.set_custom_state_list(custom_state_list)
        self.ctrl.set_custom_obs_list(custom_obs_list)
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        print(self.ctrl._model)

    def test_load_modelHMM(self):
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        print(self.ctrl._model)
        print(self.hass_obj.get_obs_lbl_hashmap())
        print(self.hass_obj.get_state_lbl_hashmap())

    def test_train_modelHMM(self):
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        print(self.ctrl._model)
        self.ctrl.train_model()
        print(self.ctrl._model)

    def test_bench_modelHMM(self):
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        print(self.ctrl._model)
        self.ctrl.register_benchmark()
        self.ctrl.train_model()
        print(self.ctrl._model)
        report = self.ctrl.create_report(conf_matrix=True, accuracy=True,
                                         precision=True, recall=True, f1=True)
        print(report)

    def test_classify(self):
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.train_model()
        hmm_model._hmm.set_format_full(True)
        print(hmm_model)
        print('-' * 10)
        obs_seq = [('binary_sensor.motion_bed', 0),
                   ('binary_sensor.motion_mirror', 1),
                   ('binary_sensor.motion_bed', 0)]
        pred_state = hmm_model.classify(obs_seq)
        print('#' * 100)
        print(pred_state)

    def test_classify_multi(self):
        """ used to test for classification of multiple labels """
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.train_model()
        hmm_model._hmm.set_format_full(True)
        print(hmm_model)
        print('-' * 10)
        obs_seq = [('binary_sensor.motion_bed', 0),
                   ('binary_sensor.motion_mirror', 1)]  #, ('binary_sensor.motion_bed', 0)]
        act_state_dict = hmm_model.classify_multi(obs_seq)
        print('#' * 100)
        print(act_state_dict)
        #print(act_state_dict)
        #print(hmm_model.get_state_label_list())

    def test_pred_next_obs_single(self):
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.train_model()
        hmm_model._hmm.set_format_full(True)
        #print(hmm_model)
        print('#' * 100)
        obs_seq = [('binary_sensor.motion_bed', 0),
                   ('binary_sensor.motion_mirror', 1),
                   ('binary_sensor.motion_bed', 0)]
        tupel = hmm_model.predict_next_obs(obs_seq)
        print(tupel)

    def test_pred_next_obs_multi(self):
        hmm_model = self.hmm_model
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.train_model()
        hmm_model._hmm.set_format_full(True)
        print(hmm_model)
        print('#' * 100)
        obs_seq = [('binary_sensor.motion_bed', 0),
                   ('binary_sensor.motion_mirror', 1),
                   ('binary_sensor.motion_bed', 0)]
        #arr = hmm_model.predict_next_obs_arr(obs_seq)
        print(hmm_model._obs_lbl_hashmap)
        print(hmm_model._obs_lbl_rev_hashmap)
        res_dict = hmm_model.predict_prob_xnp1(obs_seq)
        print(hmm_model._obs_lbl_hashmap)
        print(hmm_model._obs_lbl_rev_hashmap)
        res_dict = hmm_model.predict_prob_xnp1(obs_seq)
        print(hmm_model._obs_lbl_hashmap)
        print(hmm_model._obs_lbl_rev_hashmap)
        print('#' * 100)
        print(res_dict)

    def test_encode_loc_data(self):
        loc_data = [
            {"name": "loc1",
             "activities": ['cooking'],
             "devices": ['binary_sensor.motion_hallway',
                         'binary_sensor.motion_mirror']},
            {"name": "loc2",
             "activities": ['cooking', 'eating'],
             "devices": []},
            {"name": "loc3",
             "activities": ['sleeping'],
             "devices": ['binary_sensor.motion_bed']},
        ]
        hmm_model = self.hmm_model
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        #print('state_hm: ', hmm_model._state_lbl_hashmap)
        #print('obs_hm: ', hmm_model._obs_lbl_hashmap)
        #print('raw_loc_data: \t' + str(loc_data))
        enc_loc_data = hmm_model._encode_location_data(loc_data)
        #print('#'*100)
        #print('enc_loc_data: \t' + str(enc_loc_data))

    def test_encode_act_data(self):
        act_data = [
            {"name": "cooking", "day_of_week": 2,
             "start": datetime.time.fromisoformat("06:15:00"),
             "end": datetime.time.fromisoformat("08:45:00")},
            {"name": "eating", "day_of_week": 1,
             "start": datetime.time.fromisoformat("06:15:00"),
             "end": datetime.time.fromisoformat("08:45:00")},
            {"name": "eating", "day_of_week": 1,
             "start": datetime.time.fromisoformat("08:46:00"),
             "end": datetime.time.fromisoformat("10:00:00")},
        ]
        hmm_model = self.hmm_model
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        print('raw_act_data: \t' + str(act_data))
        print('state_hm: ', hmm_model._state_lbl_hashmap)
        print('obs_hm: ', hmm_model._obs_lbl_hashmap)
        print('#' * 100)
        enc_act_data = hmm_model._encode_act_data(act_data)
        print('enc_act_data: \t' + str(enc_act_data))

    def test_init(self):
        loc_data = [
            {"name": "loc1",
             "activities": ['cooking'],
             "devices": ['binary_sensor.motion_hallway',
                         'binary_sensor.motion_mirror']},
            {"name": "loc2",
             "activities": ['cooking', 'eating'],
             "devices": []},
            {"name": "loc3",
             "activities": ['sleeping'],
             "devices": ['binary_sensor.motion_bed']},
        ]
        act_data = [
            {"name": "sleeping", "day_of_week": 0,
             "start": datetime.time.fromisoformat("04:00:00"),
             "end": datetime.time.fromisoformat("06:15:00")},
            {"name": "cooking", "day_of_week": 0,
             "start": datetime.time.fromisoformat("06:15:00"),
             "end": datetime.time.fromisoformat("08:45:00")},
            {"name": "eating", "day_of_week": 0,
             "start": datetime.time.fromisoformat("08:46:00"),
             "end": datetime.time.fromisoformat("10:00:00")},
            {"name": "sleeping", "day_of_week": 0,
             "start": datetime.time.fromisoformat("12:00:00"),
             "end": datetime.time.fromisoformat("13:00:00")},
            {"name": "sleeping", "day_of_week": 1,
             "start": datetime.time.fromisoformat("02:00:00"),
             "end": datetime.time.fromisoformat("06:30:00")},
            {"name": "cooking", "day_of_week": 1,
             "start": datetime.time.fromisoformat("12:00:00"),
             "end": datetime.time.fromisoformat("13:00:00")},
            {"name": "cooking", "day_of_week": 2,
             "start": datetime.time.fromisoformat("19:00:00"),
             "end": datetime.time.fromisoformat("00:00:00")},
            {"name": "cooking", "day_of_week": 2,
             "start": datetime.time.fromisoformat("23:00:00"),
             "end": datetime.time.fromisoformat("00:00:00")},
            {"name": "sleeping", "day_of_week": 2,
             "start": datetime.time.fromisoformat("00:00:00"),
             "end": datetime.time.fromisoformat("03:00:00")},
        ]
        hmm_model = self.hmm_model
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.register_location_info(loc_data)
        print('raw_act_data: \t' + str(act_data))
        print('state_hm: ', hmm_model._state_lbl_hashmap)
        #print('obs_hm: ', hmm_model._obs_lbl_hashmap)
        self.ctrl.register_activity_info(act_data)
        #print('#'*100)
        #enc_act_data = hmm_model._encode_act_data(act_data)
        #print('enc_act_data: \t' + str(enc_act_data))
        self.ctrl.init_model_on_dataset()
        hmm = hmm_model._hmm
        hmm.set_format_full(True)
        self.assertAlmostEqual(1.0, Probs.prob_to_norm(hmm._pi.sum()), 6)
        self.assertTrue(hmm.verify_emission_matrix())
        self.assertTrue(hmm.verify_transition_matrix())
class TestController(unittest.TestCase):

    def setUp(self):
        # set of observations
        self.ctrl = Controller()  # type: Controller
        #self.hmm_model = ModelHMM(self.ctrl)
        #self.hmm_model = ModelHMM_scaled(self.ctrl)
        #self.hmm_model = ModelHMM_log(self.ctrl)
        self.hmm_model = ModelHMM_log_scaled(self.ctrl)

    def tearDown(self):
        pass

    def test_ctrl_get_bench_metrics(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.load_dataset(dk)
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.register_benchmark()
        # render
        #dot = self.ctrl.render_model()
        #dot.render('test.gv', view=True)
        self.ctrl.train_model(True)
        print(self.ctrl.get_bench_metrics())

    def test_ctrl_train_seqs(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.set_dataset(dk)
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.register_benchmark()
        # render
        #dot = self.ctrl.render_model()
        #dot.render('test.gv', view=True)
        self.ctrl.train_model()
        report = self.ctrl.create_report(conf_matrix=True, accuracy=True,
                                         precision=True, recall=True, f1=True)
        print(self.ctrl._model)
        print(report)
        #self.ctrl.show_plot()

    def test_ctrl_presentation(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.load_dataset(dk)
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.register_benchmark()
        # render
        #dot = self.ctrl.render_model()
        #dot.render('test.gv', view=True)
        self.ctrl.train_model(True)
        report = self.ctrl.create_report(conf_matrix=True, accuracy=True,
                                         precision=True, recall=True, f1=True)
        print(self.ctrl._model)
        print(report)
        #self.ctrl.show_plot()
        # render
        #dot = self.ctrl.render_model()
        #dot.render('test.gv', view=True)

    def test_generate_visualization(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.set_dataset(dk)
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        print('state_label_hm: ', hmm_model._state_lbl_hashmap)
        print('state_label_rev_hm: ', hmm_model._state_lbl_rev_hashmap)
        self.ctrl.save_visualization_to_file(
            '/home/cmeier/code/tmp/visualization.png')

    def test_bench_train_loss(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.set_dataset(dk)
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.register_benchmark()
        self.ctrl.register_loss_file_path(
            '/home/cmeier/code/tmp/kasteren/train_loss.log')
        self.ctrl.train_model()
        self.ctrl.save_model(
            '/home/cmeier/code/tmp/kasteren/kasteren_model.joblib')

    def test_bench_reports_conf_matrix(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.load_dataset(dk)
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.register_benchmark()
        self.ctrl.train_model()
        report = self.ctrl.create_report(conf_matrix=True)
        print(report)

    def test_bench_reports(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.load_dataset(dk)
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.register_benchmark()
        self.ctrl.train_model()
        report = self.ctrl.create_report(conf_matrix=True, accuracy=True,
                                         precision=True, recall=True, f1=True)
        print(report)
        #self.ctrl.show_plot()

    def test_bench_q_fct(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.load_dataset(dk)
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.register_benchmark()
        print(self.ctrl._model)
        # use dataset Kasteren and q_fct
        self.ctrl.train_model(True)
        report = self.ctrl.create_report()
        print(self.ctrl._model)
        print(report)
        #self.ctrl.show_plot()

    def test_om(self):
        #plt.figure(figsize=(10,6))
        #self.pom.plot()
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.load_dataset(dk)
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        dot = self.ctrl.render_model()
        dot.render('test.gv', view=True)

    def test_train_model(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.set_dataset(dk)
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        #print(hmm_model._hmm)
        self.ctrl.train_model()
        print('#' * 200)
        print(hmm_model)
        hmm_model._hmm.set_str_exp(True)
        hmm_model._hmm.set_format_full(True)
        print(hmm_model)
        #print(hmm_model._hmm.verify_transition_matrix())
        #print(hmm_model._hmm.verify_emission_matrix())
        #self._bench._model.draw()
        #self.ctrl.register_benchmark()
        #report = self.ctrl.create_report(True, True, True, True, True)
        #print(report)
        #self._bench.show_plot()

    def test_save_model(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.load_dataset(dk)
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        self.ctrl.train_model()
        self.ctrl.save_model()

    def test_init_model(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.set_dataset(dk)
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        print(self.ctrl._model)

    def test_load_model(self):
        hmm_model = self.hmm_model
        dk = Dataset.KASTEREN
        self.ctrl.set_dataset(dk)
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.load_model()
        print(self.ctrl._model)
def setUp(self):
    # set of observations
    self.ctrl = Controller()  # type: Controller
    self.hmm_model = PomHMM(self.ctrl)
class TestHomeassistantModelHMMLogScaled(unittest.TestCase):
    # Model part

    def setUp(self):
        # set of observations
        self.ctrl = Controller()
        self.ctrl.set_dataset(Dataset.HASS_TESTING)
        self.hass_obj = self.ctrl._dataset  # type: DatasetHomeassistant
        self.hmm_model = HMMForward(self.ctrl)

    def tearDown(self):
        pass

    def test_load_custom_lists_modelHMM(self):
        custom_state_list = ['sleeping', 'cooking']
        custom_obs_list = [
            'binary_sensor.motion_bed',
            'binary_sensor.motion_mirror',
            'binary_sensor.motion_pc',
            'switch.test_switch_1',
            'light.test_light'
        ]
        hmm_model = self.hmm_model
        self.ctrl.set_custom_state_list(custom_state_list)
        self.ctrl.set_custom_obs_list(custom_obs_list)
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        print(self.ctrl._model)

    def test_load_modelHMM(self):
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        print(self.ctrl._model)
        print(self.hass_obj.get_obs_lbl_hashmap())
        print(self.hass_obj.get_state_lbl_hashmap())

    def test_train_modelHMM(self):
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        print(self.ctrl._model)
        self.ctrl.train_model()
        print(self.ctrl._model)

    def test_bench_modelHMM(self):
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        hmm_model._hmm.set_format_full(True)
        print(self.ctrl._model)
        self.ctrl.register_benchmark()
        self.ctrl.train_model()
        print(self.ctrl._model)
        report = self.ctrl.create_report(conf_matrix=True, accuracy=True,
                                         precision=True, recall=True, f1=True)
        print(report)

    def test_classify(self):
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.train_model()
        hmm_model._hmm.set_format_full(True)
        print(hmm_model)
        print('-' * 10)
        obs_seq = [('binary_sensor.motion_bed', 0),
                   ('binary_sensor.motion_mirror', 1),
                   ('binary_sensor.motion_bed', 0)]
        pred_state = hmm_model.classify(obs_seq)
        print('#' * 100)
        print(pred_state)

    def test_classify_multi(self):
        """ used to test for classification of multiple labels """
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.train_model()
        hmm_model._hmm.set_format_full(True)
        print(hmm_model)
        print('-' * 10)
        obs_seq = [('binary_sensor.motion_bed', 0),
                   ('binary_sensor.motion_mirror', 1)]  #, ('binary_sensor.motion_bed', 0)]
        act_state_dict = hmm_model.classify_multi(obs_seq)
        print('#' * 100)
        print(act_state_dict)
        #print(act_state_dict)
        #print(hmm_model.get_state_label_list())

    def test_pred_next_obs_single(self):
        self.ctrl.load_dataset()
        hmm_model = self.hmm_model
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.train_model()
        hmm_model._hmm.set_format_full(True)
        #print(hmm_model)
        print('#' * 100)
        obs_seq = [('binary_sensor.motion_bed', 0),
                   ('binary_sensor.motion_mirror', 1),
                   ('binary_sensor.motion_bed', 0)]
        tupel = hmm_model.predict_next_obs(obs_seq)
        print(tupel)

    def test_pred_next_obs_multi(self):
        hmm_model = self.hmm_model
        self.ctrl.load_dataset()
        self.ctrl.register_model(hmm_model)
        self.ctrl.init_model_on_dataset()
        self.ctrl.train_model()
        hmm_model._hmm.set_format_full(True)
        print(hmm_model)
        print('#' * 100)
        obs_seq = [('binary_sensor.motion_bed', 0),
                   ('binary_sensor.motion_mirror', 1),
                   ('binary_sensor.motion_bed', 0)]
        #arr = hmm_model.predict_next_obs_arr(obs_seq)
        print(hmm_model._obs_lbl_hashmap)
        print(hmm_model._obs_lbl_rev_hashmap)
        res_dict = hmm_model.predict_prob_xnp1(obs_seq)
        print(hmm_model._obs_lbl_hashmap)
        print(hmm_model._obs_lbl_rev_hashmap)
        res_dict = hmm_model.predict_prob_xnp1(obs_seq)
        print(hmm_model._obs_lbl_hashmap)
        print(hmm_model._obs_lbl_rev_hashmap)
        print('#' * 100)
        print(res_dict)
def main():
    ctrl = Controller()
    ctrl.load_dataset_from_file(DATASET_FILE_PATH)

    if MODE == MODE_TRAIN:
        from scripts.test_model import BHMMTestModel
        from scripts.test_model import BHSMMTestModel
        from hassbrain_algorithm.models.hmm.bhmm_hp import BernoulliHMM_HandcraftedPriors
        from hassbrain_algorithm.models.tads import TADS

        if MODEL_CLASS == BHMM:
            hmm_model = BHMMTestModel(ctrl)
        elif MODEL_CLASS == BHSMM:
            hmm_model = BHSMMTestModel(ctrl)
            hmm_model.set_training_steps(50)
        elif MODEL_CLASS == BHMMPC:
            hmm_model = BernoulliHMM_HandcraftedPriors(ctrl)
        elif MODEL_CLASS == MCTADS:
            hmm_model = TADS(ctrl)
        else:
            raise ValueError

        ctrl.register_model(hmm_model, MODEL_NAME)

        # load domain knowledge
        if MODEL_CLASS == BHMMPC:
            path = '/home/cmeier/code/data/hassbrain/datasets/hass_chris_final/data/domain_knowledge.json'
            act_data, loc_data = load_domain_knowledge(path)
            ctrl.register_location_info(MODEL_NAME, loc_data)
            ctrl.register_activity_info(MODEL_NAME, act_data)

    # load model
    elif MODE == MODE_BENCH:
        ctrl.load_model(MODEL_FILE_PATH, MODEL_NAME)
    else:
        raise ValueError

    ctrl.register_benchmark(MODEL_NAME)
    ctrl.init_model_on_dataset(MODEL_NAME)

    if MODE == MODE_TRAIN:
        ctrl.register_loss_file_path(MD_LOSS_FILE_PATH, MODEL_NAME)
        ctrl.train_model(MODEL_NAME)
        ctrl.save_model(MODEL_FILE_PATH, MODEL_NAME)

    # bench the model
    reports = ctrl.bench_models()

    # save metrics
    ctrl.save_df_metrics_to_file(MODEL_NAME, MD_METRICS_FILE_PATH)
    ctrl.save_df_confusion(MODEL_NAME, MD_CONF_MAT_FILE_PATH)
    ctrl.save_df_act_dur_dists(MODEL_NAME, MD_ACT_DUR_DISTS_DF_FILE_PATH,
                               DATA_ACT_DUR_DISTS_DF_FILE_PATH)
    ctrl.save_df_class_accs(MODEL_NAME, MD_CLASS_ACTS_FILE_PATH)

    # plots
    if MODE == MODE_TRAIN and MODEL_CLASS != MCTADS:
        ctrl.save_plot_trainloss(MODEL_NAME, MD_LOSS_IMG_FILE_PATH)
    ctrl.save_plot_inferred_states(MODEL_NAME, MD_INFST_IMG_FILE_PATH)
    ctrl.save_plot_act_dur_dists([MODEL_NAME], MD_ACT_DUR_DISTS_IMG_FILE_PATH)
def main():
    ctrl = Controller()
    ctrl.load_dataset_from_file(DATASET_FILE_PATH)

    from scripts.test_model import BHMMTestModel
    from scripts.test_model import BHSMMTestModel
    from hassbrain_algorithm.models.hmm.bhmm_hp import BernoulliHMM_HandcraftedPriors
    from hassbrain_algorithm.models.tads import TADS

    if MODEL_CLASS == BHMM:
        hmm_model = BHMMTestModel(ctrl)
        hmm_model.set_training_steps(10)
    elif MODEL_CLASS == BHSMM:
        hmm_model = BHSMMTestModel(ctrl)
        hmm_model.set_training_steps(50)
    elif MODEL_CLASS == BHMMPC:
        hmm_model = BernoulliHMM_HandcraftedPriors(ctrl)
    elif MODEL_CLASS == MCTADS:
        hmm_model = TADS(ctrl)
    else:
        raise ValueError

    ctrl.register_model(hmm_model, MODEL_NAME)
    ctrl.register_benchmark(MODEL_NAME)
    ctrl.init_model_on_dataset(MODEL_NAME)
    ctrl.register_loss_file_path(MD_LOSS_FILE_PATH, MODEL_NAME)
    ctrl.train_model(MODEL_NAME)
    ctrl.save_model(MODEL_FILE_PATH, MODEL_NAME)

    # bench the model
    params = {
        'metrics': False,
        'act_dur_dist': True,
        'feature_importance': False
    }
    reports = ctrl.bench_models(**params)

    # save metrics
    #ctrl.save_df_metrics_to_file(MODEL_NAME, MD_METRICS_FILE_PATH)
    #ctrl.save_df_confusion(MODEL_NAME, MD_CONF_MAT_FILE_PATH)
    #ctrl.save_df_act_dur_dists(MODEL_NAME, MD_ACT_DUR_DISTS_DF_FILE_PATH,
    #                           DATA_ACT_DUR_DISTS_DF_FILE_PATH)
    #ctrl.save_df_class_accs(MODEL_NAME, MD_CLASS_ACTS_FILE_PATH)

    plot_files = {
        #'feature_importance': MD_FEATURE_IMP_PLT_FILE_PATH,
        'train_loss': MD_LOSS_IMG_FILE_PATH,
        #'inf_states': MD_INFST_IMG_FILE_PATH,
        'act_dur': MD_ACT_DUR_DISTS_IMG_FILE_PATH
    }
    ctrl.save_plots(MODEL_NAME, plot_files)
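# Hypothetical configuration header (assumption, for illustration only): the two
# scripts above reference module-level constants such as MODE, MODEL_CLASS,
# MODEL_NAME and various *_FILE_PATH values that are defined elsewhere in the
# project. The concrete values below are not from the original code; a header of
# this shape is merely one way those names could be wired up.
MODE_TRAIN, MODE_BENCH = 'train', 'bench'
BHMM, BHSMM, BHMMPC, MCTADS = 'bhmm', 'bhsmm', 'bhmmpc', 'mctads'

MODE = MODE_TRAIN          # or MODE_BENCH to load a saved model instead
MODEL_CLASS = BHSMM        # selects which model class is instantiated
MODEL_NAME = 'bhsmm'       # key under which the controller registers the model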
class TestDatasetPendigits(unittest.TestCase):

    def setUp(self):
        # set of observations
        self.cm = Controller()
        self.cm.load_dataset(Dataset.PENDIGITS)
        self.pd = self.cm._dataset  # type: DatasetPendigits

    def tearDown(self):
        pass

    def test_plotting(self):
        self.pd.load_data()
        self.pd.plot_example(12)

    def test_create_train_sequences(self):
        self.pd.load_data()
        enc_data, lengths = self.pd._create_train_seq(1)

    def test_get_double_strokes(self):
        self.pd.load_data()
        digit_data = self.pd._get_train_data_where_double_strokes(9)
        cnt = 0
        for example in digit_data:
            cnt += 1
            self.pd._plotUniPenData(example)
            if cnt == 50:
                break

    def test_train_seq_number(self):
        self.pd.load_data()
        for numn in range(0, 40):
            digit_data = self.pd._get_train_data_by_number(numn)
            for ex in range(0, 10):
                print('~' * 100)
                example = digit_data[ex + 5]
                print(example)
                self.pd._plotUniPenData(example)

    def test_create_test_sequences(self):
        self.pd.load_data()
        enc_data, lengths = self.pd._create_test_seq(0)
        self.assertEqual(59, len(enc_data))
        self.assertEqual(59, lengths[0])

    def test_generate_points_plot_0(self):
        seq = [8, 1, 1, 0, 0, 0, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2,
               2, 1, 1, 9, 8, 1, 1, 1, 1, 1, 1]
        self.pd.plot_obs_seq(seq, 0)

    def test_generate_points_plot_1(self):
        seq = [8, 2, 1, 1, 1, 1, 1, 0, 6, 5, 5, 5, 5, 6, 5, 9, 8, 0, 0, 0, 0,
               0, 0, 0]
        self.pd.plot_obs_seq(seq, 1)

    def test_generate_points_plot_4(self):
        seq = [8, 6, 6, 6, 6, 6, 6, 6, 7, 7, 0, 0, 1, 0, 7, 0, 0, 0, 9, 8, 6,
               6, 6, 6, 6, 6, 6]
        self.pd.plot_obs_seq(seq, 4)

    def test_generate_points_plot_5(self):
        seq = [8, 6, 6, 6, 6, 6, 7, 0, 0, 0, 0, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4,
               4, 4, 9, 8, 0, 0, 0, 0, 0, 0]
        self.pd.plot_obs_seq(seq, 5)

    def test_generate_points_plot_7(self):
        seq = [8, 0, 0, 0, 0, 7, 6, 5, 5, 5, 5, 5, 5, 5, 5, 9, 8, 0, 0, 0, 0,
               0, 0]
        self.pd.plot_obs_seq(seq, 7)

    def test_generate_points_plot_8(self):
        seq = [8, 2, 1, 1, 0, 0, 7, 7, 6, 6, 6, 5, 5, 4, 4, 3, 3, 2, 9, 8, 2,
               1, 1, 0, 0, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2]
        self.pd.plot_obs_seq(seq, 8)

    def test_generate_points_plot_9(self):
        seq = [8, 3, 4, 5, 6, 6, 7, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 9, 8,
               7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 3]
        self.pd.plot_obs_seq(seq, 9)

    def test_directions_to_points(self):
        stepsize = 5
        self.pd._resolution = 8
        x0 = self.pd._new_point_x(0, 0, stepsize)
        y0 = self.pd._new_point_y(0, 0, stepsize)
        x1 = self.pd._new_point_x(0, 1, stepsize)
        y1 = self.pd._new_point_y(0, 1, stepsize)
        x2 = self.pd._new_point_x(0, 2, stepsize)
        y2 = self.pd._new_point_y(0, 2, stepsize)
        x3 = self.pd._new_point_x(0, 3, stepsize)
        y3 = self.pd._new_point_y(0, 3, stepsize)
        x4 = self.pd._new_point_x(0, 4, stepsize)
        y4 = self.pd._new_point_y(0, 4, stepsize)
        x5 = self.pd._new_point_x(0, 5, stepsize)
        y5 = self.pd._new_point_y(0, 5, stepsize)
        x6 = self.pd._new_point_x(0, 6, stepsize)
        y6 = self.pd._new_point_y(0, 6, stepsize)
        x7 = self.pd._new_point_x(0, 7, stepsize)
        y7 = self.pd._new_point_y(0, 7, stepsize)
        self.assertEqual(5.000, round(x0, 3))
        self.assertEqual(0.000, round(y0, 3))
        self.assertEqual(3.536, round(x1, 3))
        self.assertEqual(3.536, round(y1, 3))
        self.assertEqual(0.000, round(x2, 3))
        self.assertEqual(5.000, round(y2, 3))
        self.assertEqual(-3.536, round(x3, 3))
        self.assertEqual(3.536, round(y3, 3))
        self.assertEqual(-5.000, round(x4, 3))
        self.assertEqual(0.000, round(y4, 3))
        self.assertEqual(-3.536, round(x5, 3))
        self.assertEqual(-3.536, round(y5, 3))
        self.assertEqual(0.000, round(x6, 3))
        self.assertEqual(-5.000, round(y6, 3))
        self.assertEqual(3.536, round(x7, 3))
        self.assertEqual(-3.536, round(y7, 3))

    def test_points_to_direction(self):
        # directions 0 - 7
        # number of classes the direction can have
        c = 8

        ## ---- 0 degree
        direc = self.pd._points_to_direction(c, 0, 0, 1, 0)
        self.assertEqual(0, direc)

        ## ---- 45 degree
        direc = self.pd._points_to_direction(c, 0, 0, 1, 1)
        self.assertEqual(1, direc)

        ## ---- 90 degree
        direc = self.pd._points_to_direction(c, 0, 0, 0, 1)
        self.assertEqual(2, direc)

        ## ---- 135 degree
        direc = self.pd._points_to_direction(c, 0, 0, -1, 1)
        self.assertEqual(3, direc)

        ## ---- 180 degree
        direc = self.pd._points_to_direction(c, 0, 0, -1, 0)
        self.assertEqual(4, direc)

        ## ---- 225 degree
        direc = self.pd._points_to_direction(c, 0, 0, -1, -1)
        self.assertEqual(5, direc)

        ## ---- 270 degree
        direc = self.pd._points_to_direction(c, 0, 0, 0, -1)
        self.assertEqual(6, direc)

        ## ---- 315 degree
        direc = self.pd._points_to_direction(c, 0, 0, 1, -1)
        self.assertEqual(7, direc)

        # random other angles
        # ---- 52 degree
        direc = self.pd._points_to_direction(c, 0, 0, 0.61, 0.79)
        self.assertEqual(1, direc)

        ## ---- 100 degree
        direc = self.pd._points_to_direction(c, 0, 0, -0.18, 0.98)
        self.assertEqual(2, direc)

        # ---- 291 degree
        direc = self.pd._points_to_direction(c, 0, 0, 0.36, -0.93)
        self.assertEqual(6, direc)

        # ----- 350 degree
        direc = self.pd._points_to_direction(c, 0, 0, 0.98, -0.17)
        self.assertEqual(0, direc)

    def test_points_to_direction_2(self):
        c = 8
        # check if direction correct after pen is set on table or
        # after pen is removed from table
        xp = 0
        yp = 0
        # ----- 350 degree
        direc = self.pd._points_to_direction(c, xp, yp, 0.98, -0.17)
        self.assertEqual(0, direc)
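# Illustrative reference sketch (an assumption, not the DatasetPendigits
# implementation): the assertions above are consistent with _new_point_x/_new_point_y
# stepping from a coordinate along one of `resolution` equally spaced directions,
# and with _points_to_direction quantizing the angle between two points into c
# sectors, with sector 0 centred on 0 degrees. The standalone function names below
# are hypothetical stand-ins.
import math

def new_point(start_x, start_y, direction, stepsize, resolution=8):
    # move `stepsize` units along direction 0 .. resolution-1
    angle = 2.0 * math.pi * direction / resolution
    return start_x + stepsize * math.cos(angle), start_y + stepsize * math.sin(angle)

def points_to_direction(c, x0, y0, x1, y1):
    # quantize the angle of the vector (x1 - x0, y1 - y0) into c direction classes
    angle = math.degrees(math.atan2(y1 - y0, x1 - x0)) % 360.0
    sector = 360.0 / c  # 45 degrees for c == 8
    return int(((angle + sector / 2.0) // sector) % c)

# e.g. points_to_direction(8, 0, 0, 1, 1) == 1, and new_point(0, 0, 1, 5) is
# approximately (3.536, 3.536), matching the assertions in the tests above.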
def main():
    # set of observations
    ctrl = Controller()

    # todo this is only for testing interpretability
    from scripts.test_model import BHMMTestModel
    from scripts.test_model import BHSMMTestModel
    #hmm_model = BHMMTestModel(ctrl)
    hmm_model = BHSMMTestModel(ctrl)

    ctrl.load_dataset_from_file(DATASET_FILE_PATH)
    ctrl.register_model(hmm_model, MODEL_NAME)
    ctrl.register_benchmark(MODEL_NAME)
    ctrl.init_model_on_dataset(MODEL_NAME)
    #ctrl.register_loss_file_path(MD_LOSS_FILE_PATH, MODEL_NAME)
    ctrl.train_model(MODEL_NAME)
    print(MODEL_FILE_PATH)
    ctrl.save_model(MODEL_FILE_PATH, MODEL_NAME)
    print()
def main():
    ctrl = Controller()
    ctrl.load_dataset_from_file(DATASET_FILE_PATH)

    # load model
    model_name = 'bhmm'
    ctrl.load_model(MODEL_FILE_PATH, model_name)
    ctrl.create_model_agnostics(model_name)
    hm = ctrl.get_model(model_name)._obs_lbl_hashmap

    file_path = MODEL_FOLDER_PATH + '/' + MODEL_NAME + '.lime_learning.png'
    file_paths = [file_path]
    labels_to_explain = ['learning']

    import numpy as np
    # typical for activation pattern for dental care
    raw_feature_learning = np.array(
        [0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1], dtype=bool)
    ch_feature_learning = np.array(
        [0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], dtype=bool)
    lf_feature_learning = np.array(
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], dtype=bool)

    data = ctrl._dataset  # type: _Dataset
    ctrl.plot_and_save_explanation(model_name, ch_feature_learning,
                                   labels_to_explain, file_paths)