def test_select(self):
    fridge_meter = ElecMeter()
    fridge = Appliance({'type': 'fridge', 'instance': 1})
    fridge_meter.appliances = [fridge]
    mg = MeterGroup([fridge_meter])
    self.assertEqual(mg.select_using_appliances(category='cold'), mg)

def test_proportion_of_energy_submetered(self):
    meters = []
    for i in [1, 2, 3]:
        meter_meta = self.datastore.load_metadata('building1')['elec_meters'][i]
        meter_id = ElecMeterID(i, 1, 'REDD')
        meter = ElecMeter(self.datastore, meter_meta, meter_id)
        meters.append(meter)
    mains = meters[0]
    mg = MeterGroup(meters)
    self.assertEqual(mg.proportion_of_energy_submetered(), 1.0)

def test_wiring_graph(self):
    meter1 = ElecMeter(metadata={'site_meter': True},
                       meter_id=ElecMeterID(1, 1, 'REDD'))
    meter2 = ElecMeter(metadata={'submeter_of': 1},
                       meter_id=ElecMeterID(2, 1, 'REDD'))
    meter3 = ElecMeter(metadata={'submeter_of': 2},
                       meter_id=ElecMeterID(3, 1, 'REDD'))
    mg = MeterGroup([meter1, meter2, meter3])
    wiring_graph = mg.wiring_graph()
    self.assertIs(mg.mains(), meter1)
    self.assertEqual(mg.meters_directly_downstream_of_mains(), [meter2])
    self.assertEqual(wiring_graph.nodes(), [meter2, meter3, meter1])

def test_dual_supply(self):
    elec_meters = {
        1: {'data_location': '/building1/elec/meter1', 'device_model': 'Energy Meter'},
        2: {'data_location': '/building1/elec/meter1', 'device_model': 'Energy Meter'},
        3: {'data_location': '/building1/elec/meter1', 'device_model': 'Energy Meter'}
    }
    appliances = [
        {'type': 'washer dryer', 'instance': 1, 'meters': [1, 2]},
        {'type': 'fridge', 'instance': 1, 'meters': [3]}
    ]
    mg = MeterGroup()
    mg.load(self.datastore, elec_meters, appliances, BuildingID(1, 'REDD'))
    self.assertEqual(mg['washer dryer'].total_energy()['active'],
                     mg['fridge'].total_energy()['active'] * 2)
    self.assertIsInstance(mg['washer dryer'], MeterGroup)
    self.assertIsInstance(mg['fridge'], ElecMeter)

def test_full_results_with_no_sections_raises_runtime_error(self):
    mg = MeterGroup([ElecMeter(), ElecMeter()])
    with self.assertRaises(RuntimeError):
        mg.dropout_rate(full_results=True)
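
# Note (not in the original excerpt): the test methods above read like members of a
# unittest.TestCase for MeterGroup; the class header and the setUp() that opens
# self.datastore are not shown. A hypothetical scaffold for running them:
#
#     import unittest
#
#     class TestMeterGroup(unittest.TestCase):   # assumed class name
#         # ... test methods above, plus setUp() providing self.datastore ...
#         pass
#
#     if __name__ == '__main__':
#         unittest.main()
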
def combinatorial_optimisation(dataset_path, train_building, train_start, train_end,
                               val_building, val_start, val_end,
                               test_building, test_start, test_end,
                               meter_key, sample_period):
    # Start tracking time
    start = time.time()

    # Prepare dataset and options
    # print("========== OPEN DATASETS ============")
    train = DataSet(dataset_path)
    train.set_window(start=train_start, end=train_end)
    val = DataSet(dataset_path)
    val.set_window(start=val_start, end=val_end)
    test = DataSet(dataset_path)
    test.set_window(start=test_start, end=test_end)

    train_elec = train.buildings[train_building].elec
    val_elec = val.buildings[val_building].elec
    test_elec = test.buildings[test_building].elec

    appliances = [meter_key]
    selected_meters = [train_elec[app] for app in appliances]
    selected_meters.append(train_elec.mains())
    selected = MeterGroup(selected_meters)

    co = CombinatorialOptimisation()

    # print("========== TRAIN ============")
    co.train(selected, sample_period=sample_period)

    # print("========== DISAGGREGATE ============")
    # Validation
    val_disag_filename = 'disag-out-val.h5'
    output = HDFDataStore(val_disag_filename, 'w')
    co.disaggregate(val_elec.mains(), output_datastore=output)
    output.close()

    # Test
    test_disag_filename = 'disag-out-test.h5'
    output = HDFDataStore(test_disag_filename, 'w')
    co.disaggregate(test_elec.mains(), output_datastore=output)
    output.close()

    # print("========== RESULTS ============")
    # Validation
    result_val = DataSet(val_disag_filename)
    res_elec_val = result_val.buildings[val_building].elec
    rpaf_val = metrics.recall_precision_accuracy_f1(res_elec_val[meter_key], val_elec[meter_key])

    val_metrics_results_dict = {
        'recall_score': rpaf_val[0],
        'precision_score': rpaf_val[1],
        'accuracy_score': rpaf_val[2],
        'f1_score': rpaf_val[3],
        'mean_absolute_error': metrics.mean_absolute_error(res_elec_val[meter_key], val_elec[meter_key]),
        'mean_squared_error': metrics.mean_square_error(res_elec_val[meter_key], val_elec[meter_key]),
        'relative_error_in_total_energy': metrics.relative_error_total_energy(res_elec_val[meter_key], val_elec[meter_key]),
        'nad': metrics.nad(res_elec_val[meter_key], val_elec[meter_key]),
        'disaggregation_accuracy': metrics.disaggregation_accuracy(res_elec_val[meter_key], val_elec[meter_key])
    }

    # Test
    result = DataSet(test_disag_filename)
    res_elec = result.buildings[test_building].elec
    rpaf = metrics.recall_precision_accuracy_f1(res_elec[meter_key], test_elec[meter_key])

    test_metrics_results_dict = {
        'recall_score': rpaf[0],
        'precision_score': rpaf[1],
        'accuracy_score': rpaf[2],
        'f1_score': rpaf[3],
        'mean_absolute_error': metrics.mean_absolute_error(res_elec[meter_key], test_elec[meter_key]),
        'mean_squared_error': metrics.mean_square_error(res_elec[meter_key], test_elec[meter_key]),
        'relative_error_in_total_energy': metrics.relative_error_total_energy(res_elec[meter_key], test_elec[meter_key]),
        'nad': metrics.nad(res_elec[meter_key], test_elec[meter_key]),
        'disaggregation_accuracy': metrics.disaggregation_accuracy(res_elec[meter_key], test_elec[meter_key])
    }

    # End tracking time
    end = time.time()
    time_taken = end - start  # in seconds

    # model_result_data = {
    #     'algorithm_name': 'CO',
    #     'datapath': dataset_path,
    #     'train_building': train_building,
    #     'train_start': str(train_start.date()) if train_start != None else None,
    #     'train_end': str(train_end.date()) if train_end != None else None,
    #     'test_building': test_building,
    #     'test_start': str(test_start.date()) if test_start != None else None,
    #     'test_end': str(test_end.date()) if test_end != None else None,
    #     'appliance': meter_key,
    #     'sampling_rate': sample_period,
    #
    #     'algorithm_info': {
    #         'options': {
    #             'epochs': None
    #         },
    #         'hyperparameters': {
    #             'sequence_length': None,
    #             'min_sample_split': None,
    #             'num_layers': None
    #         },
    #         'profile': {
    #             'parameters': None
    #         }
    #     },
    #
    #     'metrics': metrics_results_dict,
    #
    #     'time_taken': format(time_taken, '.2f'),
    # }

    model_result_data = {
        'val_metrics': val_metrics_results_dict,
        'test_metrics': test_metrics_results_dict,
        'time_taken': format(time_taken, '.2f'),
        'epochs': None,
    }

    # Close disaggregation output files
    result.store.close()
    result_val.store.close()

    # Close DataSet files
    train.store.close()
    val.store.close()
    test.store.close()

    return model_result_data
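

# Example usage (not in the original source): a minimal sketch of how
# combinatorial_optimisation() might be called. The dataset path, building
# numbers, date windows and appliance key below are hypothetical placeholders.
if __name__ == '__main__':
    results = combinatorial_optimisation(
        dataset_path='redd.h5',          # assumed NILMTK-converted HDF5 file
        train_building=1, train_start='2011-04-18', train_end='2011-05-01',
        val_building=1, val_start='2011-05-01', val_end='2011-05-08',
        test_building=1, test_start='2011-05-08', test_end='2011-05-15',
        meter_key='fridge', sample_period=60)
    print(results['test_metrics'], results['time_taken'])
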
def test_all(path_to_directory):
    '''
    path_to_directory: Contains the h5 files on which the tests are supposed to be run
    '''
    check_directory_exists(path_to_directory)
    #files=[f for f in listdir(path_to_directory) and '.h5' in f and '.swp' not in f]
    files = [f for f in listdir(path_to_directory)
             if isfile(join(path_to_directory, f)) and '.h5' in f and '.swp' not in f]
    files.sort()
    print("Datasets collected and sorted. Processing...")

    try:
        for i, file in enumerate(files):
            current_file = DataSet(join(path_to_directory, file))
            print("Printing metadata for current file...done.")
            print_dict(current_file.metadata)
            print("Loading file #", i, ":", file, ". Please wait.")
            for building_number in range(1, len(current_file.buildings) + 1):
                # Examine metadata for a single house
                elec = current_file.buildings[building_number].elec
                print("The dataset being processed is:", elec.dataset())
                print("Metadata for current file:")
                print_dict(current_file.buildings[building_number].metadata)
                print("Appliance label information:", elec.appliance_label())
                #print(elec.appliances)
                print("Appliances:-")
                for appliance in elec.appliances:
                    print(appliance)
                print("Examining sub-metered appliances...")
                print("Collecting stats on meters...Done.")
                print(elec._collect_stats_on_all_meters)  # prints the bound method object
                print("Timeframe:", elec.get_timeframe())
                print("Available power AC types:", elec.available_power_ac_types())
                print("Clearing cache...done.")
                elec.clear_cache()
                print("Testing if there are meters from multiple buildings. "
                      "Result returned by method:", elec.contains_meters_from_multiple_buildings())
                # TODO: Find a better way to test the correlation function
                # print("Testing the correlation function.", elec.correlation(elec))
                print("List of disabled meters:", elec.disabled_meters)
                print("Trying to determine the dominant appliance:")
                try:
                    elec.dominant_appliance()
                except RuntimeError:
                    print('''More than one dominant appliance in MeterGroup!
                          (The dominant appliance per meter should be manually specified in the metadata.
                          If it isn't and if there are multiple appliances for a meter then NILMTK assumes
                          all appliances on that meter are dominant.
                          NILMTK can't automatically distinguish between multiple appliances on the same meter
                          (at least, not without using NILM!))''')
                print("Dropout rate:", elec.dropout_rate())
                try:
                    print("Calculating energy per meter:")
                    print(elec.energy_per_meter())
                    print("Calculating total entropy")
                    print(elec.entropy())
                    print("Calculating entropy per meter:")
                    print(elec.entropy_per_meter())
                except ValueError:
                    print("ValueError: Total size of array must remain unchanged.")
                print("Calculating fraction per meter.")
                print(elec.fraction_per_meter())
                #print("Average energy per period:", elec.average_energy_per_period())

                print("Executing functions...")
                lis = []
                func = ""
                '''for function in dir(elec):
                    try:
                        start = time.time()
                        if ("__" not in function or "dataframe_of_meters" not in function):
                            func = getattr(elec, function)
                            print("Currently executing ", function, ". Please wait...")
                            print(func())
                            # print("cProfile stats - printed")
                            # cProfile.run("func")
                        end = time.time()
                        print("Time taken for the entire process : ", (end - start))
                    except AttributeError:
                        print("Attribute error occurred.")
                    except TypeError:
                        lis.append(function)
                        print("Warning: TypeError")
                        pass'''
                print("Plotting wiring hierarchy of meters....")
                elec.draw_wiring_graph()

                ## DISAGGREGATION STARTS HERE
                appliance_type = "unknown"
                # TODO: appliance_type should cycle through all appliances and check
                # for each of them. For this, use a list.
                selected_appliance = nilmtk.global_meter_group.select_using_appliances(type=appliance_type)
                appliance_restricted = MeterGroup(selected_appliance.meters)
                proportion_per_appliance = appliance_restricted.proportion_of_upstream_total_per_meter()
                if proportion_per_appliance is not None:
                    proportion_per_appliance.plot(kind='bar')
                    plt.title('Appliance energy as proportion of total building energy')
                    plt.ylabel('Proportion')
                    plt.xlabel('Appliance (<appliance instance>, <building instance>, <dataset name>)')

                selected_appliance.select(building=building_number).total_energy()
                selected_appliance.select(building=1).plot()

                daily_energy = pd.DataFrame([meter.average_energy_per_period(offset_alias='D')
                                             for meter in appliance_restricted.meters])
                daily_energy.plot(kind='hist')
                plt.title('Histogram of daily energy')
                plt.xlabel('energy (kWh)')
                plt.ylabel('Occurrences')
                plt.legend().set_visible(False)

                current_file.store.window = TimeFrame(start='2012-04-01 00:00:00-05:00',
                                                      end='2012-04-02 00:00:00-05:00')
                #elec.plot()

                fraction = elec.submeters().fraction_per_meter().dropna()
                labels = elec.get_appliance_labels(fraction.index)
                plt.figure(figsize=(8, 8))
                fraction.plot(kind='pie', labels=labels)

                elec.select_using_appliances(category='heating')
                elec.select_using_appliances(category='single-phase induction motor')

                co = CombinatorialOptimisation()
                co.train(elec)

                for model in co.model:
                    print_dict(model)

                # NOTE: data_dir is assumed to be defined elsewhere in the original script
                disag_filename = join(data_dir, 'ampds-disag.h5')
                output = HDFDataStore(disag_filename, 'w')
                co.disaggregate(elec.mains(), output)
                output.close()

                disag = DataSet(disag_filename)
                disag_elec = disag.buildings[building_number].elec

                f1 = f1_score(disag_elec, elec)
                f1.index = disag_elec.get_appliance_labels(f1.index)
                f1.plot(kind='bar')
                plt.xlabel('appliance')
                plt.ylabel('f-score')
                disag_elec.plot()

                disag.store.close()
    except AttributeError:
        print("AttributeError occurred while executing. This means that the value returned by "
              "proportion_per_appliance = appliance_restricted.proportion_of_upstream_total_per_meter() is None")
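

# Example usage (not in the original source): a minimal, hypothetical sketch of
# running test_all() over a directory of NILMTK-converted .h5 datasets; the path
# below is a placeholder.
if __name__ == '__main__':
    test_all('/path/to/h5_datasets')
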
TZ = pytz.timezone(TZ_STRING)

elec = dataset.buildings[1].elec
submeters = elec.meters_directly_downstream_of_mains()

# Select appliances used in top K plot
APPLIANCES = ['fridge freezer', 'HTPC', 'dish washer', 'washer dryer', 'kettle']
selected_meters = [submeters[appliance] for appliance in APPLIANCES]

remainder = []
for meter in submeters.meters:
    for appliance in APPLIANCES:
        if meter.matches_appliances({'type': appliance}):
            break
    else:
        remainder.append(meter)
remainder = MeterGroup(remainder)
remainder.name = 'Other submeters'

selected_meters = MeterGroup(selected_meters[:2] + [remainder] + selected_meters[2:])
selected_meters['HTPC'].name = 'Home theatre PC'

# Reverse the colour palette so it matches top_5_energy
colors = sns.color_palette('deep')
colors.reverse()
colors = [colors[i] for i in [4, 2, 5, 1, 3, 0]]
sns.set_palette(colors)

# Set window
DATE = "2014-12-07"
next_day = pd.Timestamp(DATE) + timedelta(days=1)
dataset.set_window(DATE, next_day)
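
# A possible next step (not in the original snippet): plot the selected meters over
# the one-day window set above. MeterGroup.plot() draws one line per meter; the
# y-axis label below is an illustrative assumption.
ax = selected_meters.plot()
ax.set_ylabel('Power (W)')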
test.set_window(start=split_point)

train_elec = train.buildings[b_id].elec
test_elec = test.buildings[b_id].elec
test_mains = test_elec.mains()

# Fridge elec
fridge_elec_train = train_elec[('fridge', fridge_instance)]
fridge_elec_test = test_elec[('fridge', fridge_instance)]
num_states_dict = {fridge_elec_train: num_states}

# Finding top N appliances
top_k_train_list = top_k_dict[str(f_id)][:K]
print("Top %d list is " % (K), top_k_train_list)
top_k_train_elec = MeterGroup([
    m for m in ds.buildings[b_id].elec.meters
    if m.instance() in top_k_train_list
])

if not os.path.exists("%s/%s/" % (BASH_RUN_FRIDGE, out_file_name)):
    os.makedirs("%s/%s" % (BASH_RUN_FRIDGE, out_file_name))

# Add this fridge to training if this fridge is not in top-k
if fridge_elec_train not in top_k_train_elec.meters:
    top_k_train_elec.meters.append(fridge_elec_train)

try:
    clf_name = classifier
    clf = cls_dict[clf_name]
    print("-" * 80)
    print("Training on %s" % clf_name)
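    # (The original excerpt is truncated here.) A hedged sketch of the steps that
    # typically follow in experiments of this shape, assuming `clf` exposes the
    # standard NILMTK disaggregator API (train() / disaggregate()) and that
    # HDFDataStore is imported; the output filename is a placeholder.
    clf.train(top_k_train_elec)
    disag_store = HDFDataStore("%s/%s/disag-out.h5" % (BASH_RUN_FRIDGE, out_file_name), 'w')
    clf.disaggregate(test_mains, disag_store)
    disag_store.close()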