def hart85(start_train, end_train, start_test, end_test, train_elec):
    # Start training
    data.set_window(start_train, end_train)
    elec = data.buildings[1].elec
    hart = hart_85.Hart85()
    hart.train(train_elec, sample_period=1)

    # Start disaggregating
    data.set_window(start_test, end_test)
    disag_filename = './build/disagg_sum_hart85_{}_k.h5'.format(
        len(train_elec.meters))
    output = HDFDataStore(disag_filename, 'w')
    hart.disaggregate(elec.mains(), output)
    output.close()

    disag = DataSet(disag_filename)
    disag_elec = disag.buildings[1].elec
    disag_elec.plot()
    plt.title("Hart85")
    plt.show()

    # Calculate F1-score
    f1 = f1_score(disag_elec, train_elec)
    f1.index = disag_elec.get_labels(f1.index)
    f1.plot(kind='barh')
    plt.ylabel('appliance')
    plt.xlabel('f-score')
    plt.title("Hart85")
    plt.show()
def plot_f_score(self, disag_filename):
    plt.figure()
    from nilmtk.metrics import f1_score
    # `building` and `test_elec` are expected to be available in the
    # enclosing scope (e.g. set up before disaggregation is run).
    disag = DataSet(disag_filename)
    disag_elec = disag.buildings[building].elec
    f1 = f1_score(disag_elec, test_elec)
    f1.index = disag_elec.get_labels(f1.index)
    f1.plot(kind='barh')
    plt.ylabel('appliance')
    plt.xlabel('f-score')
    plt.title(type(self.model).__name__)
def co(start_train, end_train, start_test, end_test, train_elec):
    # Start training
    data.set_window(start_train, end_train)
    elec = data.buildings[1].elec
    co = CombinatorialOptimisation()
    co.train(train_elec, ac_type='active', physical_quantity='power',
             sample_period=1)

    # Start disaggregating
    data.set_window(start_test, end_test)
    disag_filename = './build/disagg_sum_co_{}_k.h5'.format(
        len(train_elec.meters))
    output = HDFDataStore(disag_filename, 'w')
    co.disaggregate(elec.mains(), output, ac_type='active',
                    physical_quantity='power', sample_period=1)
    output.close()

    # Write the train and test timeframes into a JSON file
    dates_dict = {
        "start_train": start_train,
        "end_train": end_train,
        "start_test": start_test,
        "end_test": end_test
    }
    with open(disag_filename + ".json", 'w') as dates_file:
        json.dump(dates_dict, dates_file)

    # Calculate F1-score
    disag = DataSet(disag_filename)
    disag_elec = disag.buildings[1].elec
    disag_elec.plot()
    plt.title("CO")
    plt.show()

    f1 = f1_score(disag_elec, train_elec)
    f1.index = disag_elec.get_labels(f1.index)
    f1.plot(kind='barh')
    plt.ylabel('appliance')
    plt.xlabel('f-score')
    plt.title("CO")
    plt.show()
def mle(start_train, end_train, start_test, end_test, train_elec):
    # Start training
    data.set_window(start_train, end_train)
    elec = data.buildings[1].elec
    mle = maximum_likelihood_estimation.MLE()
    mle.sample_period = "1s"
    mle.train(train_elec)

    # Start disaggregating
    data.set_window(start_test, end_test)
    disag_filename = './build/disagg_sum_mle_{}_k.h5'.format(
        len(train_elec.meters))
    output = HDFDataStore(disag_filename, 'w')
    mle.disaggregate(elec.mains(), output)
    output.close()

    # Write the train and test timeframes into a JSON file
    dates_dict = {
        "start_train": start_train,
        "end_train": end_train,
        "start_test": start_test,
        "end_test": end_test
    }
    with open(disag_filename + ".json", 'w') as dates_file:
        json.dump(dates_dict, dates_file)

    disag = DataSet(disag_filename)
    disag_elec = disag.buildings[1].elec
    disag_elec.plot()
    plt.title("MLE")
    plt.show()

    # Calculate F1-score
    f1 = f1_score(disag_elec, train_elec)
    f1.index = disag_elec.get_labels(f1.index)
    f1.plot(kind='barh')
    plt.ylabel('appliance')
    plt.xlabel('f-score')
    plt.title("MLE")
    plt.show()
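# --- Assumed setup for the hart85/co/mle helpers above (not shown in the
# original snippets). This is a minimal sketch: the dataset path, building
# number and window dates are hypothetical, and the disaggregator import
# locations move between nilmtk versions.
import json
import matplotlib.pyplot as plt
from nilmtk import DataSet, HDFDataStore
from nilmtk.metrics import f1_score
from nilmtk.disaggregate import (CombinatorialOptimisation, hart_85,
                                 maximum_likelihood_estimation)

data = DataSet('./build/dataset.h5')  # hypothetical converted dataset
train_elec = data.buildings[1].elec.submeters().select_top_k(k=5)

# Example call: train on one week, disaggregate the following week.
co('2016-01-01', '2016-01-08', '2016-01-08', '2016-01-15', train_elec)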
building_number = 3
disag_filename = join(data_dir, 'disag-fhmm' + str(building_number) + '.h5')
data = DataSet(join(data_dir, 'redd.h5'))
print("Loading building " + str(building_number))
elec = data.buildings[building_number].elec
top_train_elec = elec.submeters().select_top_k(k=5)

fhmm = fhmm_exact.FHMM()
fhmm.train(top_train_elec)
output = HDFDataStore(disag_filename, 'w')
fhmm.disaggregate(elec.mains(), output)
output.close()

### F1-score for FHMM
disag = DataSet(disag_filename)
disag_elec = disag.buildings[building_number].elec
f1 = f1_score(disag_elec, elec)
f1.index = disag_elec.get_labels(f1.index)
f1.plot(kind='barh')
plt.ylabel('appliance')
plt.xlabel('f-score')
plt.title("FHMM")
plt.tight_layout()
plt.savefig(join(data_dir, 'f1-fhmm' + str(building_number) + '.png'))
disag.store.close()
####

print("Finishing building " + str(building_number))
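# --- Assumed context for the REDD/FHMM script above (and its near-identical
# variant further below): `data_dir` and the imports are not shown. A minimal
# sketch, with a hypothetical data directory matching the later variant:
import matplotlib.pyplot as plt
from os.path import join
from nilmtk import DataSet, HDFDataStore
from nilmtk.disaggregate import fhmm_exact
from nilmtk.metrics import f1_score

data_dir = '/data/REDD'  # hypothetical; the variant below uses this same path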
print(middleTimeStr)
train.set_window(end=middleTimeStr)
test.set_window(start=middleTimeStr)
train_elec = train.buildings[building_number].elec
test_elec = test.buildings[building_number].elec
top_train_elec = train_elec.submeters().select_top_k(k=5)

fhmm = fhmm_exact.FHMM()
# TODO (mk): change this back to the default sample period later
fhmm.train(top_train_elec, sample_period=60, resample=True)

outputAddress = "/nilmtk/data/iawe_449_3.h5"
output = HDFDataStore(outputAddress, 'w')
fhmm.disaggregate(test_elec.mains(), output, sample_period=60, resample=True)
output.close()

disag = DataSet(outputAddress)  # load the FHMM prediction
disag_elec = disag.buildings[building_number].elec
# disag_elec.plot()  # plot all disaggregated data

f1 = f1_score(disag_elec, test_elec)
f1.index = disag_elec.get_labels(f1.index)
f1.plot(kind='barh')

disag.store.window = TimeFrame(start='2013-07-10 18:00:00-05:00',
                               end='2013-07-17 04:00:00-05:00')
disag.buildings[building_number].elec.plot()  # plot disaggregated data in the window
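# --- Assumed setup for the iAWE/FHMM split above: `train`, `test`,
# `building_number` and `middleTimeStr` are defined elsewhere. A plausible
# sketch (file path and building number are hypothetical) loads the same
# dataset twice and splits it at the midpoint of the recorded timeframe.
from nilmtk import DataSet, HDFDataStore, TimeFrame
from nilmtk.disaggregate import fhmm_exact
from nilmtk.metrics import f1_score

building_number = 1
train = DataSet('/nilmtk/data/iawe.h5')
test = DataSet('/nilmtk/data/iawe.h5')
tf = train.buildings[building_number].elec.mains().get_timeframe()
middleTimeStr = str(tf.start + (tf.end - tf.start) / 2)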
data_dir = '/data/REDD'
building_number = 3
disag_filename = join(data_dir, 'disag-fhmm' + str(building_number) + '.h5')
data = DataSet(join(data_dir, 'redd.h5'))
print("Loading building " + str(building_number))
elec = data.buildings[building_number].elec
top_train_elec = elec.submeters().select_top_k(k=5)

fhmm = fhmm_exact.FHMM()
fhmm.train(top_train_elec)
output = HDFDataStore(disag_filename, 'w')
fhmm.disaggregate(elec.mains(), output)
output.close()

### F1-score for FHMM
disag = DataSet(disag_filename)
disag_elec = disag.buildings[building_number].elec
f1 = f1_score(disag_elec, elec)
f1.index = disag_elec.get_labels(f1.index)
f1.plot(kind='barh')
plt.ylabel('appliance')
plt.xlabel('f-score')
plt.title("FHMM")
plt.savefig(join(data_dir, 'f1-fhmm' + str(building_number) + '.png'))
disag.store.close()
####

print("Finishing building " + str(building_number))
def f1_score(disag, original):
    # Thin wrapper around nilmtk.metrics.f1_score
    return nilmtk_metrics.f1_score(disag, original)
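# Minimal usage sketch for the wrapper above. It assumes nilmtk.metrics was
# imported as `nilmtk_metrics`; the file names below are hypothetical.
import nilmtk.metrics as nilmtk_metrics
from nilmtk import DataSet

disag_elec = DataSet('disag-output.h5').buildings[1].elec     # predictions
ground_truth = DataSet('ground-truth.h5').buildings[1].elec   # submetered data
scores = f1_score(disag_elec, ground_truth)  # pandas Series, one score per meter
print(scores)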
output = HDFDataStore(str(dum_outfile), 'w')
print('\n== dum.disaggregate(dataset.buildings[%d].mains(), output)'
      % (disag_building))
dum.disaggregate(dataset.buildings[disag_building].elec.mains(), output)
output.close()

### Results
print('\n== Plotting Dummy disaggregation results...')
da_data = DataSet(str(dum_outfile))
da_elec = da_data.buildings[disag_building].elec
ax = da_elec.plot()
ax.set_title("B%d Dummy disaggregation results" % (disag_building))
plt.savefig('results/%s__b%d__elec__dummy.png' % (dataset_name, disag_building))
plt.clf()

f1 = f1_score(da_elec, dataset.buildings[disag_building].elec)
f1.index = da_elec.get_labels([int(i) for i in f1.index])
ax = f1.plot(kind='barh')
ax.set_ylabel('appliance')
ax.set_xlabel('f-score')
ax.set_title("B%d Dummy disaggregation accuracy" % (disag_building))
plt.savefig('results/%s__b%d__fscore__dummy.png' % (dataset_name, disag_building))
plt.clf()
da_data.store.close()

## CO training and disaggregation
### Training
co = CombinatorialOptimisation()
print('\n== co.train(dataset.buildings[%d].elec)' % (train_building))
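# The snippet above stops right after announcing CO training. A plausible
# continuation, mirroring the Dummy branch above (the output file name is
# hypothetical), might look like this:
co.train(dataset.buildings[train_building].elec)
co_outfile = 'results/%s__b%d__co.h5' % (dataset_name, disag_building)
output = HDFDataStore(co_outfile, 'w')
co.disaggregate(dataset.buildings[disag_building].elec.mains(), output)
output.close()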
def test_all(path_to_directory):
    '''
    path_to_directory: Contains the h5 files on which the tests are supposed to be run
    '''
    check_directory_exists(path_to_directory)
    #files=[f for f in listdir(path_to_directory) and '.h5' in f and '.swp' not in f]
    files = [f for f in listdir(path_to_directory)
             if isfile(join(path_to_directory, f))
             and '.h5' in f and '.swp' not in f]
    files.sort()
    print("Datasets collected and sorted. Processing...")
    try:
        for i, file in enumerate(files):
            current_file = DataSet(join(path_to_directory, file))
            print("Printing metadata for current file...done.")
            print_dict(current_file.metadata)
            print("Loading file # ", i, " : ", file, ". Please wait.")
            for building_number in range(1, len(current_file.buildings) + 1):
                # Examine metadata for a single house
                elec = current_file.buildings[building_number].elec
                print("The dataset being processed is : ", elec.dataset())
                print("Metadata for current file: ")
                print_dict(current_file.buildings[building_number].metadata)
                print("Appliance label information: ", elec.appliance_label())
                #print(elec.appliances)
                print("Appliances:- ")
                for appliance in elec.appliances:
                    print(appliance)
                print("Examining sub-metered appliances...")
                print("Collecting stats on meters...Done.")
                print(elec._collect_stats_on_all_meters)
                print("Timeframe: ", elec.get_timeframe())
                print("Available power AC types: ", elec.available_power_ac_types())
                print("Clearing cache...done.")
                elec.clear_cache()
                print("Testing if there are meters from multiple buildings. "
                      "Result returned by method: ",
                      elec.contains_meters_from_multiple_buildings())
                # TODO: Find a better way to test the correlation function
                # print("Testing the correlation function. ", elec.correlation(elec))
                print("List of disabled meters: ", elec.disabled_meters)
                print("Trying to determine the dominant appliance: ")
                try:
                    elec.dominant_appliance()
                except RuntimeError:
                    print('''More than one dominant appliance in MeterGroup!
                    (The dominant appliance per meter should be manually specified
                    in the metadata. If it isn't and if there are multiple appliances
                    for a meter then NILMTK assumes all appliances on that meter are
                    dominant. NILMTK can't automatically distinguish between multiple
                    appliances on the same meter (at least, not without using NILM!))''')
                print("Dropout rate: ", elec.dropout_rate())
                try:
                    print("Calculating energy per meter:")
                    print(elec.energy_per_meter())
                    print("Calculating total entropy")
                    print(elec.entropy())
                    print("Calculating entropy per meter: ")
                    print(elec.entropy_per_meter())
                except ValueError:
                    print("ValueError: Total size of array must remain unchanged.")
                print("Calculating fraction per meter.")
                print(elec.fraction_per_meter())
                #print("Average energy per period: ", elec.average_energy_per_period())
                print("Executing functions...")
                lis = []
                func = ""
                '''for function in dir(elec):
                    try:
                        start = time.time()
                        if ("__" not in function or "dataframe_of_meters" not in function):
                            func = getattr(elec, function)
                            print("Currently executing ", function, ". Please wait...")
                            print(func())
                            # print("cProfile stats - printed")
                            # cProfile.run("func")
                        end = time.time()
                        print("Time taken for the entire process : ", (end - start))
                    except AttributeError:
                        print("Attribute error occurred.")
                    except TypeError:
                        lis.append(function)
                        print("Warning: TypeError")'''
                print("Plotting wiring hierarchy of meters....")
                elec.draw_wiring_graph()

                ## DISAGGREGATION STARTS HERE
                # TODO: appliance_type should cycle through all appliances and
                # check each of them. For this, use a list.
                appliance_type = "unknown"
                selected_appliance = nilmtk.global_meter_group.select_using_appliances(
                    type=appliance_type)
                appliance_restricted = MeterGroup(selected_appliance.meters)
                if appliance_restricted.proportion_of_upstream_total_per_meter() is not None:
                    proportion_per_appliance = appliance_restricted.proportion_of_upstream_total_per_meter()
                    proportion_per_appliance.plot(kind='bar')
                    plt.title('Appliance energy as proportion of total building energy')
                    plt.ylabel('Proportion')
                    plt.xlabel('Appliance (<appliance instance>, <building instance>, <dataset name>)')
                    selected_appliance.select(building=building_number).total_energy()
                    selected_appliance.select(building=1).plot()
                    appliance_restricted = MeterGroup(selected_appliance.meters)
                    daily_energy = pd.DataFrame(
                        [meter.average_energy_per_period(offset_alias='D')
                         for meter in appliance_restricted.meters])
                    daily_energy.plot(kind='hist')
                    plt.title('Histogram of daily energy')
                    plt.xlabel('energy (kWh)')
                    plt.ylabel('Occurrences')
                    plt.legend().set_visible(False)
                    current_file.store.window = TimeFrame(start='2012-04-01 00:00:00-05:00',
                                                          end='2012-04-02 00:00:00-05:00')
                    #elec.plot()
                    fraction = elec.submeters().fraction_per_meter().dropna()
                    labels = elec.get_appliance_labels(fraction.index)
                    plt.figure(figsize=(8, 8))
                    fraction.plot(kind='pie', labels=labels)
                    elec.select_using_appliances(category='heating')
                    elec.select_using_appliances(category='single-phase induction motor')
                    co = CombinatorialOptimisation()
                    co.train(elec)
                    for model in co.model:
                        print_dict(model)
                    disag_filename = join(data_dir, 'ampds-disag.h5')
                    output = HDFDataStore(disag_filename, 'w')
                    co.disaggregate(elec.mains(), output)
                    output.close()
                    disag = DataSet(disag_filename)
                    disag_elec = disag.buildings[building_number].elec
                    f1 = f1_score(disag_elec, elec)
                    f1.index = disag_elec.get_appliance_labels(f1.index)
                    f1.plot(kind='bar')
                    plt.xlabel('appliance')
                    plt.ylabel('f-score')
                    disag_elec.plot()
                    disag.store.close()
    except AttributeError:
        print("AttributeError occurred while executing. This means that the value "
              "returned by proportion_per_appliance = "
              "appliance_restricted.proportion_of_upstream_total_per_meter() is None")
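# Hedged usage sketch for test_all(): point it at a directory of converted
# .h5 datasets (the path below is hypothetical). Note that the disaggregation
# part also expects `data_dir` and the usual nilmtk imports (DataSet,
# HDFDataStore, MeterGroup, TimeFrame, CombinatorialOptimisation, f1_score,
# print_dict) to be defined at module level.
test_all('/data/converted_datasets/')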