# Experiment on the first (and only) building.
b = dataset.buildings[1]

# Filter to keep only the top-3 appliances (by energy consumption).
# NOTE: the original comment claimed "top 8" while the code passes 3;
# the code is taken as the source of truth.
b = filter_top_k_appliances(b, 3)

# Divide the data into train and test.
train, test = train_test_split(b)
# Again subdivide data into train, test for testing on even smaller data
# train, test = train_test_split(train, test_size=.5)

# Initialize the FHMM disaggregator.
disaggregator = FHMM()

# Take the first mains measurement's series for the disaggregation feature.
# list(...) is required on Python 3, where dict views are not indexable.
train_mains = train.utility.electric.mains[
    list(train.utility.electric.mains.keys())[0]][DISAGG_FEATURE]

# Collect per-appliance series, keeping only appliances that actually
# carry the disaggregation feature.
app = train.utility.electric.appliances
train_appliances = pd.DataFrame({appliance: app[appliance][DISAGG_FEATURE]
                                 for appliance in app
                                 if DISAGG_FEATURE in app[appliance]})

# Train and report wall-clock runtime.
t1 = time.time()
disaggregator.train(train, disagg_features=[DISAGG_FEATURE])
t2 = time.time()
print("Runtime to train = {:.2f} seconds".format(t2 - t1))
# In[ ]:
from nilmtk.disaggregate.fhmm_exact import create_combined_hmm

# In[ ]:
# Combine the individually learnt appliance HMMs (from an earlier cell)
# into a single factorial HMM used for exact joint inference.
learnt_model_combined = create_combined_hmm(new_learnt_models)

# In[ ]:
from nilmtk.disaggregate.fhmm_exact import FHMM

# In[16]:
f = FHMM()

# In[17]:
# Attach both representations: the combined model for decoding, and the
# individual per-appliance models (presumably used to map decoded states
# back to appliances — confirm in FHMM.disaggregate).
f.model = learnt_model_combined
f.individual = new_learnt_models

# In[18]:
import pickle

# In[19]:
# Persist the fitted disaggregator.  The context manager guarantees the
# file handle is closed (and flushed) even if pickling raises — the
# original `pickle.dump(f, open(...))` leaked the handle.
with open("../fhmm_model_all_%d.p" % mins, "wb") as model_file:
    pickle.dump(f, model_file)

# In[8]:
print("Runtime to export to HDF5 = {:.2f}".format(t2 - t1))

# Re-import the data from the HDF5 store and time it.
t1 = time.time()
dataset = pecan.Pecan_15min()
dataset.load_hdf5(EXPORT_PATH)
t2 = time.time()
print("Runtime to importing from HDF5 = {:.2f}".format(t2 - t1))

# Analysis on Home_10 (buildings index 9 — presumably the dataset's
# numbering offset maps 9 -> Home_10; verify against the loader).
b = dataset.buildings[9]
train, test = train_test_split(b)

# Initialize the FHMM disaggregator (the original comment mislabelled
# this as "CO 1D").
disaggregator = FHMM()

# Take the first mains measurement's series for the disaggregation feature.
# list(...) is required on Python 3, where dict views are not indexable.
train_mains = train.utility.electric.mains[
    list(train.utility.electric.mains.keys())[0]][DISAGG_FEATURE]

# Collect per-appliance series; guard on feature presence to avoid a
# KeyError for appliances lacking DISAGG_FEATURE (consistent with the
# other experiment scripts in this file).
app = train.utility.electric.appliances
train_appliances = pd.DataFrame({appliance: app[appliance][DISAGG_FEATURE]
                                 for appliance in app
                                 if DISAGG_FEATURE in app[appliance]})

# Train
disaggregator.train(train_mains, train_appliances)

# Disaggregate the held-out test mains.
disaggregator.disaggregate(test.utility.electric.mains[
    list(test.utility.electric.mains.keys())[0]][DISAGG_FEATURE])

# Metrics
from nilmtk.disaggregate.fhmm_exact import create_combined_hmm # In[ ]: learnt_model_combined = create_combined_hmm(new_learnt_models) # In[ ]: from nilmtk.disaggregate.fhmm_exact import FHMM # In[16]: f = FHMM() # In[17]: f.model = learnt_model_combined f.individual = new_learnt_models # In[18]: import pickle # In[19]:
# Get data of Home_01 building = dataset.buildings[1] # Preprocessing: print(' :::::::::: Dividing data into test and train') lenToUse = len(building.utility.electric.appliances.items()[0][1]) train, test = train_test_split( building, train_size=int(float(1) / 2.0 * lenToUse), test_size=int(float(1) / 2.0 * lenToUse) ) # This splits the dataset in 1/2 for training and 1/2 for testing. Test-training ratio should be set by users ### ---------- Disaggregation with FHMM ---------- # # --- training model disaggregator = FHMM() disaggregator_name = "FHMM" print(' :::::::::: Training FHMM disaggregator') t1 = time.time() disaggregator.train(train, disagg_features=[DISAGG_FEATURE]) t2 = time.time() print("Runtime to train for {} = {:.2f} seconds".format( disaggregator_name, t2 - t1)) train_time = t2 - t1 # --- disaggregation print(' :::::::::: Starting disaggregation') t1 = time.time() disaggregator.disaggregate(test) t2 = time.time() print("Runtime to disaggregate for {}= {:.2f} seconds".format(