Example #1
def test_convert_random_dataset():
    input_filepath = r'C:\Users\CVLab\Documents\nilm\nilmtk\data\random.h5'
    output_filepath = r'C:\Users\CVLab\Documents\nilm\nilmtk\data\random_csv'

    if os.path.isdir(output_filepath):
        shutil.rmtree(output_filepath)

    input_store = HDFDataStore(input_filepath)
    output_store = CSVDataStore(output_filepath)

    convert_datastore(input_store, output_store)

    input_store.close()
    output_store.close()
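
A quick sanity check after a conversion like the one above is to reopen both stores and compare one key. The sketch below is an added illustration, not part of the original example: it reuses input_filepath and output_filepath from above, and assumes the dataset contains the usual /building1/elec/meter1 key and that DataStore.load() yields DataFrame chunks.

input_store = HDFDataStore(input_filepath)
output_store = CSVDataStore(output_filepath)

key = '/building1/elec/meter1'  # assumed key; adjust to your dataset
df_hdf = next(input_store.load(key))  # load() yields DataFrame chunks
df_csv = next(output_store.load(key))
assert len(df_hdf) == len(df_csv)

input_store.close()
output_store.close()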
Example #2
def test_convert_random_dataset():
    input_filepath = 'data/random.h5'
    output_filepath = 'data/random_csv'

    if os.path.isdir(output_filepath):
        shutil.rmtree(output_filepath)

    input_store = HDFDataStore(input_filepath)
    output_store = CSVDataStore(output_filepath)

    convert_datastore(input_store, output_store)

    input_store.close()
    output_store.close()
Example #3
def test_convert_random_dataset():
    input_filepath = 'data/random.h5'
    output_filepath = 'data/random_csv'

    if os.path.isdir(output_filepath):
        shutil.rmtree(output_filepath)

    input_store = HDFDataStore(input_filepath)
    output_store = CSVDataStore(output_filepath)

    convert_datastore(input_store, output_store)

    input_store.close()
    output_store.close()
    
Example #4
    def test_co_correctness(self):
        elec = self.dataset.buildings[1].elec
        co = CombinatorialOptimisation()
        co.train(elec)
        mains = elec.mains()
        output = HDFDataStore("output.h5", "w")
        co.disaggregate(mains, output, resample_seconds=1)

        for meter in range(2, 4):
            df1 = output.store.get("/building1/elec/meter{}".format(meter))
            df2 = self.dataset.store.store.get("/building1/elec/meter{}".format(meter))

            self.assertEqual((df1 == df2).sum().values[0], len(df1.index))
            self.assertEqual(len(df1.index), len(df2.index))
        output.close()
        rm("output.h5")
Example #5
def get_datastore(filename, format=None, mode='r'):
    """
    Parameters
    ----------
    filename : string
    format : 'CSV' or 'HDF', default: infer from filename ending.
    mode : 'r' (read-only), 'a' (append) or 'w' (write), default: 'r'

    Returns
    -------
    datastore : HDFDataStore or CSVDataStore
    """
    if filename is None:
        raise ValueError('filename is None')

    if not format:
        if filename.endswith(".h5"):
            format = "HDF"
        elif filename.endswith(".csv"):
            format = "CSV"

    if format == "HDF":
        return HDFDataStore(filename, mode)
    elif format == "CSV":
        return CSVDataStore(filename)
    else:
        raise ValueError('format not recognised')
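
A minimal usage sketch for this helper (an added illustration; it assumes data/random.h5 exists). With no format argument the type is inferred from the file ending:

store = get_datastore('data/random.h5')  # format inferred from '.h5'
store.close()
store = get_datastore('data/random.h5', format='HDF', mode='r')  # explicit
store.close()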
Example #6
    def test_fhmm_correctness(self):
        elec = self.dataset.buildings[1].elec
        fhmm = FHMM()
        fhmm.train(elec)
        mains = elec.mains()
        output = HDFDataStore('output.h5', 'w')
        fhmm.disaggregate(mains, output, sample_period=1)

        for meter in range(2, 4):
            df1 = output.store.get('/building1/elec/meter{}'.format(meter))
            df2 = self.dataset.store.store.get(
                '/building1/elec/meter{}'.format(meter))

            self.assertEqual((df1 == df2).sum().values[0], len(df1.index))
            self.assertEqual(len(df1.index), len(df2.index))
        output.close()
        remove("output.h5")
Example #8
    def test_co_correctness(self):
        elec = self.dataset.buildings[1].elec
        co = CombinatorialOptimisation()
        co.train(elec)
        mains = elec.mains()
        output = HDFDataStore('output.h5', 'w')
        co.disaggregate(mains, output, resample_seconds=1)

        for meter in range(2, 4):
            df1 = output.store.get('/building1/elec/meter{}'.format(meter))
            df2 = self.dataset.store.store.get(
                '/building1/elec/meter{}'.format(meter))

            self.assertEqual((df1 == df2).sum().values[0], len(df1.index))
            self.assertEqual(len(df1.index), len(df2.index))
        output.close()
        remove("output.h5")
Example #9
    def import_model(self, filename):
        # Pickle files must be opened in binary mode ('rb').
        imported_model = pickle.load(open(filename, 'rb'))
        self.model = imported_model.model
        # recreate datastores from filenames
        for pair in self.model:
            pair['training_metadata'].store = HDFDataStore(
                pair['training_metadata'].store)
        self.state_combinations = imported_model.state_combinations
        self.MIN_CHUNK_LENGTH = imported_model.MIN_CHUNK_LENGTH
Example #10
    def import_model(self, filename):
        with open(filename, 'rb') as in_file:
            imported_model = pickle.load(in_file)

        self.model = imported_model.model

        # Recreate datastores from filenames
        for pair in self.model:
            store_filename = pair['training_metadata'].store
            pair['training_metadata'].store = HDFDataStore(store_filename)

        self.state_combinations = imported_model.state_combinations
        self.MIN_CHUNK_LENGTH = imported_model.MIN_CHUNK_LENGTH
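
These import_model variants assume that the matching export_model replaced each open datastore with its filename before pickling, since open HDF5 handles are not picklable. Below is a hedged sketch of that export side, mirroring the attribute layout used by the imports above; the .store.store.filename chain assumes the nilmtk HDFDataStore wraps a pandas HDFStore, as in the tests elsewhere on this page.

import pickle

def export_model(self, filename):
    # Swap each live HDFDataStore for its underlying filename so the
    # model can be pickled; import_model above reverses this step.
    for pair in self.model:
        pair['training_metadata'].store = (
            pair['training_metadata'].store.store.filename)
    with open(filename, 'wb') as out_file:
        pickle.dump(self, out_file)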
Example #11
    def import_model(self, filename):
        with open(filename, 'rb') as in_file:
            imported_model = pickle.load(in_file)

        self.model = imported_model.model
        self.individual = imported_model.individual

        # Recreate datastores from filenames
        for meter in self.individual.keys():
            store_filename = meter.store
            meter.store = HDFDataStore(store_filename)

        self.meters = list(self.individual.keys())
Example #12
def get_datastore(filename, format, mode='a'):
    """
    Parameters
    ----------
    filename : string
    format : 'CSV' or 'HDF'
    mode : 'a' (append) or 'w' (write), optional

    Returns
    -------
    datastore : HDFDataStore or CSVDataStore
    """
    if filename is not None:
        if format == 'HDF':
            return HDFDataStore(filename, mode)
        elif format == 'CSV':
            return CSVDataStore(filename)
        else:
            raise ValueError('format not recognised')
    else:
        raise ValueError('filename is None')
Example #13
    def setUpClass(cls):
        filename = join(data_dir(), 'energy_complex.h5')
        cls.datastore = HDFDataStore(filename)
        ElecMeter.load_meter_devices(cls.datastore)
        cls.meter_meta = cls.datastore.load_metadata(
            'building1')['elec_meters'][METER_ID.instance]
Example #14
        house_model.train(train_mains,
                          train_appliance_meter,
                          epochs=25,
                          sample_period=1)
        house_model.export_model("house_{}_model-redd100.h5".format(i))
    else:
        house_model.import_model("house_{}_model-redd100.h5".format(i))

    house_models.append(house_model)

test = DataSet('..\\experiments\\data\\redd.h5')
test.set_window(start="30-4-2011")  # Use data from 4/30/2011 onward

for i in range(total_buildings):
    disag_file_path = "house_{}_disag-out.h5".format(i)
    if not Path(disag_file_path).exists():
        house_model = house_models[i]

        house_test_elec = test.buildings[i].elec
        house_test_mains = house_test_elec.mains().all_meters()[0]

        output = HDFDataStore(disag_file_path, 'w')
        # test_mains: The aggregated signal meter
        # output: The output datastore
        # tm_metadata_pointer: This is used in order to copy the metadata of the train meter into the datastore
        pred_df1 = house_model.disaggregate(house_test_mains,
                                            output,
                                            tm_metadata_pointer,
                                            sample_period=1)
        output.close()
Example #15
from nilmtk.datastore import HDFDataStore, CSVDataStore
from nilmtk.datastore.datastore import convert_datastore
import os
import shutil

input_filepath = 'data/random.h5'
output_filepath = 'data/random_csv'

if os.path.isdir(output_filepath):
    shutil.rmtree(output_filepath)

input_store = HDFDataStore(input_filepath)
output_store = CSVDataStore(output_filepath)

convert_datastore(input_store, output_store)

input_store.close()
output_store.close()
Example #16
    gru.train(train_mains, train_meter, epochs=5, sample_period=1)
    gru.export_model("model-redd5.h5")
else:
    gru.import_model("model-redd5.h5")

test = DataSet('..\\..\\experiments\\data\\redd.h5')
test.set_window(start="30-4-2011")
test_elec = test.buildings[1].elec
test_mains = test_elec.mains().all_meters()[0]

disag_filename = 'disag-out.h5'
pred_df1 = None
if not Path(disag_filename).exists():
    from nilmtk.datastore import HDFDataStore

    output = HDFDataStore(disag_filename, 'w')

    # test_mains: The aggregated signal meter
    # output: The output datastore
    # train_meter: This is used in order to copy the metadata of the train meter into the datastore
    pred_df1 = gru.disaggregate(test_mains,
                                output,
                                train_meter,
                                sample_period=1)
    output.close()

result = DataSet(disag_filename)
res_elec = result.buildings[1].elec
predicted = res_elec['fridge']
ground_truth = test_elec['fridge']
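
With predicted and ground_truth loaded, a simple way to quantify the error is to align the two power series and take the mean absolute difference. This is an added sketch, not part of the original example; it assumes power_series_all_data() returns a pandas Series.

pred = predicted.power_series_all_data()
truth = ground_truth.power_series_all_data()

# Align on common timestamps, then compute mean absolute error in watts.
pred, truth = pred.align(truth, join='inner')
mae = (pred - truth).abs().mean()
print('MAE: {:.1f} W'.format(mae))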
Example #17
if TRAINING:
    print("------ TRAINING ------")
    dae.train(train_mains, train_meter, epochs=8, sample_period=1)
    dae.export_model(MODEL)
else:
    print("------ IMPORT MODEL ------")
    dae.import_model(UKDALE_MODEL)

# dae.import_model("../data/UKDALE/dae-ukdale.h5")
test = DataSet(DATASET)
test.set_window(start=START_TEST, end=END_TEST)
test_elec = test.buildings[TEST_BUILDING].elec
test_mains = test_elec.mains()

from nilmtk.datastore import HDFDataStore
output = HDFDataStore(DISAG, 'w')

print("------ TESTING ------")
dae.disaggregate(test_mains, output, train_meter, sample_period=1)

result = DataSet(DISAG)
res_elec = result.buildings[TEST_BUILDING].elec
predicted = res_elec[APPLIANCE]
ground_truth = test_elec[APPLIANCE]

fig = plt.figure()
ax = plt.subplot(111)
ax.plot(ground_truth.power_series_all_data(), label='ground truth')
ax.plot(predicted.power_series_all_data(), label='predicted')
#plt.xlim('2013-06-10 00:00:00', '2013-06-10 23:59:59')
#plt.ylim(0, 300)
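
The label arguments above only show up once a legend is drawn. A small addition to finish the plot (assumes an interactive matplotlib backend):

ax.legend()
plt.xlabel('Time')
plt.ylabel('Power [W]')
plt.show()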
Example #18
)  # The aggregated meter that provides the input
train_meter = train_elec.submeters()[
    'fridge']  # The fridge meter that is used as a training target

print("------ TRAINING ------")
#dae.train(train_mains, train_meter, epochs=50, sample_period=6)
#dae.export_model("../data/UKDALE/dae-ukdale.h5")
dae.import_model("../data/UKDALE/dae-ukdale.h5")

test = DataSet('../data/UKDALE/ukdale.h5')
test.set_window(start="2013-05-22", end="2013-09-24")
test_elec = test.buildings[TEST_BUILDING].elec
test_mains = test_elec.mains()
disag_filename = '../data/UKDALE/disag-dae-out.h5'  # The filename of the resulting datastore
from nilmtk.datastore import HDFDataStore
output = HDFDataStore(disag_filename, 'w')

print("------ TESTING ------")
dae.disaggregate(test_mains, output, train_meter, sample_period=6)
result = DataSet(disag_filename)
res_elec = result.buildings[TEST_BUILDING].elec
predicted = res_elec['fridge']
ground_truth = test_elec['fridge']

fig = plt.figure()
ax = plt.subplot(111)
ax.plot(predicted.power_series_all_data(), label='predicted')
ax.plot(ground_truth.power_series_all_data(), label='ground truth')
plt.xlim('2013-06-22 00:00:00', '2013-06-22 23:59:00')
plt.xlabel('Time [Hours]')
plt.ylabel('Power [W]')
Example #19
    def setUpClass(cls):
        filename = join(data_dir(), 'random.h5')
        cls.datastore = HDFDataStore(filename)
        cls.keys = ['/building1/elec/meter{:d}'.format(i) for i in range(1, 6)]
Example #20
from nilmtk.datastore import HDFDataStore, CSVDataStore
from nilmtk.datastore.datastore import convert_datastore
import os
import shutil

input_filepath = 'data/random.h5'
output_filepath = 'data/random_csv'

if os.path.isdir(output_filepath):
    shutil.rmtree(output_filepath)

input_store = HDFDataStore(input_filepath)
output_store = CSVDataStore(output_filepath)

convert_datastore(input_store, output_store)

input_store.close()
output_store.close()