示例#1
0
def plot_zoomed_original_predicted_energy_consumption():
    """
    Plot a zoomed time frame of the original prediction vs. ground truth.

    For each meter in ``meter_keys`` the predicted and measured power
    series are resampled, sliced to the same fixed index window and drawn
    in a 3-row figure (overlay, predicted only, ground truth only), which
    is saved as a PNG into ``results_dir``.
    """
    test = DataSet('../data/ukdale.h5')
    test.clear_cache()
    test.set_window(start="30-6-2013", end="15-7-2013")

    test_building = 1
    sample_period = 6
    meter_keys = ['kettle']

    test_elec = test.buildings[test_building].elec

    results_dir = '../results/UKDALE-ACROSS-BUILDINGS-RNN-lr=1e-05-2018-02-20-14-24-46'
    disag_filename = 'disag-out.h5'

    # Index window (in resampled samples) chosen to zoom in on.
    zoom = slice(94000, 102500)

    def power_curve(elec, key):
        """Return (timestamps, power) arrays for one meter, NaNs as 0."""
        series = next(elec[key].power_series(sample_period=sample_period))
        series.fillna(0, inplace=True)
        y = np.array(series)  # power
        x = np.arange(y.shape[0])  # timestamps
        return x[zoom], y[zoom]

    # The disaggregation output is the same file for every key,
    # so open it once instead of once per loop iteration.
    result = DataSet(os.path.join(results_dir, disag_filename))
    res_elec = result.buildings[test_building].elec

    for key in meter_keys:
        # Predicted curve for the best epoch vs. measured ground truth.
        x1, y1 = power_curve(res_elec, key)
        x2, y2 = power_curve(test_elec, key)

        fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
        ax1.plot(x1, y1, color='r', label='predicted')
        ax1.plot(x2, y2, color='b', label='ground truth')
        ax2.plot(x1, y1, color='r')
        ax3.plot(x2, y2, color='b')
        ax1.set_title('Appliance: {}'.format(key))
        # Hand-picked tick labels for the chosen zoom window.
        plt.xticks(
            np.arange(94000, 102500, 2000),
            ('5-10-2013 12:00', '16:00', '20:00', '6-10-2013 00:00', '04:00'))
        fig.legend()
        fig.savefig(
            os.path.join(
                results_dir,
                'zoomed_original_predicted_vs_ground_truth_{}.png'.format(
                    key)))
示例#2
0
def plot_zoomed_new_predicted_energy_consumption():
    """
    Predict a new short window (of the given test set) and plot it.

    Loads the RNN model saved at ``best_epoch``, disaggregates one hour of
    test mains, and saves a 3-row figure (overlay, predicted only, ground
    truth only) to ``results_dir``.
    """
    train = DataSet('../data/ukdale.h5')
    train.clear_cache()
    train.set_window(start="13-4-2013", end="31-7-2013")
    test = DataSet('../data/ukdale.h5')
    test.clear_cache()
    test.set_window(start='16-9-2013 17:00:00', end='16-9-2013 18:00:00')

    train_building = 1
    test_building = 1
    sample_period = 6
    meter_key = 'kettle'
    learning_rate = 1e-5
    best_epoch = 140

    train_elec = train.buildings[train_building].elec
    test_elec = test.buildings[test_building].elec

    train_meter = train_elec.submeters()[meter_key]
    test_mains = test_elec.mains()

    results_dir = '../results/UKDALE-RNN-lr=1e-05-2018-02-16-18-52-34'
    train_logfile = os.path.join(results_dir, 'training.log')
    val_logfile = os.path.join(results_dir, 'validation.log')
    # init=False: reuse the already-trained network weights.
    rnn = RNNDisaggregator(train_logfile,
                           val_logfile,
                           learning_rate,
                           init=False)

    model = 'UKDALE-RNN-kettle-{}epochs.h5'.format(best_epoch)
    rnn.import_model(os.path.join(results_dir, model))
    disag_filename = 'disag-out-{}epochs.h5'.format(best_epoch)
    output = HDFDataStore(os.path.join(results_dir, disag_filename), 'w')
    results_file = os.path.join(results_dir,
                                'results-{}epochs.txt'.format(best_epoch))
    rnn.disaggregate(test_mains,
                     output,
                     results_file,
                     train_meter,
                     sample_period=sample_period)
    os.remove(results_file)
    output.close()

    def power_curve(meter):
        """Return (timestamps, power) arrays for one meter, NaNs as 0."""
        series = next(meter.power_series(sample_period=sample_period))
        series.fillna(0, inplace=True)
        y = np.array(series)  # power
        x = np.arange(y.shape[0])  # timestamps
        return x, y

    # Get the predicted curve for the best epoch. NOTE: the original code
    # deleted the disaggregation file *before* reading the meter data from
    # it, which only worked because of an already-open file handle; read
    # first, then delete.
    result = DataSet(os.path.join(results_dir, disag_filename))
    res_elec = result.buildings[test_building].elec
    x1, y1 = power_curve(res_elec[meter_key])
    os.remove(os.path.join(results_dir, disag_filename))

    x2, y2 = power_curve(test_elec[meter_key])

    fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
    ax1.plot(x1, y1, color='r', label='predicted')
    ax1.plot(x2, y2, color='b', label='ground truth')
    ax2.plot(x1, y1, color='r')
    ax3.plot(x2, y2, color='b')
    ax1.set_title('Appliance: {}'.format(meter_key))
    fig.legend()
    fig.savefig(
        os.path.join(results_dir, 'zoomed_new_predicted_vs_ground_truth.png'))
示例#3
0
def plot_datasets_meter():
    """
    Plot the target appliance (labels) of the training, validation and
    test sets on three stacked axes and save the figure as ``datasets.png``.
    """
    windows = {
        'train': [["13-4-2013", "30-9-2013"]],
        'validation': ["13-4-2013", "31-7-2013"],
        'test': ["6-1-2014", "11-1-2014"]
    }

    # One dataset instance per training window.
    train = []
    for start, end in windows['train']:
        dataset = DataSet('../data/ukdale.h5')
        dataset.clear_cache()
        dataset.set_window(start=start, end=end)
        train.append(dataset)

    validation = DataSet('../data/ukdale.h5')
    validation.clear_cache()
    validation.set_window(start=windows['validation'][0],
                          end=windows['validation'][1])
    test = DataSet('../data/ukdale.h5')
    test.clear_cache()
    test.set_window(start=windows['test'][0], end=windows['test'][1])

    train_buildings = [1]
    val_buildings = [2]
    test_building = 5
    meter_key = 'microwave'

    results_dir = '../results/UKDALE-RNN-lr=1e-05-2018-02-16-18-52-34'

    # Collect the target-appliance meters for each split.
    train_meterlist = [
        train[i].buildings[b].elec.submeters()[meter_key]
        for i, b in enumerate(train_buildings)
    ]
    val_meterlist = [
        validation.buildings[b].elec.submeters()[meter_key]
        for b in val_buildings
    ]
    test_meter = test.buildings[test_building].elec.submeters()[meter_key]

    fig, axes = plt.subplots(3, sharex=False, sharey=True)
    fig.set_size_inches(15.5, 10.5)
    curves = (
        (train_meterlist[0], 'g',
         'Train set - building {}'.format(train_buildings)),
        (val_meterlist[0], 'y',
         'Validation set - building {}'.format(val_buildings)),
        (test_meter, 'r',
         'Test set - building {}'.format(test_building)),
    )
    # Draw each split on its own axis with its own colour/label.
    for axis, (meter, colour, label) in zip(axes, curves):
        meter.plot(ax=axis,
                   plot_kwargs={'color': colour, 'label': label},
                   plot_legend=False)
    axes[0].set_title('Appliance: {}'.format(meter_key))
    fig.legend()
    plt.savefig(os.path.join(results_dir, 'datasets.png'))
示例#4
0
def generate_vertices():
    """
    Predict the power demand of the target appliance using the intermediate
    models which are exported during training, and generate a polygon
    (one list of x-y coordinates per epoch, plus the ground truth at
    z=350).

    The vertices, the epoch values and the last power array are pickled
    into ``results_dir``.
    """
    train = DataSet('../data/ukdale.h5')
    train.clear_cache()
    train.set_window(start="13-4-2013", end="31-7-2013")
    test = DataSet('../data/ukdale.h5')
    test.clear_cache()
    test.set_window(start='7-2-2014 08:00:00', end='7-3-2014')

    train_building = 1
    test_building = 5
    sample_period = 6
    meter_key = 'kettle'
    learning_rate = 1e-5

    train_elec = train.buildings[train_building].elec
    test_elec = test.buildings[test_building].elec

    train_meter = train_elec.submeters()[meter_key]
    test_mains = test_elec.mains()

    results_dir = '../results/UKDALE-ACROSS-BUILDINGS-RNN-lr=1e-05-2018-02-03-11-48-12'
    train_logfile = os.path.join(results_dir, 'training.log')
    val_logfile = os.path.join(results_dir, 'validation.log')
    # init=False: reuse the already-trained network weights.
    rnn = RNNDisaggregator(train_logfile,
                           val_logfile,
                           learning_rate,
                           init=False)

    def power_curve(meter):
        """Return (timestamps, power) arrays for one meter, NaNs as 0."""
        series = next(meter.power_series(sample_period=sample_period))
        series.fillna(0, inplace=True)
        ys = np.array(series)  # power
        xs = np.arange(ys.shape[0])  # timestamps
        return xs, ys

    verts = []
    zs = []  # epochs
    for z in np.arange(10, 341, 10):

        # disaggregate with the model exported at epoch z
        model = 'UKDALE-RNN-kettle-{}epochs.h5'.format(z)
        rnn.import_model(os.path.join(results_dir, model))
        disag_filename = 'disag-out-{}epochs.h5'.format(z)
        output = HDFDataStore(os.path.join(results_dir, disag_filename), 'w')
        results_file = os.path.join(results_dir,
                                    'results-{}epochs.txt'.format(z))
        rnn.disaggregate(test_mains,
                         output,
                         results_file,
                         train_meter,
                         sample_period=sample_period)
        os.remove(results_file)
        output.close()

        # get predicted curve for epoch=z; read the data *before* deleting
        # the file (the original deleted first and relied on an open handle)
        result = DataSet(os.path.join(results_dir, disag_filename))
        res_elec = result.buildings[test_building].elec
        xs, ys = power_curve(res_elec[meter_key])
        os.remove(os.path.join(results_dir, disag_filename))

        verts.append(list(zip(xs, ys)))  # add list of x-y-coordinates
        zs.append(z)

    # ground truth curve, appended as the last polygon at epoch 350
    xs, ys = power_curve(test_elec[meter_key])
    verts.append(list(zip(xs, ys)))  # add list of x-y-coordinates
    zs.append(350)

    zs = np.asarray(zs)

    # Close every polygon by dropping it to zero power at both ends.
    for poly in verts:
        poly.insert(0, [0, np.array([0])])
        poly.append([len(poly), np.array([0])])

    # Use context managers so the pickle files are flushed and closed
    # (the original left the file objects open).
    with open(os.path.join(results_dir, 'vertices.pkl'), 'wb') as f:
        pickle.dump(verts, f)
    with open(os.path.join(results_dir, 'zs.pkl'), 'wb') as f:
        pickle.dump(zs, f)
    with open(os.path.join(results_dir, 'ys.pkl'), 'wb') as f:
        pickle.dump(ys, f)
示例#5
0
def plot_prediction_over_epochs_ploty():
    """
    Predict the power demand of the target appliance using the intermediate
    models which are exported during training, and plot each prediction
    curve as a thin 3D ribbon (surface) over the epoch axis using plotly,
    with the ground truth as the last curve.
    """
    train = DataSet('../data/ukdale.h5')
    train.clear_cache()
    train.set_window(start="13-4-2013", end="31-7-2013")
    test = DataSet('../data/ukdale.h5')
    test.clear_cache()
    test.set_window(start="23-7-2014 10:00:00", end="23-7-2014 11:00:00")

    train_building = 1
    test_building = 5
    sample_period = 6
    meter_key = 'kettle'
    learning_rate = 1e-5

    train_elec = train.buildings[train_building].elec
    test_elec = test.buildings[test_building].elec

    train_meter = train_elec.submeters()[meter_key]
    test_mains = test_elec.mains()

    results_dir = '../results/UKDALE-ACROSS-BUILDINGS-RNN-lr=1e-05-2018-02-03-11-48-12'
    train_logfile = os.path.join(results_dir, 'training.log')
    val_logfile = os.path.join(results_dir, 'validation.log')
    # init=False: reuse the already-trained network weights.
    rnn = RNNDisaggregator(train_logfile,
                           val_logfile,
                           learning_rate,
                           init=False)

    data = []

    def add_curve(power, epoch, ci):
        """Append one power curve as a thin plotly surface to ``data``.

        ``power`` is a list of power values, ``epoch`` the y-position of
        the ribbon, ``ci`` the colour index in [0, 255] (higher = lighter).
        """
        timestamps = list(range(len(power)))
        x = [[t, t] for t in timestamps]               # timestamps
        y = [[epoch, epoch + 5] for _ in timestamps]   # epochs (ribbon width 5)
        z = [[p, p] for p in power]                    # power
        data.append(
            dict(
                z=z,
                x=x,
                y=y,
                # Constant colour per curve, repeated over all scale stops.
                colorscale=[[pos, 'rgb(%d,%d,255)' % (ci, ci)]
                            for pos in np.arange(0, 1.1, 0.1)],
                showscale=False,
                type='surface',
            ))

    for i in range(10, 401, 10):
        # disaggregate with the model exported at epoch i
        model = 'UKDALE-RNN-kettle-{}epochs.h5'.format(i)
        rnn.import_model(os.path.join(results_dir, model))
        disag_filename = 'disag-out-{}epochs.h5'.format(i)
        output = HDFDataStore(os.path.join(results_dir, disag_filename), 'w')
        results_file = os.path.join(results_dir,
                                    'results-{}epochs.txt'.format(i))
        rnn.disaggregate(test_mains,
                         output,
                         results_file,
                         train_meter,
                         sample_period=sample_period)
        os.remove(results_file)
        output.close()

        # plot predicted curve for epoch=i; read the data *before* deleting
        # the file (the original deleted first and relied on an open handle)
        result = DataSet(os.path.join(results_dir, disag_filename))
        res_elec = result.buildings[test_building].elec
        predicted = next(res_elec[meter_key].power_series(
            sample_period=sample_period))
        predicted.fillna(0, inplace=True)
        os.remove(os.path.join(results_dir, disag_filename))
        add_curve(predicted.tolist(), i, int(255 / 420 * i))

    # plot ground truth curve as the last curve (epoch slot 410).
    ground_truth = next(test_elec[meter_key].power_series(
        sample_period=sample_period))
    ground_truth.fillna(0, inplace=True)
    # NOTE: divisor 410 here (vs 420 in the loop) reproduces the original
    # colour mapping: ci == 255, i.e. the lightest blue for ground truth.
    add_curve(ground_truth.tolist(), 410, int(255 / 410 * 410))

    layout = dict(title='prediction over epochs',
                  showlegend=False,
                  scene=dict(xaxis=dict(title='timestamps'),
                             yaxis=dict(title='epochs'),
                             zaxis=dict(title='power'),
                             camera=dict(eye=dict(x=-1.7, y=-1.7, z=0.5))))

    fig = dict(data=data, layout=layout)
    plotly.offline.plot(fig, filename='filled-3d-lines')
示例#6
0
IMPORT = False  # TODO: True if continue training

# Date windows (start, end) carving UK-DALE into the three splits; the
# train/validation splits use several disjoint windows each.
windows = {
    'train': [['13-4-2013', '13-5-2013'], ['13-6-2013', '13-7-2013'],
              ['13-5-2013', '13-6-2013'], ['13-7-2014', '31-7-2014']],
    'validation': [['13-5-2013', '20-5-2013'], ['13-7-2013', '20-7-2013'],
                   ['13-4-2013', '20-4-2013'], ['7-6-2014', '13-7-2014']],
    'test': ['30-6-2013', '15-7-2013']
}


def _open_windowed_dataset(start, end):
    """Open a fresh UK-DALE dataset restricted to the given date window."""
    dataset = DataSet('../data/ukdale.h5')
    dataset.clear_cache()
    dataset.set_window(start=start, end=end)
    return dataset


print("========== OPEN DATASETS ============")
# One dataset instance per training/validation window.
train = [_open_windowed_dataset(start, end)
         for start, end in windows['train']]
validation = [_open_windowed_dataset(start, end)
              for start, end in windows['validation']]
test = _open_windowed_dataset(windows['test'][0], windows['test'][1])

train_mainslist = []
示例#7
0
from rnndisaggregator import RNNDisaggregator
from plots import plot_loss


IMPORT = False  # TODO: True if continue training

# Date windows (start, end) carving UK-DALE into the three splits.
windows = {
    'train': ["13-4-2013", "31-7-2013"],
    'validation': ["31-7-2013", "31-8-2013"],
    'test': ["13-9-2013", "30-9-2013"]
}


def _windowed_dataset(start, end):
    """Open a fresh UK-DALE dataset limited to the given date window."""
    dataset = DataSet('../data/ukdale.h5')
    dataset.clear_cache()
    dataset.set_window(start=start, end=end)
    return dataset


print("========== OPEN DATASETS ============")
train = _windowed_dataset(windows['train'][0], windows['train'][1])
validation = _windowed_dataset(windows['validation'][0],
                               windows['validation'][1])
test = _windowed_dataset(windows['test'][0], windows['test'][1])

train_building = 1
validation_building = 1
test_building = 1
sample_period = 6
meter_key = 'kettle'
learning_rate = 1e-5