def mean():
    """Score a naive always-predict-the-mean baseline for every appliance.

    For each appliance the prediction is a constant series equal to the
    mean of that appliance's ground-truth signal.  Per-appliance metrics
    go into ``scores['Mean']`` and an across-appliance summary is built
    from the summed predictions.

    NOTE(review): relies on module-level globals (``scores``,
    ``APPLIANCES``, ``EXPERIMENT_DIRECTORIES``, ``BASE_DIRECTORY``,
    ``mains``, ``run_metrics``, ``across_all_appliances``).
    """
    # METRICS
    ALGO = 'Mean'
    scores[ALGO] = {}
    aggregate_predictions = None
    for appliance in APPLIANCES:
        y_true_fname = join(
            BASE_DIRECTORY, EXPERIMENT_DIRECTORIES[appliance],
            'targets.npy')
        y_true = np.load(y_true_fname)
        n = len(y_true)
        # Constant prediction: every sample equals the appliance's mean power.
        y_pred = np.full(n, y_true.mean())
        print(appliance, y_true.mean())
        scores[ALGO][appliance] = run_metrics(
            y_true, y_pred, mains.values)

        # Sum predictions across appliances, truncating to the shortest
        # series seen so far.  Copy on first assignment so the in-place
        # `+=` below cannot mutate the first appliance's y_pred array.
        if aggregate_predictions is None:
            aggregate_predictions = y_pred.copy()
        else:
            n_agg = min(len(aggregate_predictions), len(y_pred))
            aggregate_predictions = aggregate_predictions[:n_agg]
            aggregate_predictions += y_pred[:n_agg]

    scores[ALGO] = across_all_appliances(
        scores[ALGO], mains, aggregate_predictions)
# Beispiel #2 (Example #2) — separator text from the code-example source, kept as a comment
def calculate_metrics():
    """Compute per-building metrics for each architecture and appliance.

    Returns:
        Nested dict keyed as
        ``scores[architecture][appliance][building_i] -> run_metrics(...)``.

    NOTE(review): relies on module-level globals ``APPLIANCES`` (iterable
    of ``(appliance, buildings)`` pairs here), ``load`` and ``run_metrics``.
    ``aggregate_predictions`` is accumulated but never used after the
    loop — presumably leftover from a sibling routine; kept for parity.
    """
    scores = {}
    for architecture in ['ae', 'rectangles']:
        scores[architecture] = {}
        for appliance, buildings in APPLIANCES:
            scores[architecture][appliance] = {}
            aggregate_predictions = None
            for building_i in buildings:
                y_true, y_pred, mains = load(
                    architecture, building_i, appliance)

                # Truncate both series to a common length.
                n = min(len(y_true), len(y_pred))
                y_true = y_true[:n]
                y_pred = y_pred[:n]

                # Copy on first assignment: the in-place `+=` below would
                # otherwise mutate the first building's y_pred data
                # through the shared buffer.
                if aggregate_predictions is None:
                    aggregate_predictions = y_pred.copy()
                else:
                    n_agg = min(len(aggregate_predictions), len(y_pred))
                    aggregate_predictions = aggregate_predictions[:n_agg]
                    aggregate_predictions += y_pred[:n_agg]

                scores[architecture][appliance][building_i] = run_metrics(
                    y_true, y_pred, mains)

    return scores
def fhmm_metrics():
    """Score pre-computed FHMM predictions against ground-truth targets.

    Loads ``FHMM_<appliance>.npy`` predictions and the matching
    ``targets.npy``, truncates both to a common length, optionally plots
    them, stores per-appliance metrics in ``scores['FHMM']``, and builds
    an across-appliance summary from the summed predictions.

    NOTE(review): relies on module-level globals (``scores``,
    ``APPLIANCES``, ``BASE_DIRECTORY``, ``EXPERIMENT_DIRECTORIES``,
    ``mains``, ``fridge_mains``, ``kettle_mains``, ``PLOT``, ``plt``,
    ``run_metrics``, ``across_all_appliances``).
    """
    # METRICS
    scores['FHMM'] = {}
    aggregate_predictions = None
    for appliance_type in APPLIANCES:
        y_pred = np.load(
            join(BASE_DIRECTORY, 'FHMM_' + appliance_type + '.npy'))
        y_true_fname = join(
            BASE_DIRECTORY, EXPERIMENT_DIRECTORIES[appliance_type],
            'targets.npy')
        y_true = np.load(y_true_fname)
        # Truncate both series to a common length.
        n = min(len(y_true), len(y_pred))
        y_true = y_true[:n]
        y_pred = y_pred[:n]

        # Some appliances were scored against a dedicated mains channel.
        if appliance_type in ['fridge freezer']:
            m = fridge_mains
        elif appliance_type in ['kettle']:
            m = kettle_mains
        else:
            m = mains

        if PLOT:
            fig, axes = plt.subplots(nrows=2, sharex=True)
            axes[0].plot(y_true[:20000], label='y_true')
            axes[0].plot(y_pred[:20000], label='y_pred')
            axes[0].set_title(appliance_type)
            axes[0].legend()
            axes[1].plot(m.values[:20000], label='mains')
            axes[1].set_title('Mains')
            plt.show(block=True)

        scores['FHMM'][appliance_type] = run_metrics(
            y_true, y_pred, m.values)

        # Copy on first assignment so the in-place `+=` below cannot
        # mutate the first appliance's y_pred array through the shared
        # buffer.
        if aggregate_predictions is None:
            aggregate_predictions = y_pred.copy()
        else:
            n_agg = min(len(aggregate_predictions), len(y_pred))
            aggregate_predictions = aggregate_predictions[:n_agg]
            aggregate_predictions += y_pred[:n_agg]

    scores['FHMM'] = across_all_appliances(
        scores['FHMM'], mains, aggregate_predictions)