Example #1
def run_activity_model_training():
    bathroom1 = Dataset.parse('dataset/', 'bathroom1')
    kitchen1 = Dataset.parse('dataset/', 'kitchen1')
    combined1 = bathroom1.combine(kitchen1)

    # Run vector output model
    model_args = {
        "learning_rate": 1e-3,
        "hidden_layer_activation": 'tanh',
        "hidden_layers": 1,
        "hidden_layer_units": 120,
        "input_n_units": 120,
        "second_layer_input": 120,
        "n_activities": 25
    }

    create_train_activity_prediction_model(combined1,
                                           model_args=model_args,
                                           model_name='vector_output_activity_synthesis',
                                           epochs=100,
                                           window_size=60 * 24,
                                           future_steps=1,
                                           dt=60,
                                           with_time=True,
                                           batch=128,
                                           load_weights=True,
                                           sensor_id=-1)
Example #2
def run_sensor_non_deterministic(folders, model_names, start, n_steps, output_folder):
    bathroom1 = Dataset.parse('dataset/', 'bathroom1')
    kitchen1 = Dataset.parse('dataset/', 'kitchen1')
    combined1 = bathroom1.combine(kitchen1)
    input, output = synthesize_sensors_multiple_days(combined1, folders, model_names, start,
                                                     n_steps, deterministic=False)
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)
    numbers = list(map(lambda f2: int(f2.split('_')[1].split('.csv')[0]),
                       filter(lambda f: f.startswith('output'),
                              os.listdir(output_folder))))
    if len(numbers) == 0:
        next_number = 0
    else:
        next_number = max(numbers) + 1
    output.to_csv(os.path.join(output_folder, 'output_%d.csv' % next_number))
    input.to_csv(os.path.join(output_folder, 'input_%d.csv' % next_number))
Example #3
def run_sensor_synthesis(folders, model_names, start, n_steps, window_size):
    bathroom1 = Dataset.parse('dataset/', 'bathroom1')
    kitchen1 = Dataset.parse('dataset/', 'kitchen1')
    combined1 = bathroom1.combine(kitchen1)
    input, output = synthesize_sensors_multiple_days(combined1, folders, model_names, start,
                                                     n_steps)

    input.to_csv('input.csv')
    output.to_csv('output.csv')
    output.plot()
    plt.legend()
    plt.axvline(input.index[start + window_size])
    plt.title('Predicted data')
    plt.show()

    input.iloc[(start + window_size):(start + window_size + n_steps)].plot()
    plt.legend()
    plt.title('Input data')
    plt.show()
Example #4
def search(dt=600,
           window_size=360,
           future_steps=144,
           epochs=50,
           with_time=True,
           batch_size=128,
           max_trials=200):
    bathroom1 = Dataset.parse('dataset/', 'bathroom1')
    kitchen1 = Dataset.parse('dataset/', 'kitchen1')
    combined1 = bathroom1.combine(kitchen1)

    X, y = prepare_data_future_steps(combined1,
                                     window_size=window_size,
                                     dt=dt,
                                     with_time=with_time,
                                     future_steps=future_steps)
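    # (3600 // dt) * 24 is the number of time bins in one day; the last four days
    # are held out: two for validation and the final two for testing.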
    X_train = X[:-4 * (3600 // dt) * 24, :, :]
    X_val = X[-4 * (3600 // dt) * 24:-2 * (3600 // dt) * 24, :, :]
    X_test = X[-2 * (3600 // dt) * 24:, :, :]

    # For now only sensor 24 (the default sensor_id); column 0 of y is the sensor value
    y_train = y[:-4 * (3600 // dt) * 24, :, 0]
    y_val = y[-4 * (3600 // dt) * 24:-2 * (3600 // dt) * 24, :, 0]
    y_test = y[-2 * (3600 // dt) * 24:, :, 0]

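    # Random search over the hypermodel's hyperparameters; trials are ranked by
    # validation loss and trial results are stored under 'test_dir'.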
    tuner = RandomSearch(FuturePredictionModelHyperparameters(
        window_size=window_size,
        num_features=X.shape[2],
        future_steps=future_steps),
                         objective='val_loss',
                         max_trials=max_trials,
                         directory='test_dir')

    tuner.search_space_summary()

    tuner.search(x=X_train,
                 y=y_train,
                 epochs=epochs,
                 batch_size=batch_size,
                 validation_data=(X_val, y_val),
                 callbacks=[IsNanEarlyStopper(monitor='loss')])

    tuner.results_summary()
Example #5
def prepare_data_future_steps(d: Dataset,
                              window_size=70,
                              dt=60,
                              with_time=False,
                              future_steps=20,
                              sensor_id=24,
                              features=[24, 6, 5],
                              **kwargs):
    """Prepares sensor data from a dataset for prediction using an LSTM/convolutional network.
    The sensor data is first split into time windows of length dt. All sensors are discarded
    except for those in the features array. A window of past values of size window_size is used
    to predict future_steps ahead for a specific sensor (the sensor with id sensor_id).

    :param with_time: If set to true, time is encoded into a sinusoid with period 24 hours.
    :param sensor_id: The sensor that will be predicted. This is used in the output vector
    :param window_size: The number of past values that will be used
    :param future_steps: The number of future values that will be predicted
    :returns: two numpy arrays, X (input) and y. X has shape [#Samples, #Future steps, #Features]
    and y has shape [#Samples, #Future steps]
    """
    series_sensor_data = d.sensor_values_reshape(dt)
    features_data = series_sensor_data[features]
    series_sensor_data = series_sensor_data[[sensor_id]]
    if with_time:
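        # Encode the time of day as sin/cos with a 24-hour period, so that times just
        # before and just after midnight map to nearby points in feature space.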
        seconds_in_day = 24 * 60 * 60
        seconds_past_midnight = \
            series_sensor_data.index.hour * 3600 + \
            series_sensor_data.index.minute * 60 + \
            series_sensor_data.index.second
        series_sensor_data['sin_time'] = np.sin(
            2 * np.pi * seconds_past_midnight / seconds_in_day)
        series_sensor_data['cos_time'] = np.cos(
            2 * np.pi * seconds_past_midnight / seconds_in_day)
        features_data['sin_time'] = np.sin(2 * np.pi * seconds_past_midnight /
                                           seconds_in_day)
        features_data['cos_time'] = np.cos(2 * np.pi * seconds_past_midnight /
                                           seconds_in_day)
    data = np.zeros(
        (len(series_sensor_data), window_size, features_data.shape[1]))
    output = np.zeros(
        (len(series_sensor_data), future_steps, series_sensor_data.shape[1]))
    for i in range(future_steps):
        output[:, i, :] = series_sensor_data.shift(-1 * i)  # Future steps
    for i in range(window_size):
        # 0 -> shift(window_size)
        # 1 -> shift(window_size-1)
        data[:, i, :] = features_data.shift(window_size - i)
    return data[window_size:-future_steps, :, :], output[
        window_size:-future_steps, :]
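For reference, a minimal sketch of how prepare_data_future_steps can be called, assuming the Dataset API used in the examples above; the parameter values below are illustrative only:

# Illustrative call; dataset paths and sensor ids follow the examples above.
bathroom1 = Dataset.parse('dataset/', 'bathroom1')
kitchen1 = Dataset.parse('dataset/', 'kitchen1')
combined1 = bathroom1.combine(kitchen1)

X, y = prepare_data_future_steps(combined1,
                                 window_size=96,    # 96 bins of 15 minutes = 24 h of history
                                 dt=900,            # 15-minute bins
                                 with_time=True,    # append sin_time/cos_time features
                                 future_steps=4,    # predict the next hour
                                 sensor_id=24,
                                 features=[24, 6, 5])
# X.shape == (n_samples, 96, 5): three sensors plus the two time-encoding columns
# y.shape == (n_samples, 4, 3): sensor 24 plus the two time-encoding columns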
Example #6
def run_sensor_model_training():
    bathroom1 = Dataset.parse('dataset/', 'bathroom1')
    kitchen1 = Dataset.parse('dataset/', 'kitchen1')
    combined1 = bathroom1.combine(kitchen1)

    for sensor_id in [24, 5, 6, 9]:
        model_args = {
            "learning_rate": 1e-3,
            "hidden_layer_activation": 'tanh',
            "hidden_layers": 1,
            "hidden_layer_units": 120,
            "input_n_units": 120,
            "second_layer_input": 120
        }
        try:
            create_train_sensor_prediction_model(combined1,
                                                 model_args=model_args,
                                                 model_name='single_step_prediction',
                                                 epochs=100,
                                                 window_size=4 * 24,
                                                 future_steps=1,
                                                 dt=900,  # 15-minute bins; with future_steps=1 this predicts 15 minutes ahead.
                                                 with_time=True,
                                                 batch=128,
                                                 sensor_id=sensor_id,
                                                 features=[24, 5, 6, 9],
                                                 load_weights=False)
        except Exception as e:
            print(e)

        try:
            create_train_sensor_prediction_model(combined1,
                                                 model_args=model_args,
                                                 model_name='vector_output',
                                                 epochs=1500,
                                                 window_size=24*5,
                                                 future_steps=24,
                                                 dt=3600,  # Predict 1 day ahead.
                                                 with_time=True,
                                                 batch=128,
                                                 sensor_id=sensor_id,
                                                 features=[sensor_id],
                                                 load_weights=False)
        except Exception as e:
            print(e)
        try:
            model = single_sensor_multistep_future_encoder_decoder(
                timesteps=4*24,
                future_timesteps=24,
                n_features=3,
            )
            create_train_sensor_prediction_model(combined1,
                                                 model_args=model_args,
                                                 model_name='encoder_decoder',
                                                 model=model,
                                                 epochs=1500,
                                                 window_size=24 * 5,
                                                 future_steps=24,
                                                 dt=3600,  # Predict 1 day ahead (24 hourly steps).
                                                 with_time=True,
                                                 batch=128,
                                                 sensor_id=sensor_id,
                                                 features=[sensor_id],
                                                 load_weights=False)
        except Exception as e:
            print(e)
Example #7
def run_sensor_non_deterministic(folders, model_names, start, n_steps, output_folder):
    bathroom1 = Dataset.parse('dataset/', 'bathroom1')
    kitchen1 = Dataset.parse('dataset/', 'kitchen1')
    combined1 = bathroom1.combine(kitchen1)
    input, output = synthesize_sensors_multiple_days(combined1, folders, model_names, start,
                                                     n_steps, deterministic=False)
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)
    numbers = list(map(lambda f2: int(f2.split('_')[1].split('.csv')[0]),
                       filter(lambda f: f.startswith('output'),
                              os.listdir(output_folder))))
    if len(numbers) == 0:
        next_number = 0
    else:
        next_number = max(numbers) + 1
    output.to_csv(os.path.join(output_folder, 'output_%d.csv' % next_number))
    input.to_csv(os.path.join(output_folder, 'input_%d.csv' % next_number))

def run_multiple_non_deterministic(folders, model_names, start, n_steps, output_folder, n_reps):
    for _ in tqdm(range(n_reps)):
        run_sensor_non_deterministic(folders, model_names, start, n_steps, output_folder)

if __name__ == '__main__':
    bathroom1 = Dataset.parse('dataset/', 'bathroom1')
    kitchen1 = Dataset.parse('dataset/', 'kitchen1')
    combined1 = bathroom1.combine(kitchen1)

    combined1.sensor_data_summary()

    bathroom2 = Dataset.parse('dataset/', 'bathroom2')
    kitchen2 = Dataset.parse('dataset/', 'kitchen2')
    combined2 = bathroom2.combine(kitchen2)

    combined2.sensor_data_summary()

    # Creates LOF and isolation forest plots
    LOF(combined1, ['duration'])
    LOF(combined1, ['start_time'])
    LOF(combined1, ['duration', 'start_time'])