Example no. 1
0
    def test_build_samples_from_2d(self):
        """Verify build_samples trims 2D input rows to the target sample length."""
        source = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

        # Target length 2 should drop the last column; 3 keeps every row intact.
        truncated = build_samples(source, 2).tolist()
        untouched = build_samples(source, 3).tolist()

        self.assertListEqual(truncated, [[1, 2], [4, 5], [7, 8]])
        self.assertListEqual(untouched, [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Example no. 2
0
    def pre_processing(data_frame):
        """Turn a DataFrame into 3D model samples, min-max scaling first if configured."""
        raw = data_frame.to_numpy()

        if run_config['scaling'] == 'min_max':
            scaler = Scaler(MinMaxScaler, fit_mode=run_config['fit_mode'])
            raw = scaler.fit_transform(raw)

        return build_samples(raw, target_sample_length=config['input_size'], target_dimensions=3)
Example no. 3
0
    def pre_processing_y(data_frame):
        """Build 3D target samples shifted by the configured prediction offset.

        Drops the first ``config['prediction_shift']`` rows so the targets
        line up with inputs that predict ahead, then reshapes the flattened
        data into samples of ``config['output_size']``.
        """
        numpy_data = data_frame.to_numpy()
        numpy_data = numpy_data[config['prediction_shift']:, :]
        samples = build_samples(numpy_data.flatten(), config['output_size'], target_dimensions=3)

        # Bug fix: the original used `is 'min_max'`, which compares object
        # identity against a string literal and is not guaranteed to ever be
        # True (CPython emits a SyntaxWarning for it). Use equality.
        if run_config['scaling'] == 'min_max':
            # NOTE(review): the scaler transforms the pre-build 2D array and
            # discards the samples built above — confirm this is intended.
            samples = Scaler(MinMaxScaler, fit_mode=run_config['fit_mode']).fit_transform(numpy_data)

        return samples
Example no. 4
0
    def pre_processing(data_frame: DataFrame):
        """Flatten the frame into input-size samples and min-max scale them.

        In train mode the shared, already-fitted ``train_scaler`` is reused;
        any other fit mode fits a fresh scaler on these samples.
        """
        samples = build_samples(data_frame.to_numpy().flatten(), config['input_size'])

        if run_config['fit_mode'] != 'train':
            fresh_scaler = Scaler(MinMaxScaler, fit_mode=run_config['fit_mode'])
            return fresh_scaler.fit_transform(samples)

        return train_scaler.transform(samples)
Example no. 5
0
File: lstm.py Project: maechler/a2e
    def pre_processing(data_frame):
        """Build 3D input samples from the frame, optionally min-max scaled."""
        numpy_data = data_frame.to_numpy()
        samples = build_samples(numpy_data.flatten(),
                                config['input_size'],
                                target_dimensions=3)

        # NOTE(review): when scaling is enabled, the samples built above are
        # discarded and the scaler output of the raw 2D data is returned
        # instead — confirm this is intended.
        if run_config['scaling'] == 'min_max':
            scaler = Scaler(MinMaxScaler, fit_mode=run_config['fit_mode'])
            samples = scaler.fit_transform(numpy_data)

        return samples
Example no. 6
0
def run_callable(run_config: dict):
    """Load the configured bearing data set, fit the model, and log results."""

    def pre_processing(data_frame: DataFrame):
        """Flatten a frame into input-size samples and min-max scale them."""
        samples = build_samples(data_frame.to_numpy().flatten(),
                                config['input_size'])

        if run_config['fit_mode'] != 'train':
            # Outside train mode, fit a fresh scaler on these samples.
            return Scaler(MinMaxScaler,
                          fit_mode=run_config['fit_mode']).fit_transform(samples)

        # Train mode reuses the scaler fitted on the training split below.
        return train_scaler.transform(samples)

    experiment.print('Loading data')
    bearing_dataset = load_data(run_config['data_set'])
    data_frames = bearing_dataset.as_dict(column=run_config['data_column'])

    train_samples = build_samples(data_frames['train'].to_numpy().flatten(),
                                  config['input_size'])

    # A 'train' fit mode maps to per-feature fitting for the training scaler.
    if run_config['fit_mode'] == 'train':
        fit_mode = 'per_feature'
    else:
        fit_mode = run_config['fit_mode']

    train_scaler = Scaler(MinMaxScaler, fit_mode=fit_mode)
    train_samples = train_scaler.fit_transform(train_samples)

    experiment.print('Fitting model')
    history = model.fit(
        train_samples,
        train_samples,
        verbose=0,
        epochs=config['epochs'],
        callbacks=experiment.keras_callbacks(),
        validation_split=config['validation_split'],
        shuffle=config['shuffle'],
    )

    experiment.log_history(history)
    experiment.log_keras_model(model)
    experiment.log_keras_predictions(
        model=model,
        data_frames=data_frames,
        pre_processing=pre_processing,
    )