    'data_set': [
        '400rpm_v2',
        '800rpm_v2',
        '1200rpm_v2',
    ],
    'evaluation_function': [
        'a2e.evaluation.reconstruction_error_cost',
        'a2e.evaluation.keras.reconstruction_error_vs_compression_cost',
        'a2e.evaluation.keras.uniform_reconstruction_error_vs_compression_cost',
    ],
}

config_space = create_config_space()

if __name__ == '__main__':
    experiment = Experiment(auto_datetime_directory=True)
    experiment.log('config/config', config)
    experiment.log('config/run_configs', run_configs)
    experiment.log('config/config_space', str(config_space))

    def run_callable(run_config: dict):
        experiment.print('Loading data')

        bearing_dataset = load_data(run_config['data_set'])
        train = bearing_dataset.train(column=config['data_column'], as_numpy=True)
        test = bearing_dataset.test(column=config['data_column'], as_numpy=True)
        test_labels = bearing_dataset.test(column=config['data_column'], add_label=True)['label']
        threshold_percentile = config['threshold_percentile']

        x_train, x_valid, y_train, y_valid = train_test_split(
            train,
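# ---------------------------------------------------------------------------
# Illustrative sketch only (not the project's code): the script above breaks
# off inside the train_test_split call. The usual pattern it follows is to
# split the healthy training windows into train/validation sets and to turn
# `threshold_percentile` into an anomaly threshold on the validation
# reconstruction error. All names and data below are stand-ins.
# ---------------------------------------------------------------------------
import numpy as np
from sklearn.model_selection import train_test_split

train = np.random.rand(1000, 50)  # stand-in for healthy training windows
threshold_percentile = 99

# Autoencoders reconstruct their input, so x and y are the same array.
x_train, x_valid, y_train, y_valid = train_test_split(
    train, train, test_size=0.1, random_state=42
)

# Placeholder for model.predict(x_valid) from a trained autoencoder.
reconstruction = x_valid + np.random.normal(scale=0.01, size=x_valid.shape)

# Per-sample reconstruction error and a percentile-based anomaly threshold.
errors = np.mean(np.square(x_valid - reconstruction), axis=1)
threshold = np.percentile(errors, threshold_percentile)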
    ],
    'data_columns': [
        'rms',
        # 'crest',
        # 'temperature',
    ],
}

run_configs = {
    'data_set': config['data_sets'],
    'data_column': config['data_columns'],
    'fit_mode': config['fit_modes'],
    'scaling': config['scalings'],
}

experiment = Experiment()
experiment.log('config/config', config)
experiment.log('config/run_configs', run_configs)

# The decoder reconstructs the window shortened by the prediction shift,
# so the computed output dimension is passed to the model factory.
output_dimension = config['input_size'] - config['prediction_shift']
model = create_lstm_autoencoder(config['input_size'], output_dimension=output_dimension)


def run_callable(run_config: dict):
    def pre_processing_x(data_frame):
        numpy_data = data_frame.to_numpy()
        numpy_data = numpy_data[:-config['prediction_shift'], :]
        samples = build_samples(numpy_data.flatten(), config['input_size'], target_dimensions=3)

        # Optionally min-max scale the built samples according to the run's fit mode.
        if run_config['scaling'] == 'min_max':
            samples = Scaler(MinMaxScaler, fit_mode=run_config['fit_mode']).fit_transform(samples)
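# ---------------------------------------------------------------------------
# Illustrative sketch only: build_samples and Scaler come from the a2e
# package and their implementations are not shown here. As a rough, assumed
# approximation (not the library's actual code), a flat signal can be cut
# into non-overlapping windows of length `input_size` and reshaped to the
# 3-D (samples, timesteps, features) layout an LSTM autoencoder expects.
# ---------------------------------------------------------------------------
import numpy as np


def build_samples_sketch(signal: np.ndarray, input_size: int) -> np.ndarray:
    """Hypothetical stand-in for a2e's build_samples with target_dimensions=3."""
    usable_length = (len(signal) // input_size) * input_size
    windows = signal[:usable_length].reshape(-1, input_size)

    return windows[:, :, np.newaxis]  # shape: (n_windows, input_size, 1)


# Example: 10,000 measurements cut into 200 windows of 50 values each.
samples = build_samples_sketch(np.sin(np.linspace(0, 100, 10_000)), input_size=50)
print(samples.shape)  # (200, 50, 1)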