Example #1
import os

import numpy as np
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader

# Datafolder_soundfiles, walk_dir, transform_tf and feature_transform_0 are
# project-specific helpers assumed to be importable from this repository.


def save_preprocessing_parameters(dataset_dir):
    """Fit a StandardScaler over the training features and save mean/std to disk."""
    y_dir = os.path.join(dataset_dir, 'train', 'y')
    mean_path = os.path.join(dataset_dir, 'transform_0_mean.txt')
    std_path = os.path.join(dataset_dir, 'transform_0_std.txt')

    train_set = Datafolder_soundfiles(y_paths=walk_dir(y_dir))
    training_data_loader = DataLoader(train_set, batch_size=1, num_workers=2)

    scaler0 = StandardScaler()
    t_0 = transform_tf(feature_transform_0())

    for index, x, y in training_data_loader:
        y = y[0].numpy()  # drop the batch dimension (batch_size is 1)

        data, Y_m, Y_a, length = t_0(y=y)
        # accumulate running statistics; each time step is treated equally
        scaler0.partial_fit(data.numpy().T)

    np.savetxt(mean_path, scaler0.mean_)
    np.savetxt(std_path, scaler0.scale_)
    print(scaler0.mean_)
    print(scaler0.scale_)
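The saved statistics can be reloaded later to standardize features before they are fed to a model. A minimal sketch of that step, assuming the np.savetxt format used above; load_preprocessing_parameters and standardize are hypothetical helper names, not part of the original code:

import os

import numpy as np


def load_preprocessing_parameters(dataset_dir):
    # hypothetical helper: reload the statistics written by the function above
    mean = np.loadtxt(os.path.join(dataset_dir, 'transform_0_mean.txt'))
    std = np.loadtxt(os.path.join(dataset_dir, 'transform_0_std.txt'))
    return mean, std


def standardize(data, mean, std):
    # data is (features, time), as in the partial_fit call above
    # (it is transposed there so each time step counts as one sample)
    return (data - mean[:, None]) / std[:, None]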
Example #2
# training using a phase-sensitive mask, ReLU activation on the output
import os

from torch import nn

# BLSTM_A, transform_tf, feature_transform_0, ideal_phase_sensitive_target,
# apply_mask, load_dataset_5_train and load_dataset_4_val are project-specific.

p = {
    'experiment_name': 'BLSTM_A11',
    'model_class': BLSTM_A,
    'model_kwargs': {
        'input_size': 100,
        'output_size': 129,
        'hidden_size': 384,
        'LSTM_layers': 2,
        'output_act_f': 'ReLU'
    },
    'input_transform': transform_tf(
        feature_transform_0(os.path.join('data', 'processed', 'dataset_5')),
        ideal_phase_sensitive_target),
    'output_transform': apply_mask,
    'training_set': load_dataset_5_train,
    'validation_set': load_dataset_4_val,
    'batch_size': 10,
    'epochs_max': 57,
    'samples_per_epoch': 21384,  # length of dataset_3 and dataset_4
    'criterion': nn.MSELoss(),
}
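The keys of p hint at how such a configuration is consumed: the model comes from model_class plus model_kwargs, the loss from criterion, and the data loaders from the dataset factories. A minimal sketch under those assumptions; the real training driver is not part of these examples, so run_experiment and the loop body are hypothetical:

from torch.utils.data import DataLoader


def run_experiment(p):
    # hypothetical driver, inferred from the keys of p
    model = p['model_class'](**p['model_kwargs'])  # e.g. BLSTM_A(input_size=100, ...)
    criterion = p['criterion']
    # assumption: 'training_set' holds a zero-argument factory such as load_dataset_5_train
    train_loader = DataLoader(p['training_set'](), batch_size=p['batch_size'])
    steps_per_epoch = p['samples_per_epoch'] // p['batch_size']
    for epoch in range(p['epochs_max']):
        for _, batch in zip(range(steps_per_epoch), train_loader):
            pass  # forward pass, criterion(output, target), backward, optimizer step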
Example #3
# batch_size is doubled, but training is neither faster nor slower
# (same imports and project-specific helpers as Example #2)
p = {
    'experiment_name': 'BLSTM_A14',
    'model_class': BLSTM_A,
    'model_kwargs': {
        'input_size': 100,
        'output_size': 129,
        'hidden_size': 384,
        'LSTM_layers': 2
    },
    'input_transform': transform_tf(
        feature_transform_0(os.path.join('data', 'processed', 'dataset_5')),
        ideal_amplitude_target),
    'output_transform': apply_mask,
    'training_set': load_dataset_5_train,
    'validation_set': load_dataset_4_val,
    'batch_size': 10 * 2,
    'epochs_max': 57,
    'samples_per_epoch': 21384,  # length of dataset_3 and dataset_4
    'criterion': nn.MSELoss(),
}
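Doubling batch_size halves the number of optimizer steps per epoch while each step processes twice as many samples, so the amount of data touched per epoch is unchanged; that is consistent with the comment above that training got neither faster nor slower. A quick check of the arithmetic:

samples_per_epoch = 21384
for batch_size in (10, 20):
    steps = samples_per_epoch // batch_size
    print(batch_size, steps, batch_size * steps)
# batch_size 10 -> 2138 steps (21380 samples); batch_size 20 -> 1069 steps (21380 samples)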