Example no. 1
0
        results_save['fn_' + pause_str] = list()  # false negatives per category
        results_save['tp_' + pause_str] = list()  # true positives per category
else:
    # NOTE(review): the matching `if` branch is truncated above this chunk.
    # This fallback builds the per-category metric containers using only the
    # pause + onset categories (no overlap categories) — presumably taken when
    # overlap evaluation is disabled; confirm against the missing `if` head.
    results_save = dict()
    for pause_str in pause_str_list + onset_str_list:
        results_save['f_scores_' + pause_str] = list()  # per-category F-scores
        results_save['tn_' + pause_str] = list()  # true negatives
        results_save['fp_' + pause_str] = list()  # false positives
        results_save['fn_' + pause_str] = list()  # false negatives
        results_save['tp_' + pause_str] = list()  # true positives
# Histories accumulated once per epoch during training/evaluation.
results_save['train_losses'], results_save['test_losses'], results_save[
    'indiv_perf'], results_save['test_losses_l1'] = [], [], [], []

# %% Training
# NOTE(review): the epoch body below appears truncated in this chunk — the
# actual training step (forward/backward pass) is not visible here.
for epoch in range(0, num_epochs):
    model.train()  # switch model to training mode (e.g. enables dropout)
    t_epoch_strt = t.time()  # wall-clock start time of this epoch
    loss_list = []  # collects per-batch losses for this epoch
    model.change_batch_size_reset_states(train_batch_size)

    if onset_test_flag:
        # setup results_dict
        # Pre-allocate one (num_result_frames, prediction_length) array per
        # file/speaker pair to hold the continuous onset predictions.
        train_results_dict = dict()
        #            losses_dict = dict()
        train_results_lengths = train_dataset.get_results_lengths()
        for file_name in train_file_list:
            #            for g_f in ['g','f']:
            # g_f iterates the speaker channels selected for this dataset
            for g_f in data_select_dict[data_set_select]:
                # create new arrays for the results
                train_results_dict[file_name + '/' + g_f] = np.zeros(
                    [train_results_lengths[file_name], prediction_length])

# Containers for per-category evaluation metrics: for every pause, overlap and
# onset category, one list per metric (F-score plus confusion-matrix counts).
results_save = dict()
for category in pause_str_list + overlap_str_list + onset_str_list:
    for metric_prefix in ('f_scores_', 'tn_', 'fp_', 'fn_', 'tp_'):
        results_save[metric_prefix + category] = list()
# Histories accumulated once per epoch.
results_save['train_losses'] = []
results_save['test_losses'] = []
results_save['indiv_perf'] = []
results_save['test_losses_l1'] = []


# %% Training
# NOTE(review): the epoch body is truncated at the end of this chunk — the
# training step itself continues beyond the visible lines.
for epoch in range(0, num_epochs):
    model.train() #tells model you are in training mode, so e.g. apply dropout
    t_epoch_strt = t.time()  # wall-clock start time of this epoch
    loss_list = []  # collects per-batch losses for this epoch
    model.change_batch_size_reset_states(train_batch_size)

    if onset_test_flag: #this is set to true at top of file
        # setup results_dict
        # Pre-allocate one (num_result_frames, prediction_length) array per
        # file/speaker pair, filled with NaN so frames never written to are
        # distinguishable from real zero-valued predictions.
        train_results_dict = dict()
        #            losses_dict = dict()
        train_results_lengths = train_dataset.get_results_lengths()
        for file_name in train_file_list:
            # g_f iterates the speaker channels selected for this dataset
            for g_f in data_select_dict[data_set_select]:
                # create new arrays for the onset results (the continuous predictions)
                train_results_dict[file_name + '/' + g_f] = np.zeros(
                    [train_results_lengths[file_name], prediction_length])
                train_results_dict[file_name + '/' + g_f][:] = np.nan