"Choose a resolution factor. (e.g. Resolution_Factor=24: 1/8 Resolution, 12: 1/16 Resolution, 6: 1/32 Resolution, etc...)"
)
resolution_factor = int(
    input('Resolution Factor (recommended=12):'
          ))  #24: 1/8 Resolution, 12: 1/16 Resolution, 6: 1/32 Resolution

#Preprocessing: Get highest and lowest notes + maximum midi_ticks overall midi files
chord_lowest_note, chord_highest_note, chord_ticks = data_utils_train.getNoteRangeAndTicks(
    chord_train_files, res_factor=resolution_factor)
mel_lowest_note, mel_highest_note, mel_ticks = data_utils_train.getNoteRangeAndTicks(
    mel_train_files, res_factor=resolution_factor)

#Create Piano Roll Representation of the MIDI files. Return: 3-dimensional array or shape (num_midi_files, maximum num of ticks, note range)
chord_roll = data_utils_train.fromMidiCreatePianoRoll(
    chord_train_files,
    chord_ticks,
    chord_lowest_note,
    chord_highest_note,
    res_factor=resolution_factor)
mel_roll = data_utils_train.fromMidiCreatePianoRoll(
    mel_train_files,
    mel_ticks,
    mel_lowest_note,
    mel_highest_note,
    res_factor=resolution_factor)

#Double each chord_roll and mel_roll. Preprocessing to create Input and Target Vector for Network
double_chord_roll = data_utils_train.doubleRoll(chord_roll)
double_mel_roll = data_utils_train.doubleRoll(mel_roll)

#Create Network Inputs:
#Input_data Shape: (num of training samples, num of timesteps=sequence length, note range)
print("LSTM RNN Trainer:")
print()
chord_train_files = glob.glob("%s*.mid" %(chord_train_dir))
mel_train_files = glob.glob("%s*.mid" %(mel_train_dir))
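
#Optional sanity check (added sketch): report how many MIDI files were found and stop
#early if a training directory is empty, otherwise the preprocessing below runs on nothing.
print("Found %d chord files and %d melody files." % (len(chord_train_files), len(mel_train_files)))
if not chord_train_files or not mel_train_files:
    raise SystemExit("No .mid files found in %s or %s" % (chord_train_dir, mel_train_dir))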

print("Choose a resolution factor. (e.g. Resolution_Factor=24: 1/8 Resolution, 12: 1/16 Resolution, 6: 1/32 Resolution, etc...)")
resolution_factor = int(input('Resolution Factor (recommended=12):')) #24: 1/8 Resolution, 12: 1/16 Resolution, 6: 1/32 Resolution

#Preprocessing: get the highest and lowest notes and the maximum number of MIDI ticks across all MIDI files
chord_lowest_note, chord_highest_note, chord_ticks = data_utils_train.getNoteRangeAndTicks(chord_train_files, res_factor=resolution_factor)
mel_lowest_note, mel_highest_note, mel_ticks = data_utils_train.getNoteRangeAndTicks(mel_train_files, res_factor=resolution_factor)
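
#Optional diagnostic: show the detected note range and tick count per track type.
print("Chords:   notes %s-%s, ticks: %s" % (chord_lowest_note, chord_highest_note, chord_ticks))
print("Melodies: notes %s-%s, ticks: %s" % (mel_lowest_note, mel_highest_note, mel_ticks))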

#Create a piano roll representation of the MIDI files. Returns a 3-dimensional array of shape (num_midi_files, maximum num of ticks, note range)
chord_roll = data_utils_train.fromMidiCreatePianoRoll(chord_train_files, chord_ticks, chord_lowest_note, chord_highest_note,
                                                res_factor=resolution_factor)
mel_roll = data_utils_train.fromMidiCreatePianoRoll(mel_train_files, mel_ticks, mel_lowest_note, mel_highest_note,
                                              res_factor=resolution_factor)
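
#Optional diagnostic (assuming the 3-D array shape documented above):
print("chord_roll shape:", chord_roll.shape)
print("mel_roll shape:  ", mel_roll.shape)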

#Double each chord_roll and mel_roll as preprocessing for building the network's input and target vectors
double_chord_roll = data_utils_train.doubleRoll(chord_roll)
double_mel_roll = data_utils_train.doubleRoll(mel_roll)
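
#Note (assumption): doubling each roll presumably appends a second copy of every song so that
#windows of seq_length timesteps (= chord_ticks below) plus their next-step targets can be cut
#out without running off the end. Uncomment to compare shapes if doubleRoll keeps the array layout:
#print(chord_roll.shape, "->", double_chord_roll.shape)
#print(mel_roll.shape, "->", double_mel_roll.shape)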

#Create Network Inputs:
#Input_data Shape: (num of training samples, num of timesteps=sequence length, note range)
#Target_data Shape: (num of training samples, note range)
input_data, target_data = data_utils_train.createNetInputs(double_chord_roll, double_mel_roll, seq_length=chord_ticks)
input_data = input_data.astype(bool)    #np.bool was removed from NumPy; the builtin bool gives the same boolean dtype
target_data = target_data.astype(bool)
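
#Optional diagnostic: confirm the shapes documented above.
print("input_data shape: ", input_data.shape)   #(num samples, seq_length, note range)
print("target_data shape:", target_data.shape)  #(num samples, note range)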