def _ExampleToInputs(self, ex, truncated_length=0, crop_training_sequence_to_notes=False):
    """Convert a tf.Example into model inputs, optionally cropping the audio.

    Reads the 'id', 'sequence', and 'audio' features from *ex*, builds the
    spectrogram, pianoroll labels, and frame count, and forwards everything to
    self._DataToInputs.

    Args:
      ex: tf.Example with 'id', 'sequence' (serialized NoteSequence), and
        'audio' (wav bytes) features.
      truncated_length: passed through to self._DataToInputs.
      crop_training_sequence_to_notes: when True, crop the wav data to the
        span covered by the note sequence before computing features.

    Returns:
      Whatever self._DataToInputs returns for the derived tensors.
    """
    hp = copy.deepcopy(constants.DEFAULT_HPARAMS)
    hp.crop_training_sequence_to_notes = crop_training_sequence_to_notes

    features = ex.features.feature
    filename = features['id'].bytes_list.value[0]
    sequence, crop_beginning_seconds = data.preprocess_sequence(
        features['sequence'].bytes_list.value[0], hp)

    wav_data = features['audio'].bytes_list.value[0]
    if crop_training_sequence_to_notes:
        # Trim the audio so it starts at the first note and ends with the
        # sequence, keeping audio and labels aligned.
        wav_data = audio_io.crop_wav_data(
            wav_data, hp.sample_rate, crop_beginning_seconds,
            sequence.total_time)

    # Frames-per-second is needed twice below; compute it once.
    fps = data.hparams_frames_per_second(hp)

    spec = data.wav_to_spec(wav_data, hparams=hp)
    roll = sequences_lib.sequence_to_pianoroll(
        sequence,
        frames_per_second=fps,
        min_pitch=constants.MIN_MIDI_PITCH,
        max_pitch=constants.MAX_MIDI_PITCH,
        min_frame_occupancy_for_label=0.0,
        onset_mode='length_ms',
        onset_length_ms=32.,
        onset_delay_ms=0.)
    length = data.wav_to_num_frames(wav_data, frames_per_second=fps)

    return self._DataToInputs(spec, roll.active, roll.weights, length,
                              filename, truncated_length)
def _ExampleToInputs(self, ex, truncated_length=0):
    """Convert a tf.Example into model inputs.

    Reads the 'id', 'sequence', and 'audio' features from *ex*, derives the
    spectrogram, pianoroll labels, and frame count, and forwards them to
    self._DataToInputs.

    Args:
      ex: tf.Example with 'id', 'sequence' (serialized NoteSequence), and
        'audio' (wav bytes) features.
      truncated_length: passed through to self._DataToInputs.

    Returns:
      Whatever self._DataToInputs returns for the derived tensors.
    """
    hp = copy.deepcopy(constants.DEFAULT_HPARAMS)

    features = ex.features.feature
    filename = features['id'].bytes_list.value[0]
    sequence = data.preprocess_sequence(
        features['sequence'].bytes_list.value[0])
    wav_data = features['audio'].bytes_list.value[0]

    # Frames-per-second is needed twice below; compute it once.
    fps = data.hparams_frames_per_second(hp)

    spec = data.wav_to_spec(wav_data, hparams=hp)
    roll = sequences_lib.sequence_to_pianoroll(
        sequence,
        frames_per_second=fps,
        min_pitch=constants.MIN_MIDI_PITCH,
        max_pitch=constants.MAX_MIDI_PITCH,
        min_frame_occupancy_for_label=0.0,
        onset_mode='length_ms',
        onset_length_ms=32.,
        onset_delay_ms=0.)
    length = data.wav_to_num_frames(wav_data, frames_per_second=fps)

    return self._DataToInputs(spec, roll.active, roll.weights, length,
                              filename, truncated_length)