def _parse_nsynth(example):
  """Convert a parsed NSynth example into a (waveform, one-hot pitch) pair.

  Args:
    example: Mapping with an 'audio' waveform tensor and an integer 'pitch'
      tensor (already parsed upstream — schema assumed; confirm at the caller).

  Returns:
    Tuple of (cropped/padded waveform, one-hot pitch label).
  """
  audio = example['audio']
  pitch = example['pitch']
  # crop_or_pad operates on [batch, time, channels]; add dummy batch/channel
  # axes, then strip the batch axis off the result.
  audio = spectral_ops.crop_or_pad(
      audio[tf.newaxis, :, tf.newaxis], length, channels)[0]
  # Map the raw pitch through the lookup table to a dense class index.
  one_hot = tf.one_hot(label_index_table.lookup(pitch), depth=len(pitches))
  return audio, one_hot
def _parse_nsynth(record):
  """Parse one serialized NSynth tf.Example into training tensors.

  Args:
    record: Scalar string tensor holding a serialized tf.Example.

  Returns:
    Tuple of (waveform, one-hot pitch label, raw pitch label,
    instrument_source).
  """
  feature_spec = {
      'pitch': tf.FixedLenFeature([1], dtype=tf.int64),
      'audio': tf.FixedLenFeature([length], dtype=tf.float32),
      'qualities': tf.FixedLenFeature([10], dtype=tf.int64),
      'instrument_source': tf.FixedLenFeature([1], dtype=tf.int64),
      'instrument_family': tf.FixedLenFeature([1], dtype=tf.int64),
  }
  parsed = tf.parse_single_example(record, feature_spec)
  audio = parsed['audio']
  pitch = parsed['pitch']
  # crop_or_pad expects [batch, time, channels]; wrap, crop, then unwrap.
  audio = spectral_ops.crop_or_pad(
      audio[tf.newaxis, :, tf.newaxis], length, channels)[0]
  # pitch has shape [1], so one_hot yields [1, depth]; [0] squeezes to [depth].
  one_hot = tf.one_hot(label_index_table.lookup(pitch), depth=len(pitches))[0]
  return audio, one_hot, pitch, parsed['instrument_source']
def _parse_nsynth(record):
  """Deserialize an NSynth record into (wave, one-hot label, label, source)."""
  parsed = tf.parse_single_example(
      record,
      {
          'pitch': tf.FixedLenFeature([1], dtype=tf.int64),
          'audio': tf.FixedLenFeature([length], dtype=tf.float32),
          'qualities': tf.FixedLenFeature([10], dtype=tf.int64),
          'instrument_source': tf.FixedLenFeature([1], dtype=tf.int64),
          'instrument_family': tf.FixedLenFeature([1], dtype=tf.int64),
      })
  # Add batch/channel axes for crop_or_pad, then drop the batch axis.
  wave = parsed['audio'][tf.newaxis, :, tf.newaxis]
  wave = spectral_ops.crop_or_pad(wave, length, channels)[0]
  label = parsed['pitch']
  # label is shape [1]; indexing [0] squeezes the one-hot down to [depth].
  one_hot_label = tf.one_hot(
      label_index_table.lookup(label), depth=len(pitches))[0]
  return wave, one_hot_label, label, parsed['instrument_source']
def _parse_reverbsynth(record_batch):
  """Parse a serialized ReverbSynth tf.Example into (waveform, one-hot pitch).

  NOTE(review): despite the name, the [tf.newaxis, :, tf.newaxis] indexing and
  pitch[0] below treat the parsed audio as a single 1-D waveform — confirm
  callers pass a scalar serialized record, not a batch.
  """
  n_samples = 64000
  # Fixed-length feature schema for tf.io.parse_example.
  feature_description = {
      'audio': tf.io.FixedLenFeature([n_samples], tf.float32),
      'pitch': tf.io.FixedLenFeature(1, tf.int64),
  }
  parsed = tf.io.parse_example(record_batch, feature_description)
  wave = parsed['audio']
  # pitch is parsed with shape [1]; take the scalar label out.
  label = parsed['pitch'][0]
  # crop_or_pad works on [batch, time, channels]; wrap, crop, unwrap.
  wave = spectral_ops.crop_or_pad(
      wave[tf.newaxis, :, tf.newaxis], length, channels)[0]
  one_hot_label = tf.one_hot(
      label_index_table.lookup(label), depth=len(pitches))
  return wave, one_hot_label