def setUp(self):
    """Create the encoder under test: two trivial one-hot sub-encoders combined."""
    super().setUp()
    sub_encoders = [
        encoder_decoder.OneHotEventSequenceEncoderDecoder(
            testing_lib.TrivialOneHotEncoding(2)),
        encoder_decoder.OneHotEventSequenceEncoderDecoder(
            testing_lib.TrivialOneHotEncoding(3)),
    ]
    self.enc = encoder_decoder.MultipleEventSequenceEncoder(sub_encoders)
def setUp(self):
    """Create the encoder under test from two trivial one-hot encoders (sizes 2 and 3)."""
    super().setUp()
    first_encoder = encoder_decoder.OneHotEventSequenceEncoderDecoder(
        testing_lib.TrivialOneHotEncoding(2))
    second_encoder = encoder_decoder.OneHotEventSequenceEncoderDecoder(
        testing_lib.TrivialOneHotEncoding(3))
    self.enc = encoder_decoder.ConditionalEventSequenceEncoderDecoder(
        first_encoder, second_encoder)
def setUp(self):
    """Create a one-hot melody encoder over the pitch range 60..72."""
    super().setUp()
    self.min_note = 60
    self.max_note = 72
    self.transpose_to_key = 0
    one_hot_encoding = melody_encoder_decoder.MelodyOneHotEncoding(
        self.min_note, self.max_note)
    self.med = encoder_decoder.OneHotEventSequenceEncoderDecoder(one_hot_encoding)
def __init__(self, window_size_seconds, density_bin_ranges):
    """Initialize a NoteDensityPerformanceControlSignal.

    Args:
      window_size_seconds: Size of the sliding window, in seconds, over which
        note density (notes per second) is computed.
      density_bin_ranges: List of note-density bin boundaries used for
        quantization; the number of bins is one more than the list length.
    """
    self._window_size_seconds = window_size_seconds
    self._density_bin_ranges = density_bin_ranges
    density_encoding = self.NoteDensityOneHotEncoding(density_bin_ranges)
    self._encoder = encoder_decoder.OneHotEventSequenceEncoderDecoder(
        density_encoding)
def __init__(self, min_note=DEFAULT_MIN_NOTE, max_note=DEFAULT_MAX_NOTE) -> None:
    """Initialize the song container and its melody one-hot encoder/decoder.

    Args:
      min_note: Lowest pitch (inclusive) in the one-hot melody vocabulary.
      max_note: Highest pitch (exclusive) in the one-hot melody vocabulary.
    """
    self._songs = []
    self._song_count = 0
    self._min_note = min_note
    self._max_note = max_note
    self._encoder_decoder = encoder_decoder.OneHotEventSequenceEncoderDecoder(
        melody_encoder_decoder.MelodyOneHotEncoding(min_note, max_note)
    )
    # The vocabulary spans [min_note, max_note) plus two additional labels
    # (NO_EVENT and NOTE_OFF), hence max_note - min_note + 2 classes.
    # BUG FIX: the original wrote `assert (a, b)` — asserting a two-element
    # tuple, which is always truthy, so the checks could never fail. Compare
    # the values instead.
    assert self._encoder_decoder.input_size == max_note - min_note + 2
    assert self._encoder_decoder.num_classes == max_note - min_note + 2
def __init__(self, split_in_bar_chunks=4, min_note=DEFAULT_MIN_NOTE,
             max_note=DEFAULT_MAX_NOTE,
             steps_per_quarter=DEFAULT_STEPS_PER_QUARTER) -> None:
    """Initialize storage for lead/accompaniment song parts and the encoder.

    Args:
      split_in_bar_chunks: Number of bars per chunk when splitting songs.
      min_note: Lowest pitch (inclusive) in the one-hot melody vocabulary.
      max_note: Highest pitch (exclusive) in the one-hot melody vocabulary.
      steps_per_quarter: Quantization resolution in steps per quarter note.
    """
    self._song_parts_lead = []
    self._song_parts_accomp = []
    self._min_note = min_note
    self._max_note = max_note
    self._split_in_bar_chunks = split_in_bar_chunks
    self._steps_per_quarter = steps_per_quarter
    self.stats = dict()
    self.midi_names = []
    self.splits_per_midi = []
    # -2 and -1 are presumably the NO_EVENT / NOTE_OFF sentinels, followed by
    # the pitch range. NOTE(review): range() excludes max_note itself —
    # confirm that is intended.
    self.vocab = [-2, -1] + list(range(self._min_note, self._max_note))
    self.counter = 0
    self._encoder_decoder = encoder_decoder.OneHotEventSequenceEncoderDecoder(
        melody_encoder_decoder.MelodyOneHotEncoding(min_note, max_note))
    # The vocabulary spans [min_note, max_note) plus two additional labels
    # (NO_EVENT and NOTE_OFF), hence max_note - min_note + 2 classes.
    # BUG FIX: the original wrote `assert (a, b)` — asserting a two-element
    # tuple, which is always truthy, so the checks could never fail. Compare
    # the values instead.
    assert self._encoder_decoder.input_size == max_note - min_note + 2
    assert self._encoder_decoder.num_classes == max_note - min_note + 2
input_file2 = mel_path + 'mel2.mid'
out_file1 = mel_path + 'mel1_out.mid'
out_file2 = mel_path + 'mel2_out.mid'
out_file1_trans = mel_path + 'mel1_trans_out.mid'
out_file1_pred = mel_path + 'mel1_pred_out.mid'

# For ideas on consuming the one-hot encodings produced below, see
# magenta.models.shared.events_rnn_model.
melody1 = melodies_lib.midi_file_to_melody(input_file1)
seq = melody1.to_sequence()
midi_io.sequence_proto_to_midi_file(seq, out_file1)

min_note = 60
max_note = 72
transpose_to_key = 2

mel_encoder = encoder_decoder.OneHotEventSequenceEncoderDecoder(
    melody_encoder_decoder.MelodyOneHotEncoding(min_note, max_note))
# The vocabulary spans [min_note, max_note) plus two additional labels
# (NO_EVENT and NOTE_OFF), hence max_note - min_note + 2 classes.
# BUG FIX: the original wrote `assert(a, b)` — asserting a two-element tuple,
# which is always truthy, so the checks could never fail. Compare instead.
assert mel_encoder.input_size == max_note - min_note + 2
assert mel_encoder.num_classes == max_note - min_note + 2

# Squash the melody into the octave range determined by min_note/max_note and
# transpose it to `transpose_to_key` (the original comment claimed key 0, but
# the variable above is set to 2).
melody1.squash(min_note, max_note, transpose_to_key)
inputs, labels = mel_encoder.encode(melody1)
print(inputs)
print(labels)
def setUp(self):
    """Create the encoder under test from a trivial 3-class one-hot encoding."""
    super().setUp()
    encoding = testing_lib.TrivialOneHotEncoding(3, num_steps=range(3))
    self.enc = encoder_decoder.OneHotEventSequenceEncoderDecoder(encoding)
note_seq_raw = midi_io.midi_file_to_note_sequence(input_file)
note_seq_quan = note_seq.quantize_note_sequence(note_seq_raw, steps_per_quarter)

extracted_seqs, stats = polyphony_lib.extract_polyphonic_sequences(
    note_seq_quan)
# BUG FIX: the original wrote `len(extracted_seqs <= 1)`, which compares a
# list with an int (TypeError on Python 3) rather than checking the length.
# Docs state that only one polyphonic sequence is extracted.
assert len(extracted_seqs) <= 1
poly_seq = extracted_seqs[0]
print(poly_seq)

seq1 = poly_seq.to_sequence()  # qpm=60.0
midi_io.sequence_proto_to_midi_file(seq1, out_file)

poly_encoder = encoder_decoder.OneHotEventSequenceEncoderDecoder(
    polyphony_encoder_decoder.PolyphonyOneHotEncoding())

if len(note_seq_raw.key_signatures) > 1:
    print(
        "WARNING: more than one key signatures were found - only the first signature is used."
    )
original_key = note_seq_raw.key_signatures[0].key
transpose_interval = transpose_to_key - original_key

# PolyphonicSequence doesn't have a transpose function (unlike the monophonic
# Melody), so shift every pitched event manually.
# Also fixed `e.pitch != None` to the idiomatic identity check.
for e in poly_seq:
    if e.pitch is not None:
        e.pitch += transpose_interval

seq1_trans = poly_seq.to_sequence()  # qpm=60.0
midi_io.sequence_proto_to_midi_file(seq1_trans, out_file_trans)