Code Example #1
    def testFromRelativeQuantizedNoteSequence(self):
        self.note_sequence.tempos.add(qpm=60.0)
        testing_lib.add_track_to_sequence(self.note_sequence, 0,
                                          [(60, 100, 0.0, 4.0),
                                           (64, 100, 0.0, 3.0),
                                           (67, 100, 1.0, 2.0)])
        quantized_sequence = sequences_lib.quantize_note_sequence(
            self.note_sequence, steps_per_quarter=100)
        performance = performance_lib.MetricPerformance(quantized_sequence)

        self.assertEqual(100, performance.steps_per_quarter)

        pe = performance_lib.PerformanceEvent
        expected_performance = [
            pe(pe.NOTE_ON, 60),
            pe(pe.NOTE_ON, 64),
            pe(pe.TIME_SHIFT, 100),
            pe(pe.NOTE_ON, 67),
            pe(pe.TIME_SHIFT, 100),
            pe(pe.NOTE_OFF, 67),
            pe(pe.TIME_SHIFT, 100),
            pe(pe.NOTE_OFF, 64),
            pe(pe.TIME_SHIFT, 100),
            pe(pe.NOTE_OFF, 60),
        ]
        self.assertEqual(expected_performance, list(performance))
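
Example #1 exercises the core conversion: a quantized NoteSequence goes in, and a flat stream of NOTE_ON / TIME_SHIFT / NOTE_OFF events comes out, where each TIME_SHIFT advances time by its value in quantized steps. Below is a minimal standalone sketch of the same conversion; it assumes the note_seq package (which hosts this performance_lib in current releases) is installed.

from note_seq import performance_lib, sequences_lib
from note_seq.protobuf import music_pb2

# One note at 60 QPM: a quarter note lasts exactly one second.
ns = music_pb2.NoteSequence()
ns.tempos.add(qpm=60.0)
ns.notes.add(pitch=60, velocity=100, start_time=0.0, end_time=1.0)
ns.total_time = 1.0

# steps_per_quarter=100 makes each quantized step 10 ms.
quantized = sequences_lib.quantize_note_sequence(ns, steps_per_quarter=100)
performance = performance_lib.MetricPerformance(quantized)

# Expected events: NOTE_ON(60), TIME_SHIFT(100), NOTE_OFF(60).
for event in performance:
    print(event)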
Code Example #2
def new_performance(quantized_sequence, start_step, track=track):
  return performance_lib.MetricPerformance(
      quantized_sequence=quantized_sequence,
      steps_per_quarter=(self._steps_per_quarter
                         if quantized_sequence is None else None),
      start_step=start_step,
      num_velocity_bins=self._num_velocity_bins,
      program=track.program, is_drum=track.is_drum)
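
In this factory (a closure from a track-extraction loop), the track=track default argument pins the current loop variable at definition time, and steps_per_quarter is passed explicitly only when quantized_sequence is None, since MetricPerformance otherwise reads the resolution from the sequence's quantization info. A minimal sketch of that event-by-event construction mode, with illustrative parameter values:

from note_seq import performance_lib

# Build an empty performance; with quantized_sequence=None, the timing
# resolution must be supplied via steps_per_quarter.
performance = performance_lib.MetricPerformance(
    quantized_sequence=None,
    steps_per_quarter=4,   # illustrative resolution
    start_step=0,
    num_velocity_bins=0,   # 0 means no VELOCITY events
    program=0,             # illustrative MIDI program
    is_drum=False)

pe = performance_lib.PerformanceEvent
performance.append(pe(pe.NOTE_ON, 60))
performance.append(pe(pe.TIME_SHIFT, 4))  # advance one quarter note
performance.append(pe(pe.NOTE_OFF, 60))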
Code Example #3
  def testToSequenceRelativeQuantized(self):
    self.note_sequence.tempos.add(qpm=60.0)
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=100)
    performance = performance_lib.MetricPerformance(quantized_sequence)
    performance_ns = performance.to_sequence(qpm=60.0)

    # Make comparison easier by sorting.
    performance_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))

    self.assertEqual(self.note_sequence, performance_ns)
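
Example #3 checks the round trip: at qpm=60 with steps_per_quarter=100, one step is 60 / (60 * 100) = 0.01 s, so decoded note times land exactly on the originals and the two protos compare equal after sorting. A condensed sketch of the same round trip, again assuming the note_seq package:

from note_seq import performance_lib, sequences_lib
from note_seq.protobuf import music_pb2

ns = music_pb2.NoteSequence()
ns.tempos.add(qpm=60.0)
ns.notes.add(pitch=60, velocity=100, start_time=0.0, end_time=1.0)
ns.total_time = 1.0

quantized = sequences_lib.quantize_note_sequence(ns, steps_per_quarter=100)
performance = performance_lib.MetricPerformance(quantized)

# Decoding at the same tempo reproduces the original note exactly.
round_trip = performance.to_sequence(qpm=60.0)
assert [(n.pitch, n.start_time, n.end_time) for n in round_trip.notes] == [
    (60, 0.0, 1.0)]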
Code Example #4
    def _to_single_notesequence(self, samples, controls):
        qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
        seconds_per_step = 60.0 / (self._steps_per_quarter * qpm)
        chunk_size_steps = self._steps_per_bar * self._chunk_size_bars

        seq = music_pb2.NoteSequence()
        seq.tempos.add().qpm = qpm
        seq.ticks_per_quarter = mm.STANDARD_PPQ

        tracks = [[] for _ in range(self._max_num_instruments)]
        all_timed_chords = []

        for chunk_index, encoded_chunk in enumerate(samples):
            chunk_step_offset = chunk_index * chunk_size_steps

                # Decode all tracks in this chunk into performance representation.
                # We don't convert to NoteSequence yet; the chunks are first
                # grouped by track and concatenated.
            for instrument, encoded_track in enumerate(encoded_chunk):
                track_tokens = np.argmax(encoded_track, axis=-1)

                # Trim to end token.
                if self.end_token in track_tokens:
                    idx = track_tokens.tolist().index(self.end_token)
                    track_tokens = track_tokens[:idx]

                # Handle program token. If there are extra program tokens, just use the
                # first one.
                program_tokens = [
                    token for token in track_tokens
                    if token >= self._performance_encoding.num_classes
                ]
                track_token_indices = [
                    idx for idx, t in enumerate(track_tokens)
                    if t < self._performance_encoding.num_classes
                ]
                track_tokens = [
                    track_tokens[idx] for idx in track_token_indices
                ]
                if not program_tokens:
                    program = 0
                    is_drum = False
                else:
                    program = (program_tokens[0] -
                               self._performance_encoding.num_classes)
                    if program == mm.MAX_MIDI_PROGRAM + 1:
                        # This is the drum program.
                        program = 0
                        is_drum = True
                    else:
                        is_drum = False

                # Decode the tokens into a performance track.
                track = performance_lib.MetricPerformance(
                    quantized_sequence=None,
                    steps_per_quarter=self._steps_per_quarter,
                    start_step=0,
                    num_velocity_bins=self._num_velocity_bins,
                    program=program,
                    is_drum=is_drum)
                for token in track_tokens:
                    track.append(
                        self._performance_encoding.decode_event(token))

                if controls is not None:
                    # Get the corresponding chord and time for each event in the track.
                    # This is a little tricky since we removed extraneous program tokens
                    # when constructing the track.
                    track_chord_tokens = np.argmax(
                        controls[chunk_index][instrument], axis=-1)
                    track_chord_tokens = [
                        track_chord_tokens[idx] for idx in track_token_indices
                    ]
                    chords = [
                        self._chord_encoding.decode_event(token)
                        for token in track_chord_tokens
                    ]
                    chord_times = [
                        (chunk_step_offset + step) * seconds_per_step
                        for step in track.steps if step < chunk_size_steps
                    ]
                    all_timed_chords += zip(chord_times, chords)

                # Make sure the track has the proper length in time steps.
                track.set_length(chunk_size_steps)

                # Aggregate by instrument.
                tracks[instrument].append(track)

        # Concatenate all of the track chunks for each instrument.
        for instrument, track_chunks in enumerate(tracks):
            if track_chunks:
                track = track_chunks[0]
                for t in track_chunks[1:]:
                    for e in t:
                        track.append(e)

                track_seq = track.to_sequence(instrument=instrument, qpm=qpm)
                seq.notes.extend(track_seq.notes)

        # Set total time.
        if seq.notes:
            seq.total_time = max(note.end_time for note in seq.notes)

        if self._chord_encoding:
            # Sort chord times from all tracks and add to the sequence.
            all_chord_times, all_chords = zip(*sorted(all_timed_chords))
            chords_lib.add_chords_to_sequence(seq, all_chords, all_chord_times)

        return seq
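
The trickiest step above is separating program tokens (IDs at or above the performance encoding's vocabulary size) from ordinary performance tokens, while remembering the surviving indices so the chord controls stay aligned with the events. A standalone sketch of that split, using a hypothetical vocabulary size of 8 and made-up tokens:

import numpy as np

num_classes = 8  # hypothetical performance-encoding vocabulary size
track_tokens = np.array([3, 9, 1, 7])  # 9 encodes a program; the rest are events

program_tokens = [t for t in track_tokens if t >= num_classes]
keep_indices = [i for i, t in enumerate(track_tokens) if t < num_classes]
track_tokens = [track_tokens[i] for i in keep_indices]

program = program_tokens[0] - num_classes if program_tokens else 0
# program == 1, keep_indices == [0, 2, 3]; the same indices re-select the
# aligned control (chord) tokens for the track.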