Code Example #1
    def testEventListChordsWithMelodies(self):
        note_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
        note_sequence.tempos.add(qpm=60.0)
        testing_lib.add_chords_to_sequence(note_sequence,
                                           [('N.C.', 0), ('C', 2), ('G7', 6)])
        note_sequence.total_time = 8.0

        melodies = [
            melodies_lib.Melody([60, -2, -2, -1],
                                start_step=0,
                                steps_per_quarter=1,
                                steps_per_bar=4),
            melodies_lib.Melody([62, -2, -2, -1],
                                start_step=4,
                                steps_per_quarter=1,
                                steps_per_bar=4),
        ]

        quantized_sequence = sequences_lib.quantize_note_sequence(
            note_sequence, steps_per_quarter=1)
        chords = chords_lib.event_list_chords(quantized_sequence, melodies)

        expected_chords = [[NO_CHORD, NO_CHORD, 'C', 'C'],
                           ['C', 'C', 'G7', 'G7']]

        self.assertEqual(expected_chords, chords)
Code Example #2
File: chords_lib_test.py Project: adarob/magenta
  def testEventListChordsWithMelodies(self):
    note_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    note_sequence.tempos.add(qpm=60.0)
    testing_lib.add_chords_to_sequence(
        note_sequence, [('N.C.', 0), ('C', 2), ('G7', 6)])
    note_sequence.total_time = 8.0

    melodies = [
        melodies_lib.Melody([60, -2, -2, -1],
                            start_step=0, steps_per_quarter=1, steps_per_bar=4),
        melodies_lib.Melody([62, -2, -2, -1],
                            start_step=4, steps_per_quarter=1, steps_per_bar=4),
    ]

    quantized_sequence = sequences_lib.quantize_note_sequence(
        note_sequence, steps_per_quarter=1)
    chords = chords_lib.event_list_chords(quantized_sequence, melodies)

    expected_chords = [
        [NO_CHORD, NO_CHORD, 'C', 'C'],
        ['C', 'C', 'G7', 'G7']
    ]

    self.assertEqual(expected_chords, chords)
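
Both variants of this test pin down the contract of chords_lib.event_list_chords: given a quantized NoteSequence carrying chord annotations and a list of event lists (here Melody objects), it returns one chord symbol per event step of each list. At qpm=60 with one step per quarter, the first melody covers steps 0-3 and the second steps 4-7, which is exactly what the expected chord lists encode. The following is a minimal sketch of that lookup semantics only, not the library's implementation; it substitutes plain (symbol, step) pairs and (start_step, num_steps) spans for the real types:

NO_CHORD = 'N.C.'

def event_list_chords_sketch(chord_annotations, spans):
    # For each span, report the chord active at every step: the latest
    # annotation whose step is <= the query step, or NO_CHORD before any.
    annotations = sorted(chord_annotations, key=lambda ann: ann[1])

    def chord_at(step):
        active = NO_CHORD
        for symbol, chord_step in annotations:
            if chord_step > step:
                break
            active = symbol
        return active

    return [[chord_at(step) for step in range(start, start + num_steps)]
            for start, num_steps in spans]

print(event_list_chords_sketch([('N.C.', 0), ('C', 2), ('G7', 6)],
                               [(0, 4), (4, 4)]))
# [['N.C.', 'N.C.', 'C', 'C'], ['C', 'C', 'G7', 'G7']]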
Code Example #3
    def _quantized_subsequence_to_tensors(self, quantized_subsequence):
        # Reject sequences with out-of-range pitches.
        if any(note.pitch < self._min_pitch or note.pitch > self._max_pitch
               for note in quantized_subsequence.notes):
            return [], []

        # Extract all instruments.
        tracks, _ = mm.extract_performances(
            quantized_subsequence,
            max_steps_truncate=self._max_steps_truncate,
            num_velocity_bins=self._num_velocity_bins,
            split_instruments=True)

        # Reject sequences with too few or too many instruments.
        if not (self._min_num_instruments <= len(tracks) <=
                self._max_num_instruments):
            return [], []

        # Sort tracks by program, with drums at the end.
        tracks = sorted(tracks, key=lambda t: (t.is_drum, t.program))

        chunk_size_steps = self._steps_per_bar * self._chunk_size_bars
        chunks = [[] for _ in range(self._max_num_chunks)]

        total_length = 0

        for track in tracks:
            # Make sure the track is the proper number of time steps.
            track.set_length(self._max_steps_truncate)

            # Split this track into chunks.
            def new_performance(quantized_sequence, start_step, track=track):
                return performance_lib.MetricPerformance(
                    quantized_sequence=quantized_sequence,
                    steps_per_quarter=(self._steps_per_quarter if
                                       quantized_sequence is None else None),
                    start_step=start_step,
                    num_velocity_bins=self._num_velocity_bins,
                    program=track.program,
                    is_drum=track.is_drum)

            track_chunks = split_performance(track,
                                             chunk_size_steps,
                                             new_performance,
                                             clip_tied_notes=True)

            assert len(track_chunks) == self._max_num_chunks

            track_chunk_lengths = [
                len(track_chunk) for track_chunk in track_chunks
            ]
            # Each track chunk needs room for program token and end token.
            if not all(l <= self._max_events_per_instrument - 2
                       for l in track_chunk_lengths):
                return [], []
            if not all(mm.MIN_MIDI_PROGRAM <= t.program <= mm.MAX_MIDI_PROGRAM
                       for t in track_chunks if not t.is_drum):
                return [], []

            total_length += sum(track_chunk_lengths)

            # Aggregate by chunk.
            for i, track_chunk in enumerate(track_chunks):
                chunks[i].append(track_chunk)

        # Reject sequences that are too short (in events).
        if total_length < self._min_total_events:
            return [], []

        num_programs = mm.MAX_MIDI_PROGRAM - mm.MIN_MIDI_PROGRAM + 1

        chunk_tensors = []
        chunk_chord_tensors = []

        for chunk_tracks in chunks:
            track_tensors = []

            for track in chunk_tracks:
                # Add a special token for program at the beginning of each track.
                track_tokens = [
                    self._performance_encoding.num_classes +
                    (num_programs if track.is_drum else track.program)
                ]
                # Then encode the performance events.
                for event in track:
                    track_tokens.append(
                        self._performance_encoding.encode_event(event))
                # Then add the end token.
                track_tokens.append(self.end_token)

                encoded_track = data.np_onehot(track_tokens, self.output_depth,
                                               self.output_dtype)
                track_tensors.append(encoded_track)

            if self._chord_encoding:
                # Extract corresponding chords for each track. The chord sequences may
                # be different for different tracks even though the underlying chords
                # are the same, as the performance event times will generally be
                # different.
                try:
                    track_chords = chords_lib.event_list_chords(
                        quantized_subsequence, chunk_tracks)
                except chords_lib.CoincidentChordsException:
                    return [], []

                track_chord_tensors = []

                try:
                    # Chord encoding for all tracks is inside this try block. If any
                    # track fails we need to skip the whole subsequence.

                    for chords in track_chords:
                        # Start with a pad token corresponding to the track program token.
                        track_chord_tokens = [self._control_pad_token]
                        # Then encode the chords.
                        for chord in chords:
                            track_chord_tokens.append(
                                self._chord_encoding.encode_event(chord))
                        # Then repeat the final chord for the track end token.
                        track_chord_tokens.append(track_chord_tokens[-1])

                        encoded_track_chords = data.np_onehot(
                            track_chord_tokens, self.control_depth,
                            self.control_dtype)
                        track_chord_tensors.append(encoded_track_chords)

                except (mm.ChordSymbolException, mm.ChordEncodingException):
                    return [], []

                chunk_chord_tensors.append(track_chord_tensors)

            chunk_tensors.append(track_tensors)

        return chunk_tensors, chunk_chord_tensors
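
A note on the token layout built above, with illustrative numbers (in the real converter, num_classes comes from the performance encoding): performance-event ids occupy [0, num_classes), each melodic MIDI program maps to num_classes + program, and every drum track shares the single id num_classes + num_programs. A toy version of that arithmetic, where NUM_PERFORMANCE_CLASSES is a made-up placeholder:

# Standard MIDI program range; NUM_PERFORMANCE_CLASSES stands in for
# self._performance_encoding.num_classes and is purely illustrative.
MIN_MIDI_PROGRAM, MAX_MIDI_PROGRAM = 0, 127
NUM_PERFORMANCE_CLASSES = 356

num_programs = MAX_MIDI_PROGRAM - MIN_MIDI_PROGRAM + 1  # 128

def program_token(program, is_drum):
    # Mirrors: num_classes + (num_programs if track.is_drum else track.program)
    return NUM_PERFORMANCE_CLASSES + (num_programs if is_drum else program)

print(program_token(0, False))    # 356: first melodic program token
print(program_token(127, False))  # 483: last melodic program token
print(program_token(9, True))     # 484: shared drum token, program ignored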
Code Example #4
  def _quantized_subsequence_to_tensors(self, quantized_subsequence):
    # Reject sequences with out-of-range pitches.
    if any(note.pitch < self._min_pitch or note.pitch > self._max_pitch
           for note in quantized_subsequence.notes):
      return [], []

    # Extract all instruments.
    tracks, _ = mm.extract_performances(
        quantized_subsequence,
        max_steps_truncate=self._max_steps_truncate,
        num_velocity_bins=self._num_velocity_bins,
        split_instruments=True)

    # Reject sequences with too few or too many instruments.
    if not (self._min_num_instruments <= len(tracks) <=
            self._max_num_instruments):
      return [], []

    # Sort tracks by program, with drums at the end.
    tracks = sorted(tracks, key=lambda t: (t.is_drum, t.program))

    chunk_size_steps = self._steps_per_bar * self._chunk_size_bars
    chunks = [[] for _ in range(self._max_num_chunks)]

    total_length = 0

    for track in tracks:
      # Make sure the track is the proper number of time steps.
      track.set_length(self._max_steps_truncate)

      # Split this track into chunks.
      def new_performance(quantized_sequence, start_step, track=track):
        steps_per_quarter = (
            self._steps_per_quarter if quantized_sequence is None else None)
        return performance_lib.MetricPerformance(
            quantized_sequence=quantized_sequence,
            steps_per_quarter=steps_per_quarter,
            start_step=start_step,
            num_velocity_bins=self._num_velocity_bins,
            program=track.program, is_drum=track.is_drum)
      track_chunks = split_performance(
          track, chunk_size_steps, new_performance, clip_tied_notes=True)

      assert len(track_chunks) == self._max_num_chunks

      track_chunk_lengths = [len(track_chunk) for track_chunk in track_chunks]
      # Each track chunk needs room for program token and end token.
      if not all(l <= self._max_events_per_instrument - 2
                 for l in track_chunk_lengths):
        return [], []
      if not all(mm.MIN_MIDI_PROGRAM <= t.program <= mm.MAX_MIDI_PROGRAM
                 for t in track_chunks if not t.is_drum):
        return [], []

      total_length += sum(track_chunk_lengths)

      # Aggregate by chunk.
      for i, track_chunk in enumerate(track_chunks):
        chunks[i].append(track_chunk)

    # Reject sequences that are too short (in events).
    if total_length < self._min_total_events:
      return [], []

    num_programs = mm.MAX_MIDI_PROGRAM - mm.MIN_MIDI_PROGRAM + 1

    chunk_tensors = []
    chunk_chord_tensors = []

    for chunk_tracks in chunks:
      track_tensors = []

      for track in chunk_tracks:
        # Add a special token for program at the beginning of each track.
        track_tokens = [self._performance_encoding.num_classes + (
            num_programs if track.is_drum else track.program)]
        # Then encode the performance events.
        for event in track:
          track_tokens.append(self._performance_encoding.encode_event(event))
        # Then add the end token.
        track_tokens.append(self.end_token)

        encoded_track = data.np_onehot(
            track_tokens, self.output_depth, self.output_dtype)
        track_tensors.append(encoded_track)

      if self._chord_encoding:
        # Extract corresponding chords for each track. The chord sequences may
        # be different for different tracks even though the underlying chords
        # are the same, as the performance event times will generally be
        # different.
        try:
          track_chords = chords_lib.event_list_chords(
              quantized_subsequence, chunk_tracks)
        except chords_lib.CoincidentChordsError:
          return [], []

        track_chord_tensors = []

        try:
          # Chord encoding for all tracks is inside this try block. If any
          # track fails we need to skip the whole subsequence.

          for chords in track_chords:
            # Start with a pad token corresponding to the track program token.
            track_chord_tokens = [self._control_pad_token]
            # Then encode the chords.
            for chord in chords:
              track_chord_tokens.append(
                  self._chord_encoding.encode_event(chord))
            # Then repeat the final chord for the track end token.
            track_chord_tokens.append(track_chord_tokens[-1])

            encoded_track_chords = data.np_onehot(
                track_chord_tokens, self.control_depth, self.control_dtype)
            track_chord_tensors.append(encoded_track_chords)

        except (mm.ChordSymbolError, mm.ChordEncodingError):
          return [], []

        chunk_chord_tensors.append(track_chord_tensors)

      chunk_tensors.append(track_tensors)

    return chunk_tensors, chunk_chord_tensors
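
Both versions hand their token lists to data.np_onehot(tokens, depth, dtype). Judging only from those call sites, it turns a length-n token list into an (n, depth) one-hot array of the requested dtype; a minimal numpy equivalent under that assumption:

import numpy as np

def np_onehot_sketch(indices, depth, dtype=np.int32):
    # One row per token, with a single 1 in column indices[i].
    onehot = np.zeros((len(indices), depth), dtype=dtype)
    onehot[np.arange(len(indices)), indices] = 1
    return onehot

print(np_onehot_sketch([2, 0, 1], depth=4))
# [[0 0 1 0]
#  [1 0 0 0]
#  [0 1 0 0]]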
Code Example #5
File: data.py Project: ThierryGrb/magenta
  def _to_tensors(self, note_sequence):
    """Converts NoteSequence to unique, one-hot tensor sequences."""
    try:
      if self._steps_per_quarter:
        quantized_sequence = mm.quantize_note_sequence(
            note_sequence, self._steps_per_quarter)
        if (mm.steps_per_bar_in_quantized_sequence(quantized_sequence) !=
            self._steps_per_bar):
          return ConverterTensors()
      else:
        quantized_sequence = mm.quantize_note_sequence_absolute(
            note_sequence, self._steps_per_second)
    except (mm.BadTimeSignatureException, mm.NonIntegerStepsPerBarException,
            mm.NegativeTimeException):
      return ConverterTensors()

    if self._chord_encoding and not any(
        ta.annotation_type == CHORD_SYMBOL
        for ta in quantized_sequence.text_annotations):
      # We are conditioning on chords but the sequence has no chord
      # annotations. Try to infer them.
      try:
        mm.infer_chords_for_sequence(quantized_sequence)
      except mm.ChordInferenceException:
        return ConverterTensors()

    event_lists, unused_stats = self._event_extractor_fn(quantized_sequence)
    if self._pad_to_total_time:
      for e in event_lists:
        e.set_length(len(e) + e.start_step, from_left=True)
        e.set_length(quantized_sequence.total_quantized_steps)
    if self._slice_steps:
      sliced_event_lists = []
      for l in event_lists:
        for i in range(self._slice_steps, len(l) + 1, self._steps_per_bar):
          sliced_event_lists.append(l[i - self._slice_steps: i])
    else:
      sliced_event_lists = event_lists

    if self._chord_encoding:
      try:
        sliced_chord_lists = chords_lib.event_list_chords(
            quantized_sequence, sliced_event_lists)
      except chords_lib.CoincidentChordsException:
        return ConverterTensors()
      sliced_event_lists = [zip(el, cl) for el, cl in zip(sliced_event_lists,
                                                          sliced_chord_lists)]

    # TODO(adarob): Consider handling the fact that different event lists can
    # be mapped to identical tensors by the encoder_decoder (e.g., Drums).

    unique_event_tuples = list(set(tuple(l) for l in sliced_event_lists))
    unique_event_tuples = self._maybe_sample_outputs(unique_event_tuples)

    if not unique_event_tuples:
      return ConverterTensors()

    control_seqs = []
    if self._chord_encoding:
      unique_event_tuples, unique_chord_tuples = zip(
          *[zip(*t) for t in unique_event_tuples if t])
      for t in unique_chord_tuples:
        try:
          chord_tokens = [self._chord_encoding.encode_event(e) for e in t]
          if self.end_token:
            # Repeat the last chord instead of using a special token; otherwise
            # the model may learn to rely on the special token to detect
            # endings.
            chord_tokens.append(chord_tokens[-1] if chord_tokens else
                                self._chord_encoding.encode_event(mm.NO_CHORD))
        except (mm.ChordSymbolException, mm.ChordEncodingException):
          return ConverterTensors()
        control_seqs.append(
            np_onehot(chord_tokens, self.control_depth, self.control_dtype))

    seqs = []
    for t in unique_event_tuples:
      seqs.append(np_onehot(
          [self._legacy_encoder_decoder.encode_event(e) for e in t] +
          ([] if self.end_token is None else [self.end_token]),
          self.output_depth, self.output_dtype))

    return ConverterTensors(inputs=seqs, outputs=seqs, controls=control_seqs)
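
One detail of _to_tensors that is easy to miss: the _slice_steps branch cuts each event list into windows of _slice_steps events that advance one bar (_steps_per_bar steps) at a time, so consecutive windows overlap whenever the slice is longer than a bar, and any trailing partial window is dropped. The same indexing on a plain list makes this visible:

def slice_windows(events, slice_steps, steps_per_bar):
    # Identical indexing to the converter's slicing loop: full-length
    # windows ending at i, for i = slice_steps, slice_steps + steps_per_bar, ...
    return [events[i - slice_steps:i]
            for i in range(slice_steps, len(events) + 1, steps_per_bar)]

print(slice_windows(list(range(8)), slice_steps=4, steps_per_bar=2))
# [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]]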