示例#1
0
def quantized_sequence_to_midi(sequence_path, model, model_dir):
    """Read a text-format NoteSequence file and write it out as a MIDI file.

    Args:
      sequence_path: Path to a text-format (protobuf text) NoteSequence file.
      model: Model name embedded in the output MIDI filename.
      model_dir: Directory under which a 'midi' subdirectory is created.

    Returns:
      Path of the written MIDI file.

    Raises:
      ValueError: If the sequence's filename does not contain '.mid'.
    """
    sequence = music_pb2.NoteSequence()
    with open(sequence_path, 'r') as f:
        ns = google.protobuf.text_format.Merge(str(f.read()), sequence)
    dirname = os.path.join(model_dir, 'midi')
    if not tf.gfile.Exists(dirname):
        tf.gfile.MakeDirs(dirname)
    original_midi_name = str(ns.filename)
    # Escape the dot so only a literal '.mid' suffix ends the stem: the
    # original pattern '(.*?).mid' let '.' match any character (e.g. 'xmid').
    match = re.search(r'(.*?)\.mid', original_midi_name)
    if match is None:
        # Fail with a clear message instead of an AttributeError on None.
        raise ValueError(
            'Sequence filename does not contain a .mid extension: %s'
            % original_midi_name)
    original_midi_name = match.group(1)
    quantized_midi_name = f'{original_midi_name}_{model}_quantized.mid'
    output_file = os.path.join(dirname, quantized_midi_name)
    sequence_proto_to_midi_file(ns, output_file)
    return output_file
示例#2
0
def concatenate_sequences(sequences, sequence_durations=None):
    """Join a list of NoteSequences end-to-end into a single sequence.

  Each input is time-shifted by the running offset (via shift_sequence_times)
  and folded into the result with the protobuf MergeFrom method, so singular
  global fields (e.g., ticks_per_quarter) end up holding the value from the
  last sequence merged. Redundant data is then stripped with
  remove_redundant_data.

  Args:
    sequences: A list of sequences to concatenate.
    sequence_durations: An optional list of sequence durations to use. If not
      specified, the total_time value will be used. Specifying durations is
      useful if the sequences to be concatenated are effectively longer than
      their total_time (e.g., a sequence that ends with a rest).

  Returns:
    A new sequence that is the result of concatenating *sequences.

  Raises:
    ValueError: If the length of sequences and sequence_durations do not match
        or if a specified duration is less than the total_time of the sequence.
  """
    if sequence_durations and len(sequences) != len(sequence_durations):
        raise ValueError(
            'sequences and sequence_durations must be the same length.')

    concatenated = music_pb2.NoteSequence()
    time_offset = 0
    for index, sequence in enumerate(sequences):
        if sequence_durations and sequence_durations[index] < sequence.total_time:
            raise ValueError(
                'Specified sequence duration ({}) must not be less than the '
                'total_time of the sequence ({})'.format(
                    sequence_durations[index], sequence.total_time))

        # The first sequence (offset zero) is merged as-is; later sequences
        # are shifted forward to start where the previous material ends.
        if time_offset > 0:
            concatenated.MergeFrom(
                shift_sequence_times(sequence, time_offset))
        else:
            concatenated.MergeFrom(sequence)

        if sequence_durations:
            time_offset += sequence_durations[index]
        else:
            time_offset = concatenated.total_time

    # Subsequence bookkeeping no longer applies once sequences are joined.
    concatenated.ClearField('subsequence_info')

    return remove_redundant_data(concatenated)
示例#3
0
def stretch_note_sequence(note_sequence, stretch_factor):
    """Apply a constant temporal stretch to a NoteSequence proto.

  Args:
    note_sequence: The NoteSequence to stretch.
    stretch_factor: How much to stretch the NoteSequence. Values greater than
        one increase the length of the NoteSequence (making it "slower"). Values
        less than one decrease the length of the NoteSequence (making it
        "faster").

  Returns:
    A stretched copy of the original NoteSequence.

  Raises:
    QuantizationStatusException: If the `note_sequence` is quantized. Only
        unquantized NoteSequences can be stretched.
  """
    if is_quantized_sequence(note_sequence):
        raise QuantizationStatusException(
            'Can only stretch unquantized NoteSequence.')

    result = music_pb2.NoteSequence()
    result.CopyFrom(note_sequence)

    # Identity stretch: nothing needs rescaling.
    if stretch_factor == 1.0:
        return result

    # Rescale note boundaries and the overall duration.
    for note in result.notes:
        note.start_time *= stretch_factor
        note.end_time *= stretch_factor
    result.total_time *= stretch_factor

    # Rescale the time of every other timed event type.
    timed_collections = [
        result.time_signatures, result.key_signatures, result.tempos,
        result.pitch_bends, result.control_changes, result.text_annotations
    ]
    for timed_event in itertools.chain(*timed_collections):
        timed_event.time *= stretch_factor

    # Tempo values scale inversely: a longer sequence plays slower.
    for tempo in result.tempos:
        tempo.qpm /= stretch_factor

    return result
示例#4
0
def shift_sequence_times(sequence, shift_seconds):
    """Returns a copy of a NoteSequence with all times moved forward.

  Only forward shifts are supported.

  Args:
    sequence: The NoteSequence to shift.
    shift_seconds: The amount to shift.

  Returns:
    A new NoteSequence with shifted times.

  Raises:
    ValueError: If the shift amount is invalid.
    QuantizationStatusException: If the sequence has already been quantized.
  """
    if shift_seconds <= 0:
        raise ValueError('Invalid shift amount: {}'.format(shift_seconds))
    if is_quantized_sequence(sequence):
        raise QuantizationStatusException(
            'Can shift only unquantized NoteSequences.')

    moved = music_pb2.NoteSequence()
    moved.CopyFrom(sequence)

    # The original frame of reference no longer applies after shifting.
    moved.ClearField('subsequence_info')

    # Shift note start and end times.
    for note in moved.notes:
        note.start_time += shift_seconds
        note.end_time += shift_seconds

    # Every other timed event type is shifted by the same amount.
    for timed_event in itertools.chain(
            moved.time_signatures, moved.key_signatures, moved.tempos,
            moved.pitch_bends, moved.control_changes, moved.text_annotations,
            moved.section_annotations):
        timed_event.time += shift_seconds

    moved.total_time += shift_seconds

    return moved
示例#5
0
def trim_note_sequence(sequence, start_time, end_time):
    """Trim notes from a NoteSequence to lie within a specified time range.

  Notes starting before `start_time` are not included. Notes ending after
  `end_time` are truncated.

  Args:
    sequence: The NoteSequence for which to trim notes.
    start_time: The float time in seconds after which all notes should begin.
    end_time: The float time in seconds before which all notes should end.

  Returns:
    A copy of `sequence` with all notes trimmed to lie between `start_time` and
    `end_time`.

  Raises:
    QuantizationStatusException: If the sequence has already been quantized.
  """
    if is_quantized_sequence(sequence):
        raise QuantizationStatusException(
            'Can only trim notes and chords for unquantized NoteSequence.')

    trimmed = music_pb2.NoteSequence()
    trimmed.CopyFrom(sequence)

    # Rebuild the note list, keeping only notes that start inside the window.
    del trimmed.notes[:]
    for note in sequence.notes:
        if start_time <= note.start_time < end_time:
            kept_note = trimmed.notes.add()
            kept_note.CopyFrom(note)
            # Truncate notes that ring past the end of the window.
            kept_note.end_time = min(note.end_time, end_time)

    trimmed.total_time = min(sequence.total_time, end_time)

    return trimmed
def run_with_flags(generator):
    """Generates pianoroll tracks and saves them as MIDI files.

  Uses the options specified by the flags defined in this module.

  Args:
    generator: The PianorollRnnNadeSequenceGenerator to use for generation.
  """
    if not FLAGS.output_dir:
        tf.logging.fatal('--output_dir required')
        return
    output_dir = os.path.expanduser(FLAGS.output_dir)

    primer_midi = None
    if FLAGS.primer_midi:
        primer_midi = os.path.expanduser(FLAGS.primer_midi)

    if not tf.gfile.Exists(output_dir):
        tf.gfile.MakeDirs(output_dir)

    # Build the priming sequence from the first flag that is set, in priority
    # order: pitches, pianoroll, MIDI file, then an empty default sequence.
    primer_sequence = None
    qpm = FLAGS.qpm if FLAGS.qpm else 60
    if FLAGS.primer_pitches:
        # A single chord: every requested pitch starts at time 0 and lasts one
        # quarter note at the chosen tempo.
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.tempos.add().qpm = qpm
        primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ
        for pitch in ast.literal_eval(FLAGS.primer_pitches):
            note = primer_sequence.notes.add()
            note.start_time = 0
            note.end_time = 60.0 / qpm
            note.pitch = pitch
            note.velocity = 100
        primer_sequence.total_time = primer_sequence.notes[-1].end_time
    elif FLAGS.primer_pianoroll:
        primer_pianoroll = magenta.music.PianorollSequence(
            events_list=ast.literal_eval(FLAGS.primer_pianoroll),
            steps_per_quarter=4,
            shift_range=True)
        primer_sequence = primer_pianoroll.to_sequence(qpm=qpm)
    elif primer_midi:
        primer_sequence = magenta.music.midi_file_to_sequence_proto(
            primer_midi)
        # Prefer the MIDI file's own tempo over the flag/default when present.
        if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
            qpm = primer_sequence.tempos[0].qpm
    else:
        tf.logging.warning(
            'No priming sequence specified. Defaulting to empty sequence.')
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.tempos.add().qpm = qpm
        primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ

    # Derive the total number of seconds to generate.
    seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
    generate_end_time = FLAGS.num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at the
    # end of the priming sequence and continuing until the sequence is num_steps
    # long.
    generator_options = generator_pb2.GeneratorOptions()
    # Set the start time to begin when the last note ends.
    generate_section = generator_options.generate_sections.add(
        start_time=primer_sequence.total_time, end_time=generate_end_time)

    # Nothing to generate if the primer already fills the requested length.
    if generate_section.start_time >= generate_section.end_time:
        tf.logging.fatal(
            'Priming sequence is longer than the total number of steps '
            'requested: Priming sequence length: %s, Total length '
            'requested: %s', generate_section.start_time, generate_end_time)
        return

    generator_options.args['beam_size'].int_value = FLAGS.beam_size
    generator_options.args['branch_factor'].int_value = FLAGS.branch_factor

    tf.logging.info('primer_sequence: %s', primer_sequence)
    tf.logging.info('generator_options: %s', generator_options)

    # Make the generate request num_outputs times and save the output as midi
    # files.
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    digits = len(str(FLAGS.num_outputs))
    for i in range(FLAGS.num_outputs):
        generated_sequence = generator.generate(primer_sequence,
                                                generator_options)

        # Zero-pad the index so output filenames sort in generation order.
        midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
        midi_path = os.path.join(output_dir, midi_filename)
        magenta.music.sequence_proto_to_midi_file(generated_sequence,
                                                  midi_path)

    tf.logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs, output_dir)
示例#7
0
    def _to_sequence(self,
                     seconds_per_step,
                     velocity,
                     instrument,
                     program,
                     max_note_duration=None):
        """Converts this performance event stream to a NoteSequence proto.

        Args:
          seconds_per_step: Seconds of wall time represented by one step.
          velocity: Default MIDI velocity for notes until a VELOCITY event
              overrides it.
          instrument: MIDI instrument assigned to every generated note.
          program: MIDI program assigned to every generated note.
          max_note_duration: If set, clip each note's duration (seconds) to
              this maximum.

        Returns:
          A NoteSequence proto with one note per NOTE_ON/NOTE_OFF pair, plus
          notes for any pitches still sounding when the events run out.

        Raises:
          ValueError: If an event of unknown type is encountered.
        """
        sequence_start_time = self.start_step * seconds_per_step

        sequence = music_pb2.NoteSequence()
        sequence.ticks_per_quarter = STANDARD_PPQ

        # Current position in steps, advanced only by TIME_SHIFT events.
        step = 0

        if self._num_velocity_bins:
            # Width of one velocity bin across the full MIDI velocity range.
            velocity_bin_size = int(
                math.ceil((MAX_MIDI_VELOCITY - MIN_MIDI_VELOCITY + 1) /
                          self._num_velocity_bins))

        # Map pitch to list because one pitch may be active multiple times.
        pitch_start_steps_and_velocities = collections.defaultdict(list)
        for i, event in enumerate(self):
            if event.event_type == PerformanceEvent.NOTE_ON:
                pitch_start_steps_and_velocities[event.event_value].append(
                    (step, velocity))
            elif event.event_type == PerformanceEvent.NOTE_OFF:
                if not pitch_start_steps_and_velocities[event.event_value]:
                    tf.logging.debug(
                        'Ignoring NOTE_OFF at position %d with no previous NOTE_ON'
                        % i)
                else:
                    # Create a note for the pitch that is now ending.
                    # Pops the earliest (FIFO) open occurrence of this pitch.
                    pitch_start_step, pitch_velocity = pitch_start_steps_and_velocities[
                        event.event_value][0]
                    pitch_start_steps_and_velocities[event.event_value] = (
                        pitch_start_steps_and_velocities[event.event_value][1:]
                    )
                    # Zero-duration notes are dropped rather than emitted.
                    if step == pitch_start_step:
                        tf.logging.debug(
                            'Ignoring note with zero duration at step %d' %
                            step)
                        continue
                    note = sequence.notes.add()
                    note.start_time = (pitch_start_step * seconds_per_step +
                                       sequence_start_time)
                    note.end_time = step * seconds_per_step + sequence_start_time
                    if (max_note_duration and note.end_time - note.start_time >
                            max_note_duration):
                        note.end_time = note.start_time + max_note_duration
                    note.pitch = event.event_value
                    note.velocity = pitch_velocity
                    note.instrument = instrument
                    note.program = program
                    # Track the running end of the sequence.
                    if note.end_time > sequence.total_time:
                        sequence.total_time = note.end_time
            elif event.event_type == PerformanceEvent.TIME_SHIFT:
                step += event.event_value
            elif event.event_type == PerformanceEvent.VELOCITY:
                # VELOCITY events are only meaningful with velocity bins.
                assert self._num_velocity_bins
                velocity = (MIN_MIDI_VELOCITY +
                            (event.event_value - 1) * velocity_bin_size)
            else:
                raise ValueError('Unknown event type: %s' % event.event_type)

        # There could be remaining pitches that were never ended. End them now
        # and create notes.
        for pitch in pitch_start_steps_and_velocities:
            for pitch_start_step, pitch_velocity in pitch_start_steps_and_velocities[
                    pitch]:
                if step == pitch_start_step:
                    tf.logging.debug(
                        'Ignoring note with zero duration at step %d' % step)
                    continue
                note = sequence.notes.add()
                note.start_time = (pitch_start_step * seconds_per_step +
                                   sequence_start_time)
                note.end_time = step * seconds_per_step + sequence_start_time
                if (max_note_duration and
                        note.end_time - note.start_time > max_note_duration):
                    note.end_time = note.start_time + max_note_duration
                note.pitch = pitch
                note.velocity = pitch_velocity
                note.instrument = instrument
                note.program = program
                if note.end_time > sequence.total_time:
                    sequence.total_time = note.end_time

        return sequence
def run_with_flags(generator):
  """Generates performance tracks and saves them as MIDI files.

  Uses the options specified by the flags defined in this module.

  Args:
    generator: The PerformanceRnnSequenceGenerator to use for generation.
  """
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return
  output_dir = os.path.expanduser(FLAGS.output_dir)

  primer_midi = None
  if FLAGS.primer_midi:
    primer_midi = os.path.expanduser(FLAGS.primer_midi)

  if not tf.gfile.Exists(output_dir):
    tf.gfile.MakeDirs(output_dir)

  # Build the priming sequence from the first flag that is set, in priority
  # order: pitches, melody, MIDI file, then an empty default sequence.
  primer_sequence = None
  if FLAGS.primer_pitches:
    # A single chord: every requested pitch starts at time 0 and lasts one
    # quarter note at the default tempo.
    primer_sequence = music_pb2.NoteSequence()
    primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ
    for pitch in ast.literal_eval(FLAGS.primer_pitches):
      note = primer_sequence.notes.add()
      note.start_time = 0
      note.end_time = 60.0 / magenta.music.DEFAULT_QUARTERS_PER_MINUTE
      note.pitch = pitch
      note.velocity = 100
      primer_sequence.total_time = note.end_time
  elif FLAGS.primer_melody:
    primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))
    primer_sequence = primer_melody.to_sequence()
  elif primer_midi:
    primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
  else:
    tf.logging.warning(
        'No priming sequence specified. Defaulting to empty sequence.')
    primer_sequence = music_pb2.NoteSequence()
    primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ

  # Derive the total number of seconds to generate.
  seconds_per_step = 1.0 / generator.steps_per_second
  generate_end_time = FLAGS.num_steps * seconds_per_step

  # Specify start/stop time for generation based on starting generation at the
  # end of the priming sequence and continuing until the sequence is num_steps
  # long.
  generator_options = generator_pb2.GeneratorOptions()
  # Set the start time to begin when the last note ends.
  generate_section = generator_options.generate_sections.add(
      start_time=primer_sequence.total_time,
      end_time=generate_end_time)

  # Nothing to generate if the primer already fills the requested length.
  if generate_section.start_time >= generate_section.end_time:
    tf.logging.fatal(
        'Priming sequence is longer than the total number of steps '
        'requested: Priming sequence length: %s, Total length '
        'requested: %s',
        generate_section.start_time, generate_end_time)
    return

  # Warn (but continue) when a conditioning flag is set that the generator
  # is not configured to honor; the flag value is simply ignored.
  if (FLAGS.notes_per_second is not None and
      not generator.note_density_conditioning):
    tf.logging.warning(
        'Notes per second requested via flag, but generator is not set up to '
        'condition on note density. Requested note density will be ignored: %s',
        FLAGS.notes_per_second)

  if (FLAGS.pitch_class_histogram is not None and
      not generator.pitch_histogram_conditioning):
    tf.logging.warning(
        'Pitch class histogram requested via flag, but generator is not set up '
        'to condition on pitch class histogram. Requested pitch class '
        'histogram will be ignored: %s', FLAGS.pitch_class_histogram)

  if (FLAGS.disable_conditioning is not None and
      not generator.optional_conditioning):
    tf.logging.warning(
        'Disable conditioning flag set, but generator is not set up for '
        'optional conditioning. Requested disable conditioning flag will be '
        'ignored: %s', FLAGS.disable_conditioning)

  # Forward conditioning flags (when provided) as generator args.
  if FLAGS.notes_per_second is not None:
    generator_options.args['note_density'].string_value = FLAGS.notes_per_second
  if FLAGS.pitch_class_histogram is not None:
    generator_options.args['pitch_histogram'].string_value = (
        FLAGS.pitch_class_histogram)
  if FLAGS.disable_conditioning is not None:
    generator_options.args['disable_conditioning'].string_value = (
        FLAGS.disable_conditioning)

  generator_options.args['temperature'].float_value = FLAGS.temperature
  generator_options.args['beam_size'].int_value = FLAGS.beam_size
  generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
  generator_options.args[
      'steps_per_iteration'].int_value = FLAGS.steps_per_iteration

  tf.logging.debug('primer_sequence: %s', primer_sequence)
  tf.logging.debug('generator_options: %s', generator_options)

  # Make the generate request num_outputs times and save the output as midi
  # files.
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
  digits = len(str(FLAGS.num_outputs))
  for i in range(FLAGS.num_outputs):
    generated_sequence = generator.generate(primer_sequence, generator_options)

    # Zero-pad the index so output filenames sort in generation order.
    midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
    midi_path = os.path.join(output_dir, midi_filename)
    magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)

  tf.logging.info('Wrote %d MIDI files to %s',
                  FLAGS.num_outputs, output_dir)
示例#9
0
    def to_sequence(self,
                    velocity=100,
                    instrument=0,
                    program=0,
                    qpm=constants.DEFAULT_QUARTERS_PER_MINUTE,
                    base_note_sequence=None):
        """Renders this PianorollSequence as a NoteSequence proto.

        Args:
          velocity: Midi velocity to give each note. Between 1 and 127
              (inclusive).
          instrument: Midi instrument to give each note.
          program: Midi program to give each note.
          qpm: Quarter notes per minute (float).
          base_note_sequence: A NoteSequence to use as a starting point. Must
              match the specified qpm.

        Raises:
          ValueError: if an unknown event is encountered.

        Returns:
          A NoteSequence proto.
        """
        seconds_per_step = 60.0 / qpm / self._steps_per_quarter
        sequence_start_time = self.start_step * seconds_per_step

        if base_note_sequence:
            sequence = copy.deepcopy(base_note_sequence)
            if sequence.tempos[0].qpm != qpm:
                raise ValueError(
                    'Supplied QPM (%d) does not match QPM of base_note_sequence (%d)'
                    % (qpm, sequence.tempos[0].qpm))
        else:
            sequence = music_pb2.NoteSequence()
            sequence.tempos.add().qpm = qpm
            sequence.ticks_per_quarter = STANDARD_PPQ

        step = 0
        # Notes that have started but not yet ended, keyed by pitch offset.
        active_notes = {}
        for step, frame in enumerate(self):
            sounding = set(frame)
            previously_sounding = set(active_notes)

            # Close notes whose pitch is absent from this frame.
            for ended_pitch in previously_sounding - sounding:
                active_notes.pop(ended_pitch).end_time = (
                    step * seconds_per_step + sequence_start_time)

            # Open a new note for each pitch that just appeared.
            for started_pitch in sounding - previously_sounding:
                started_note = sequence.notes.add()
                started_note.start_time = (
                    step * seconds_per_step + sequence_start_time)
                started_note.pitch = started_pitch + self._min_pitch
                started_note.velocity = velocity
                started_note.instrument = instrument
                started_note.program = program
                active_notes[started_pitch] = started_note

        # Still-open notes sound through one extra step past the last frame.
        final_step = step + 1 if active_notes else step
        for unclosed_note in active_notes.values():
            unclosed_note.end_time = (final_step * seconds_per_step +
                                      sequence_start_time)

        sequence.total_time = seconds_per_step * final_step + sequence_start_time
        if sequence.notes:
            assert sequence.total_time >= sequence.notes[-1].end_time

        return sequence
示例#10
0
def extract_subsequence(sequence,
                        start_time,
                        end_time,
                        sustain_control_number=64):
    """Extracts a subsequence from a NoteSequence.

  Notes starting before `start_time` are not included. Notes ending after
  `end_time` are truncated. Time signature, tempo, key signature, chord changes,
  and sustain pedal events outside the specified time range are removed;
  however, the most recent event of each of these types prior to `start_time` is
  included at `start_time`. This means that e.g. if a time signature of 3/4 is
  specified in the original sequence prior to `start_time` (and is not followed
  by a different time signature), the extracted subsequence will include a 3/4
  time signature event at `start_time`. Pitch bends and control changes other
  than sustain are removed entirely.

  The extracted subsequence is shifted to start at time zero.

  Args:
    sequence: The NoteSequence to extract a subsequence from.
    start_time: The float time in seconds to start the subsequence.
    end_time: The float time in seconds to end the subsequence.
    sustain_control_number: The MIDI control number for sustain pedal.

  Returns:
    A new NoteSequence containing the subsequence of `sequence` from the
    specified time range.

  Raises:
    QuantizationStatusException: If the sequence has already been quantized.
    ValueError: If `start_time` is past the end of `sequence`.
  """
    if is_quantized_sequence(sequence):
        raise QuantizationStatusException(
            'Can only extract subsequence from unquantized NoteSequence.')

    if start_time >= sequence.total_time:
        raise ValueError('Cannot extract subsequence past end of sequence.')

    subsequence = music_pb2.NoteSequence()
    subsequence.CopyFrom(sequence)

    # Recomputed below as the max end time of the extracted notes.
    subsequence.total_time = 0.0

    # Extract notes.
    del subsequence.notes[:]
    for note in sequence.notes:
        # Keep only notes that start inside [start_time, end_time).
        if note.start_time < start_time or note.start_time >= end_time:
            continue
        new_note = subsequence.notes.add()
        new_note.CopyFrom(note)
        # Shift to the new time origin and truncate at the window's end.
        new_note.start_time -= start_time
        new_note.end_time = min(note.end_time, end_time) - start_time
        if new_note.end_time > subsequence.total_time:
            subsequence.total_time = new_note.end_time

    # Extract time signatures, key signatures, tempos, and chord changes (other
    # text annotations are deleted).

    del subsequence.time_signatures[:]
    del subsequence.key_signatures[:]
    del subsequence.tempos[:]
    del subsequence.text_annotations[:]

    event_types = [
        music_pb2.NoteSequence.TimeSignature,
        music_pb2.NoteSequence.KeySignature, music_pb2.NoteSequence.Tempo,
        music_pb2.NoteSequence.TextAnnotation
    ]
    events_by_type = [
        sequence.time_signatures, sequence.key_signatures, sequence.tempos,
        [
            annotation for annotation in sequence.text_annotations
            if annotation.annotation_type == CHORD_SYMBOL
        ]
    ]
    new_event_containers = [
        subsequence.time_signatures, subsequence.key_signatures,
        subsequence.tempos, subsequence.text_annotations
    ]

    for event_type, events, container in zip(event_types, events_by_type,
                                             new_event_containers):
        # Track the most recent event at or before start_time so its state can
        # be re-established at the new time origin.
        initial_event = None
        for event in sorted(events, key=lambda event: event.time):
            if event.time <= start_time:
                initial_event = event_type()
                initial_event.CopyFrom(event)
                continue
            elif event.time >= end_time:
                # Events are sorted by time, so nothing later can qualify.
                break
            new_event = container.add()
            new_event.CopyFrom(event)
            new_event.time -= start_time
        if initial_event:
            # Re-insert the pre-window state at time zero.
            initial_event.time = 0.0
            container.extend([initial_event])
        container.sort(key=lambda event: event.time)

    # Extract sustain pedal events (other control changes are deleted). Sustain
    # pedal state prior to the extracted subsequence is maintained per-instrument.
    del subsequence.control_changes[:]
    sustain_events = [
        cc for cc in sequence.control_changes
        if cc.control_number == sustain_control_number
    ]
    # Most recent pre-window sustain event, keyed by instrument.
    initial_sustain_events = {}
    for sustain_event in sorted(sustain_events, key=lambda event: event.time):
        if sustain_event.time <= start_time:
            initial_sustain_event = music_pb2.NoteSequence.ControlChange()
            initial_sustain_event.CopyFrom(sustain_event)
            initial_sustain_events[
                sustain_event.instrument] = initial_sustain_event
            continue
        elif sustain_event.time >= end_time:
            break
        new_sustain_event = subsequence.control_changes.add()
        new_sustain_event.CopyFrom(sustain_event)
        new_sustain_event.time -= start_time
    for _, initial_sustain_event in initial_sustain_events.items():
        # Re-establish each instrument's sustain state at time zero.
        initial_sustain_event.time = 0.0
        subsequence.control_changes.extend([initial_sustain_event])
    subsequence.control_changes.sort(key=lambda cc: cc.time)

    # Pitch bends are deleted entirely.
    del subsequence.pitch_bends[:]

    # Record where this subsequence came from within the original sequence.
    subsequence.subsequence_info.start_time_offset = start_time
    subsequence.subsequence_info.end_time_offset = (sequence.total_time -
                                                    start_time -
                                                    subsequence.total_time)

    return subsequence
示例#11
0
def midi_to_sequence_proto(midi_data):
    """Convert MIDI file contents to a tensorflow.magenta.NoteSequence proto.

    Converts a MIDI file encoded as a string (or an already-parsed
    pretty_midi.PrettyMIDI object) into a tensorflow.magenta.NoteSequence
    proto. Decoding errors are very common when working with large sets of
    MIDI files, so be sure to handle MIDIConversionError exceptions.

    In addition to notes, time/key signatures, tempos, pitch bends and
    control changes, this variant groups near-simultaneous note onsets and
    records short figures (trills/rolled chords, etc.) in
    ``sequence.ornaments`` via the nested ``add_notes`` helper.

    Args:
      midi_data: A string containing the contents of a MIDI file or populated
          pretty_midi.PrettyMIDI object.

    Returns:
      A tensorflow.magenta.NoteSequence proto.

    Raises:
      MIDIConversionError: If the MIDI data cannot be decoded, a time
          signature denominator overflows int32, an invalid key-signature
          mode is supplied, or ornament grouping encounters two consecutive
          chord groups.
    """

    # In practice many MIDI files cannot be decoded with pretty_midi. Catch all
    # errors here and try to log a meaningful message. So many different
    # exceptions are raised in pretty_midi.PrettyMidi that it is cumbersome to
    # catch them all only for the purpose of error logging.
    # pylint: disable=bare-except
    if isinstance(midi_data, pretty_midi.PrettyMIDI):
        midi = midi_data
    else:
        try:
            midi = pretty_midi.PrettyMIDI(BytesIO(midi_data))
        except:
            raise MIDIConversionError('Midi decoding error %s: %s' %
                                      (sys.exc_info()[0], sys.exc_info()[1]))
    # pylint: enable=bare-except

    sequence = music_pb2.NoteSequence()

    # Populate header.
    sequence.ticks_per_quarter = midi.resolution
    sequence.source_info.parser = music_pb2.NoteSequence.SourceInfo.PRETTY_MIDI
    sequence.source_info.encoding_type = (
        music_pb2.NoteSequence.SourceInfo.MIDI)

    # Populate time signatures.
    for midi_time in midi.time_signature_changes:
        time_signature = sequence.time_signatures.add()
        time_signature.time = midi_time.time
        time_signature.numerator = midi_time.numerator
        try:
            # Denominator can be too large for int32.
            time_signature.denominator = midi_time.denominator
        except ValueError:
            raise MIDIConversionError('Invalid time signature denominator %d' %
                                      midi_time.denominator)

    # Populate key signatures.
    for midi_key in midi.key_signature_changes:
        key_signature = sequence.key_signatures.add()
        key_signature.time = midi_key.time
        # NOTE(review): the key is deliberately forced to 0 (C) here instead
        # of midi_key.key_number % 12 — presumably everything is transposed
        # elsewhere; confirm before relying on key_signature.key.
        key_signature.key = 0
        midi_mode = midi_key.key_number // 12
        if midi_mode == 0:
            key_signature.mode = key_signature.MAJOR
        elif midi_mode == 1:
            key_signature.mode = key_signature.MINOR
        else:
            raise MIDIConversionError('Invalid midi_mode %i' % midi_mode)

    # Populate tempo changes.
    tempo_times, tempo_qpms = midi.get_tempo_changes()
    for time_in_seconds, tempo_in_qpm in zip(tempo_times, tempo_qpms):
        tempo = sequence.tempos.add()
        tempo.time = time_in_seconds
        tempo.qpm = tempo_in_qpm

    # Populate notes by gathering them all from the midi's instruments.
    # Also set the sequence.total_time as the max end time in the notes.
    midi_notes = []
    midi_pitch_bends = []
    midi_control_changes = []
    for num_instrument, midi_instrument in enumerate(midi.instruments):
        for midi_note in midi_instrument.notes:
            if not sequence.total_time or midi_note.end > sequence.total_time:
                sequence.total_time = midi_note.end
            midi_notes.append((midi_instrument.program, num_instrument,
                               midi_instrument.is_drum, midi_note))
        for midi_pitch_bend in midi_instrument.pitch_bends:
            midi_pitch_bends.append((midi_instrument.program, num_instrument,
                                     midi_instrument.is_drum, midi_pitch_bend))
        for midi_control_change in midi_instrument.control_changes:
            midi_control_changes.append(
                (midi_instrument.program, num_instrument,
                 midi_instrument.is_drum, midi_control_change))

    def add_notes(midi_notes, min_steps):
        """Add notes to `sequence`, detecting ornaments among close onsets.

        Notes are grouped by identical start time; runs of consecutive
        groups whose onsets are closer than `min_steps` seconds are
        recorded as ornaments in sequence.ornaments.
        """
        # Group notes sharing the same onset time (groupby requires the
        # data to be pre-sorted by the same key).
        groups = []
        uniquekeys = []
        data = sorted(midi_notes, key=lambda x: x[3].start)
        for k, g in groupby(data, key=lambda x: x[3].start):
            groups.append(list(g))
            uniquekeys.append(k)

        # (name, value) pairs of the OrnType enum, ordered by value.
        orns = sequence.Ornament.OrnType.items()

        def find_ornament(diffs, idx=0, progress=0):
            # Recursively consume onset gaps smaller than min_steps; the
            # number consumed selects the ornament type.
            if not diffs:
                return (idx + progress, orns[progress - 1])
            if diffs[0] > min_steps:
                return (idx + progress, orns[progress])
            else:
                progress += 1
                return find_ornament(diffs[1:], idx, progress)

        def add_orn(idx, orn_type):
            # Build an Ornament proto from the groups that make up the
            # detected figure, picking a single note per chord group.
            _idx = idx + 1
            steps_to_prev = orns.index(orn_type) + 1
            _groups = groups[_idx - steps_to_prev:_idx]
            orn_start_grp = _groups[0]
            orn_next_grp = _groups[1]
            orn_end_grp = _groups[-1]
            orn_notes = []

            if len(orn_start_grp) != 1:
                if len(orn_next_grp) != 1:
                    # FIX: raising a string is a TypeError in Python 3;
                    # raise the module's conversion error instead.
                    raise MIDIConversionError('1st and 2nd are both chords?')
                else:
                    # Chord start: keep the chord note nearest (below) the
                    # pitch of the next group's single note.
                    data = [elm[3].pitch for elm in orn_start_grp]
                    location = bisect.bisect_left(data,
                                                  orn_next_grp[0][3].pitch)
                    result = orn_start_grp[location - 1]
                    orn_notes.append([result])
            else:
                orn_notes.append(orn_start_grp)

            for i, g in enumerate(_groups[1:]):
                if len(g) != 1:
                    data = [elm[3].pitch for elm in g]
                    location = bisect.bisect_left(data, _groups[i][0][3].pitch)
                    result = g[location]
                    orn_notes.append([result])
                else:
                    orn_notes.append(g)

            orn_notes = list(chain.from_iterable(orn_notes))

            # Three or more groups may actually be a rolled (arpeggiated)
            # chord: if the pitches name a chord, snap all notes to a
            # common start/end.
            rolled_chord = None
            if len(_groups) >= 3:
                try:
                    rolled_chord = pitches_to_chord_symbol(
                        sorted([o[3].pitch for o in orn_notes]))
                    rolled_start = min(orn_notes,
                                       key=lambda o: o[3].start)[3].start
                    rolled_end = max(orn_notes, key=lambda o: o[3].end)[3].end
                    for onoe in orn_notes:
                        onoe[3].start = rolled_start
                        onoe[3].end = rolled_end
                except ChordSymbolException:
                    pass

            program, instrument, is_drum, midi_note = orn_notes[0]
            orn = sequence.ornaments.add()
            # orns[5] is the rolled-chord (arpeggio) type; otherwise the
            # type is selected by the number of notes in the figure.
            orn.orn_type = orns[5][1] if rolled_chord else orns[len(orn_notes)
                                                                - 1][1]
            orn.program = program
            orn.instrument = instrument
            orn.is_drum = is_drum
            orn.start_time = midi_note.start
            orn.end_time = orn_notes[-1][
                3].end  # should be minus 1 quantization steps from end note start
            orn.start_pitch = midi_note.pitch
            orn.end_pitch = orn_notes[-1][3].pitch
            orn.velocity = midi_note.velocity

            return orn, orn_notes, rolled_chord

        def add_note(note_group):
            # Append every note of the group to sequence.notes and return
            # the freshly added protos.
            for note in note_group:
                program, instrument, is_drum, midi_note = note
                note = sequence.notes.add()
                note.instrument = instrument
                note.program = program
                note.start_time = midi_note.start
                note.end_time = midi_note.end
                note.pitch = midi_note.pitch
                note.velocity = midi_note.velocity
                note.is_drum = is_drum

            return sequence.notes[-len(note_group):]

        # Slide over onset groups, looking at up to 4 gaps ahead for a run
        # of close onsets that forms an ornament.
        idx = 0
        while idx < len(groups) - 1:
            start = groups[idx:idx + 5]
            diffs = [(s2[0][3].start - s1[0][3].start)
                     for s1, s2 in zip(start, start[1:])]
            idx, ornament = find_ornament(diffs, idx)
            if not ornament[1]:
                add_note(groups[idx])
                idx += 1
            else:
                o, ons, rc = add_orn(idx, ornament)
                if o.orn_type == ARPEGGIO:
                    # Rolled chords keep all their (time-snapped) notes.
                    add_note(ons)
                else:
                    add_note(groups[idx])
                idx += 1

    add_notes(midi_notes, 0.1)

    for program, instrument, is_drum, midi_pitch_bend in midi_pitch_bends:
        pitch_bend = sequence.pitch_bends.add()
        pitch_bend.instrument = instrument
        pitch_bend.program = program
        pitch_bend.time = midi_pitch_bend.time
        pitch_bend.bend = midi_pitch_bend.pitch
        pitch_bend.is_drum = is_drum

    for program, instrument, is_drum, midi_control_change in midi_control_changes:
        control_change = sequence.control_changes.add()
        control_change.instrument = instrument
        control_change.program = program
        control_change.time = midi_control_change.time
        control_change.control_number = midi_control_change.number
        control_change.control_value = midi_control_change.value
        control_change.is_drum = is_drum

    # TODO(douglaseck): Estimate note type (e.g. quarter note) and populate
    # note.numerator and note.denominator.

    return sequence
示例#12
0
    def to_sequence(self,
                    velocity=100,
                    instrument=0,
                    program=0,
                    qpm=constants.DEFAULT_QUARTERS_PER_MINUTE,
                    base_note_sequence=None):
        """Converts this polyphonic event sequence to a NoteSequence proto.

        Walks the events, opening a note on NEW_NOTE, keeping it alive on
        CONTINUED_NOTE, and closing all non-continued notes at each
        STEP_END/END.

        Args:
          velocity: MIDI velocity to give each generated note.
          instrument: MIDI instrument number to give each generated note.
          program: MIDI program number to give each generated note.
          qpm: Tempo in quarter notes per minute; together with
              self._steps_per_quarter this fixes the seconds-per-step grid.
          base_note_sequence: Optional NoteSequence to append the generated
              notes to (deep-copied, not mutated). Its tempo must match qpm.

        Returns:
          A NoteSequence proto containing the decoded notes.

        Raises:
          ValueError: If base_note_sequence's tempo differs from qpm, if an
              unknown event type is encountered, or if pitches remain open
              at the end of the sequence (missing final STEP_END).
        """
        seconds_per_step = 60.0 / qpm / self._steps_per_quarter

        sequence_start_time = self.start_step * seconds_per_step

        if base_note_sequence:
            sequence = copy.deepcopy(base_note_sequence)
            if sequence.tempos[0].qpm != qpm:
                raise ValueError(
                    'Supplied QPM (%d) does not match QPM of base_note_sequence (%d)'
                    % (qpm, sequence.tempos[0].qpm))
        else:
            sequence = music_pb2.NoteSequence()
            sequence.tempos.add().qpm = qpm
            sequence.ticks_per_quarter = STANDARD_PPQ

        step = 0
        # Use lists rather than sets because one pitch may be active multiple times.
        pitch_start_steps = []
        pitches_to_end = []
        for i, event in enumerate(self):
            if event.event_type == PolyphonicEvent.START:
                if i != 0:
                    tf.logging.debug(
                        'Ignoring START marker not at beginning of sequence at position '
                        '%d' % i)
            elif event.event_type == PolyphonicEvent.END and i < len(self) - 1:
                # FIX: log message typo "maker" -> "marker".
                tf.logging.debug(
                    'Ignoring END marker before end of sequence at position %d'
                    % i)
            elif event.event_type == PolyphonicEvent.NEW_NOTE:
                pitch_start_steps.append((event.pitch, step))
            elif event.event_type == PolyphonicEvent.CONTINUED_NOTE:
                try:
                    pitches_to_end.remove(event.pitch)
                except ValueError:
                    tf.logging.debug(
                        'Attempted to continue pitch %s at step %s, but pitch was not '
                        'active. Ignoring.' % (event.pitch, step))
            elif (event.event_type == PolyphonicEvent.STEP_END
                  or event.event_type == PolyphonicEvent.END):
                # Find active pitches that should end. Create notes for them, based on
                # when they started.
                # Make a copy of pitch_start_steps so we can remove things from it while
                # iterating.
                for pitch_start_step in list(pitch_start_steps):
                    if pitch_start_step[0] in pitches_to_end:
                        pitches_to_end.remove(pitch_start_step[0])
                        pitch_start_steps.remove(pitch_start_step)

                        note = sequence.notes.add()
                        note.start_time = (
                            pitch_start_step[1] * seconds_per_step +
                            sequence_start_time)
                        note.end_time = step * seconds_per_step + sequence_start_time
                        note.pitch = pitch_start_step[0]
                        note.velocity = velocity
                        note.instrument = instrument
                        note.program = program

                assert not pitches_to_end

                # Increment the step counter.
                step += 1

                # All active pitches are eligible for ending unless continued.
                pitches_to_end = [ps[0] for ps in pitch_start_steps]
            else:
                raise ValueError('Unknown event type: %s' % event.event_type)

        if pitch_start_steps:
            raise ValueError(
                'Sequence ended, but not all pitches were ended. This likely means '
                'the sequence was missing a STEP_END event before the end of the '
                'sequence. To ensure a well-formed sequence, call set_length first.'
            )

        # step was incremented once for the final END event, hence step - 1.
        sequence.total_time = (seconds_per_step * (step - 1) +
                               sequence_start_time)
        if sequence.notes:
            assert sequence.total_time >= sequence.notes[-1].end_time

        return sequence