Example #1
  def testExtractPolyphonicSequences(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    seqs, _ = polyphony_lib.extract_polyphonic_sequences(quantized_sequence)
    self.assertEqual(1, len(seqs))

    seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_sequence, min_steps_discard=2, max_steps_discard=5)
    self.assertEqual(1, len(seqs))

    self.note_sequence.notes[0].end_time = 1.0
    self.note_sequence.total_time = 1.0
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_sequence, min_steps_discard=3, max_steps_discard=5)
    self.assertEqual(0, len(seqs))

    self.note_sequence.notes[0].end_time = 10.0
    self.note_sequence.total_time = 10.0
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_sequence, min_steps_discard=3, max_steps_discard=5)
    self.assertEqual(0, len(seqs))
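These tests assume a shared fixture that provides self.note_sequence and the usual Magenta test imports. A minimal sketch of such a setup follows; the module paths, class name, and the 60 qpm tempo are assumptions for illustration, not part of the original example (at 60 qpm and steps_per_quarter=1, one quantized step corresponds to one second, which matches the step counts asserted above).

import tensorflow as tf

from magenta.music import polyphony_lib
from magenta.music import sequences_lib
from magenta.music import testing_lib
from magenta.protobuf import music_pb2


class PolyphonyLibTest(tf.test.TestCase):

  def setUp(self):
    # Empty NoteSequence at 60 qpm, so one quantized step equals one second
    # when quantizing with steps_per_quarter=1.
    self.note_sequence = music_pb2.NoteSequence()
    self.note_sequence.tempos.add(qpm=60)
    self.note_sequence.ticks_per_quarter = 220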
Example #2
  def testExtractNonZeroStart(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_sequence, start_step=4, min_steps_discard=1)
    self.assertEqual(0, len(seqs))
    seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_sequence, start_step=0, min_steps_discard=1)
    self.assertEqual(1, len(seqs))
Example #4
 def transform(self, quantized_sequence):
   poly_seqs, stats = polyphony_lib.extract_polyphonic_sequences(
       quantized_sequence,
       min_steps_discard=self._min_steps,
       max_steps_discard=self._max_steps)
   self._set_stats(stats)
   return poly_seqs
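The transform method above is written as part of a Magenta pipeline unit that wraps extract_polyphonic_sequences and reports its extraction stats. A rough sketch of the enclosing class it assumes (the class name and constructor are illustrative, following Magenta's pipeline conventions, and are not taken from the original example):

from magenta.music import polyphony_lib
from magenta.pipelines import pipeline
from magenta.protobuf import music_pb2


class PolyphonicSequenceExtractor(pipeline.Pipeline):
  """Extracts PolyphonicSequence objects from a quantized NoteSequence."""

  def __init__(self, min_steps, max_steps, name=None):
    super(PolyphonicSequenceExtractor, self).__init__(
        input_type=music_pb2.NoteSequence,
        output_type=polyphony_lib.PolyphonicSequence,
        name=name)
    self._min_steps = min_steps
    self._max_steps = max_steps

  def transform(self, quantized_sequence):
    poly_seqs, stats = polyphony_lib.extract_polyphonic_sequences(
        quantized_sequence,
        min_steps_discard=self._min_steps,
        max_steps_discard=self._max_steps)
    self._set_stats(stats)
    return poly_seqs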
Example #6
  def testExtractPolyphonicMultiProgram(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    self.note_sequence.notes[0].program = 2
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    seqs, _ = polyphony_lib.extract_polyphonic_sequences(quantized_sequence)
    self.assertEqual(0, len(seqs))
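Because extract_polyphonic_sequences only keeps tracks whose notes share a single program, the mixed-program input above yields no sequences. For contrast, a hypothetical companion test (not part of the original example) in which all notes keep the default program should extract exactly one sequence:

  def testExtractPolyphonicSingleProgram(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    # All notes keep the default program, so the track is single-program.
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    seqs, _ = polyphony_lib.extract_polyphonic_sequences(quantized_sequence)
    self.assertEqual(1, len(seqs))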
Example #8
    def _generate(self, input_sequence, generator_options):
        if len(generator_options.input_sections) > 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        # This sequence will be quantized later, so it is guaranteed to have only 1
        # tempo.
        qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
        if input_sequence.tempos:
            qpm = input_sequence.tempos[0].qpm

        steps_per_second = mm.steps_per_quarter_to_steps_per_second(
            self.steps_per_quarter, qpm)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            input_section = generator_options.input_sections[0]
            primer_sequence = mm.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
            input_start_step = mm.quantize_to_step(input_section.start_time,
                                                   steps_per_second,
                                                   quantize_cutoff=0)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0

        if last_end_time > generate_section.start_time:
            raise sequence_generator.SequenceGeneratorError(
                'Got GenerateSection request for section that is before or equal to '
                'the end of the NoteSequence. This model can only extend sequences. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_primer_sequence = mm.quantize_note_sequence(
            primer_sequence, self.steps_per_quarter)

        extracted_seqs, _ = polyphony_lib.extract_polyphonic_sequences(
            quantized_primer_sequence, start_step=input_start_step)
        assert len(extracted_seqs) <= 1

        generate_start_step = mm.quantize_to_step(generate_section.start_time,
                                                  steps_per_second,
                                                  quantize_cutoff=0)
        # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
        # always rounds down. This avoids generating a sequence that ends at 5.0
        # seconds when the requested end time is 4.99.
        generate_end_step = mm.quantize_to_step(generate_section.end_time,
                                                steps_per_second,
                                                quantize_cutoff=1.0)

        if extracted_seqs and extracted_seqs[0]:
            poly_seq = extracted_seqs[0]
        else:
            # If no track could be extracted, create an empty track that starts at the
            # requested generate_start_step. This will result in a sequence that
            # contains only the START token.
            poly_seq = polyphony_lib.PolyphonicSequence(
                steps_per_quarter=(quantized_primer_sequence.quantization_info.
                                   steps_per_quarter),
                start_step=generate_start_step)

        # Ensure that the track extends up to the step we want to start generating.
        poly_seq.set_length(generate_start_step - poly_seq.start_step)
        # Trim any trailing end events to prepare the sequence for more events to be
        # appended during generation.
        poly_seq.trim_trailing_end_events()

        # Extract generation arguments from generator options.
        arg_types = {
            'temperature': lambda arg: arg.float_value,
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
            'steps_per_iteration': lambda arg: arg.int_value
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        # Inject the priming sequence as melody in the output of the generator, if
        # requested.
        # This option starts with no_ so that if it is unspecified (as will be the
        # case when used with the midi interface), the default will be to inject the
        # primer.
        if not (generator_options.args['no_inject_primer_during_generation'].
                bool_value):
            melody_to_inject = copy.deepcopy(poly_seq)
            if generator_options.args['condition_on_primer'].bool_value:
                inject_start_step = poly_seq.num_steps
            else:
                # 0 steps because we'll overwrite poly_seq with a blank sequence below.
                inject_start_step = 0

            args['modify_events_callback'] = functools.partial(
                _inject_melody, melody_to_inject, inject_start_step)

        # If we don't want to condition on the priming sequence, then overwrite
        # poly_seq with a blank sequence to feed into the generator.
        if not generator_options.args['condition_on_primer'].bool_value:
            poly_seq = polyphony_lib.PolyphonicSequence(
                steps_per_quarter=(quantized_primer_sequence.quantization_info.
                                   steps_per_quarter),
                start_step=generate_start_step)
            poly_seq.trim_trailing_end_events()

        total_steps = poly_seq.num_steps + (generate_end_step -
                                            generate_start_step)

        while poly_seq.num_steps < total_steps:
            # Assume it takes ~5 rnn steps to generate one quantized step.
            # Can't know for sure until generation is finished because the number of
            # notes per quantized step is variable.
            steps_to_gen = total_steps - poly_seq.num_steps
            rnn_steps_to_gen = 5 * steps_to_gen
            tf.logging.info(
                'Need to generate %d more steps for this sequence, will try asking '
                'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
            poly_seq = self._model.generate_polyphonic_sequence(
                len(poly_seq) + rnn_steps_to_gen, poly_seq, **args)
        poly_seq.set_length(total_steps)

        if generator_options.args['condition_on_primer'].bool_value:
            generated_sequence = poly_seq.to_sequence(qpm=qpm)
        else:
            # Specify a base_note_sequence because the priming sequence was not
            # included in poly_seq.
            generated_sequence = poly_seq.to_sequence(
                qpm=qpm, base_note_sequence=copy.deepcopy(primer_sequence))
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
Example #9
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise mm.SequenceGeneratorException(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise mm.SequenceGeneratorException(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    # This sequence will be quantized later, so it is guaranteed to have only 1
    # tempo.
    qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
    if input_sequence.tempos:
      qpm = input_sequence.tempos[0].qpm

    steps_per_second = mm.steps_per_quarter_to_steps_per_second(
        self.steps_per_quarter, qpm)

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      input_section = generator_options.input_sections[0]
      primer_sequence = mm.trim_note_sequence(
          input_sequence, input_section.start_time, input_section.end_time)
      input_start_step = mm.quantize_to_step(
          input_section.start_time, steps_per_second, quantize_cutoff=0)
    else:
      primer_sequence = input_sequence
      input_start_step = 0

    last_end_time = (max(n.end_time for n in primer_sequence.notes)
                     if primer_sequence.notes else 0)
    if last_end_time > generate_section.start_time:
      raise mm.SequenceGeneratorException(
          'Got GenerateSection request for section that is before or equal to '
          'the end of the NoteSequence. This model can only extend sequences. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming sequence.
    quantized_primer_sequence = mm.quantize_note_sequence(
        primer_sequence, self.steps_per_quarter)

    extracted_seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_primer_sequence, start_step=input_start_step)
    assert len(extracted_seqs) <= 1

    generate_start_step = mm.quantize_to_step(
        generate_section.start_time, steps_per_second, quantize_cutoff=0)
    # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
    # always rounds down. This avoids generating a sequence that ends at 5.0
    # seconds when the requested end time is 4.99.
    generate_end_step = mm.quantize_to_step(
        generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

    if extracted_seqs and extracted_seqs[0]:
      poly_seq = extracted_seqs[0]
    else:
      # If no track could be extracted, create an empty track that starts at the
      # requested generate_start_step. This will result in a sequence that
      # contains only the START token.
      poly_seq = polyphony_lib.PolyphonicSequence(
          steps_per_quarter=(
              quantized_primer_sequence.quantization_info.steps_per_quarter),
          start_step=generate_start_step)

    # Ensure that the track extends up to the step we want to start generating.
    poly_seq.set_length(generate_start_step - poly_seq.start_step)
    # Trim any trailing end events to prepare the sequence for more events to be
    # appended during generation.
    poly_seq.trim_trailing_end_events()

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    # Inject the priming sequence as melody in the output of the generator, if
    # requested.
    # This option starts with no_ so that if it is unspecified (as will be the
    # case when used with the midi interface), the default will be to inject the
    # primer.
    if not (generator_options.args[
        'no_inject_primer_during_generation'].bool_value):
      melody_to_inject = copy.deepcopy(poly_seq)
      if generator_options.args['condition_on_primer'].bool_value:
        inject_start_step = poly_seq.num_steps
      else:
        # 0 steps because we'll overwrite poly_seq with a blank sequence below.
        inject_start_step = 0

      args['modify_events_callback'] = partial(
          _inject_melody, melody_to_inject, inject_start_step)

    # If we don't want to condition on the priming sequence, then overwrite
    # poly_seq with a blank sequence to feed into the generator.
    if not generator_options.args['condition_on_primer'].bool_value:
      poly_seq = polyphony_lib.PolyphonicSequence(
          steps_per_quarter=(
              quantized_primer_sequence.quantization_info.steps_per_quarter),
          start_step=generate_start_step)
      poly_seq.trim_trailing_end_events()

    total_steps = poly_seq.num_steps + (
        generate_end_step - generate_start_step)

    while poly_seq.num_steps < total_steps:
      # Assume it takes ~5 rnn steps to generate one quantized step.
      # Can't know for sure until generation is finished because the number of
      # notes per quantized step is variable.
      steps_to_gen = total_steps - poly_seq.num_steps
      rnn_steps_to_gen = 5 * steps_to_gen
      tf.logging.info(
          'Need to generate %d more steps for this sequence, will try asking '
          'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
      poly_seq = self._model.generate_polyphonic_sequence(
          len(poly_seq) + rnn_steps_to_gen, poly_seq, **args)
    poly_seq.set_length(total_steps)

    if generator_options.args['condition_on_primer'].bool_value:
      generated_sequence = poly_seq.to_sequence(qpm=qpm)
    else:
      # Specify a base_note_sequence because the priming sequence was not
      # included in poly_seq.
      generated_sequence = poly_seq.to_sequence(
          qpm=qpm, base_note_sequence=copy.deepcopy(primer_sequence))
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
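One detail worth noting in both versions above: quantize_cutoff controls the rounding direction when mapping times to steps. A tiny sketch of the end-step case that the inline comment describes (values are illustrative; quantize_to_step is the same sequences_lib helper used above via mm):

# With quantize_cutoff=1.0 the end time always rounds down, so a requested
# end time of 4.99 s at 1 step per second maps to step 4, not step 5.
end_step = mm.quantize_to_step(4.99, 1.0, quantize_cutoff=1.0)
assert end_step == 4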
Example #10
# Imports assumed by this example (module paths follow the note_seq package;
# older Magenta releases expose the same modules under magenta.music):
import note_seq
from note_seq import encoder_decoder
from note_seq import midi_io
from note_seq import polyphony_encoder_decoder
from note_seq import polyphony_lib

input_file = '/Users/nikolasborrel/github/midi_data_out/melodies/piano_poly.mid'
out_file = '/Users/nikolasborrel/github/midi_data_out/melodies/piano_poly_out.mid'
out_file_trans = '/Users/nikolasborrel/github/midi_data_out/melodies/piano_poly_trans_out.mid'
out_file_pred = '/Users/nikolasborrel/github/midi_data_out/melodies/piano_poly_pred_out.mid'

min_note = 60
max_note = 72
transpose_to_key = 2

steps_per_quarter = 4  # default, resulting in 16th note quantization

note_seq_raw = midi_io.midi_file_to_note_sequence(input_file)
note_seq_quan = note_seq.quantize_note_sequence(note_seq_raw,
                                                steps_per_quarter)
extracted_seqs, stats = polyphony_lib.extract_polyphonic_sequences(
    note_seq_quan)

# The docs state that at most one polyphonic sequence is extracted.
assert len(extracted_seqs) <= 1
poly_seq = extracted_seqs[0]

print(poly_seq)

seq1 = poly_seq.to_sequence()  #qpm=60.0
midi_io.sequence_proto_to_midi_file(seq1, out_file)

poly_encoder = encoder_decoder.OneHotEventSequenceEncoderDecoder(
    polyphony_encoder_decoder.PolyphonyOneHotEncoding())

if len(note_seq_raw.key_signatures) > 1:
    print(