Example #1
    def encode_note_sequence(self, ns):
        """Transform a NoteSequence into a list of chord event indices.

        Args:
          ns: NoteSequence proto containing the chords to encode (as text
              annotations).

        Returns:
          ids: List of chord event indices.
        """
        qns = note_seq.quantize_note_sequence(ns, self._steps_per_quarter)

        chords = []
        current_chord = note_seq.NO_CHORD
        current_step = 0
        for ta in sorted(qns.text_annotations, key=lambda ta: ta.time):
            if ta.annotation_type != CHORD_SYMBOL:
                continue
            chords += [current_chord] * (ta.quantized_step - current_step)
            current_chord = ta.text
            current_step = ta.quantized_step
        chords += [current_chord] * (qns.total_quantized_steps - current_step)

        return self._encode_chord_symbols(chords)
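The loop above carries the current chord forward until the next annotation, emitting one label per quantized step. A minimal sketch of the same expansion on made-up data (the annotation steps and symbols are hypothetical; note_seq.NO_CHORD is the string 'N.C.'):

# Hypothetical (quantized_step, chord_symbol) annotations and total step count.
annotations = [(4, 'C'), (8, 'G7'), (12, 'Am')]
total_steps = 16

chords = []
current_chord = 'N.C.'  # stands in for note_seq.NO_CHORD
current_step = 0
for step, chord in annotations:
    chords += [current_chord] * (step - current_step)
    current_chord, current_step = chord, step
chords += [current_chord] * (total_steps - current_step)

# Four steps of no-chord, then four each of C, G7, and Am.
assert chords == ['N.C.'] * 4 + ['C'] * 4 + ['G7'] * 4 + ['Am'] * 4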
Example #2
  def _to_tensors_fn(self, note_sequence):
    # Performance sequences require sustain to be correctly interpreted.
    note_sequence = note_seq.apply_sustain_control_changes(note_sequence)

    if self._chord_encoding and not any(
        ta.annotation_type == CHORD_SYMBOL
        for ta in note_sequence.text_annotations):
      try:
        # Quantize just for the purpose of chord inference.
        # TODO(iansimon): Allow chord inference in unquantized sequences.
        quantized_sequence = note_seq.quantize_note_sequence(
            note_sequence, self._steps_per_quarter)
        if (note_seq.steps_per_bar_in_quantized_sequence(quantized_sequence) !=
            self._steps_per_bar):
          return data.ConverterTensors()

        # Infer chords in quantized sequence.
        note_seq.infer_chords_for_sequence(quantized_sequence)

      except (note_seq.BadTimeSignatureError,
              note_seq.NonIntegerStepsPerBarError, note_seq.NegativeTimeError,
              note_seq.ChordInferenceError):
        return data.ConverterTensors()

      # Copy inferred chords back to original sequence.
      for qta in quantized_sequence.text_annotations:
        if qta.annotation_type == CHORD_SYMBOL:
          ta = note_sequence.text_annotations.add()
          ta.annotation_type = CHORD_SYMBOL
          ta.time = qta.time
          ta.text = qta.text

    if note_sequence.tempos:
      quarters_per_minute = note_sequence.tempos[0].qpm
    else:
      quarters_per_minute = note_seq.DEFAULT_QUARTERS_PER_MINUTE
    quarters_per_bar = self._steps_per_bar / self._steps_per_quarter
    hop_size_quarters = quarters_per_bar * self._hop_size_bars
    hop_size_seconds = 60.0 * hop_size_quarters / quarters_per_minute

    # Split note sequence by bar hop size (in seconds).
    subsequences = note_seq.split_note_sequence(note_sequence, hop_size_seconds)

    if self._first_subsequence_only and len(subsequences) > 1:
      return data.ConverterTensors()

    sequence_tensors = []
    sequence_chord_tensors = []

    for subsequence in subsequences:
      # Quantize this subsequence.
      try:
        quantized_subsequence = note_seq.quantize_note_sequence(
            subsequence, self._steps_per_quarter)
        if (note_seq.steps_per_bar_in_quantized_sequence(quantized_subsequence)
            != self._steps_per_bar):
          return data.ConverterTensors()
      except (note_seq.BadTimeSignatureError,
              note_seq.NonIntegerStepsPerBarError, note_seq.NegativeTimeError):
        return data.ConverterTensors()

      # Convert the quantized subsequence to tensors.
      tensors, chord_tensors = self._quantized_subsequence_to_tensors(
          quantized_subsequence)
      if tensors:
        sequence_tensors.append(tensors)
        if self._chord_encoding:
          sequence_chord_tensors.append(chord_tensors)

    tensors = data.ConverterTensors(
        inputs=sequence_tensors, outputs=sequence_tensors,
        controls=sequence_chord_tensors)
    return hierarchical_pad_tensors(tensors, self.max_tensors_per_notesequence,
                                    self.is_training, self._max_lengths,
                                    self.end_token, self.input_depth,
                                    self.output_depth, self.control_depth,
                                    self._control_pad_token)
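The hop-size arithmetic above converts a hop measured in bars into seconds via the tempo. A quick worked sketch with assumed values (16 steps per bar, 4 steps per quarter, 1-bar hops, 120 qpm):

steps_per_bar = 16
steps_per_quarter = 4
hop_size_bars = 1
quarters_per_minute = 120.0

quarters_per_bar = steps_per_bar / steps_per_quarter  # 4.0 quarters per bar
hop_size_quarters = quarters_per_bar * hop_size_bars  # 4.0 quarters per hop
hop_size_seconds = 60.0 * hop_size_quarters / quarters_per_minute
assert hop_size_seconds == 2.0  # one 4/4 bar at 120 qpm lasts two seconds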
Example #3
def split_performance(performance, steps_per_segment, new_performance_fn,
                      clip_tied_notes=False):
  """Splits a performance into multiple fixed-length segments.

  Args:
    performance: A Performance (or MetricPerformance) object to split.
    steps_per_segment: The number of quantized steps per segment.
    new_performance_fn: A function to create new Performance (or
        MetricPerformance objects). Takes `quantized_sequence` and `start_step`
        arguments.
    clip_tied_notes: If True, clip tied notes across segments by converting each
        segment to NoteSequence and back.

  Returns:
    A list of performance segments.
  """
  segments = []
  cur_segment = new_performance_fn(quantized_sequence=None, start_step=0)
  cur_step = 0
  for e in performance:
    if e.event_type != performance_lib.PerformanceEvent.TIME_SHIFT:
      if cur_step == steps_per_segment:
        # At a segment boundary, note-offs happen before the cutoff.
        # Everything else happens after.
        if e.event_type != performance_lib.PerformanceEvent.NOTE_OFF:
          segments.append(cur_segment)
          cur_segment = new_performance_fn(
              quantized_sequence=None,
              start_step=len(segments) * steps_per_segment)
          cur_step = 0
        cur_segment.append(e)
      else:
        # We're not at a segment boundary.
        cur_segment.append(e)
    else:
      if cur_step + e.event_value <= steps_per_segment:
        # If it's a time shift, but we're still within the current segment,
        # just append to current segment.
        cur_segment.append(e)
        cur_step += e.event_value
      else:
        # If it's a time shift that goes beyond the current segment, possibly
        # split the time shift into two events and create a new segment.
        cur_segment_steps = steps_per_segment - cur_step
        if cur_segment_steps > 0:
          cur_segment.append(performance_lib.PerformanceEvent(
              event_type=performance_lib.PerformanceEvent.TIME_SHIFT,
              event_value=cur_segment_steps))

        segments.append(cur_segment)
        cur_segment = new_performance_fn(
            quantized_sequence=None,
            start_step=len(segments) * steps_per_segment)
        cur_step = 0

        new_segment_steps = e.event_value - cur_segment_steps
        if new_segment_steps > 0:
          cur_segment.append(performance_lib.PerformanceEvent(
              event_type=performance_lib.PerformanceEvent.TIME_SHIFT,
              event_value=new_segment_steps))
          cur_step += new_segment_steps

  segments.append(cur_segment)

  # There may be a final segment with zero duration. If so, remove it.
  if segments and segments[-1].num_steps == 0:
    segments = segments[:-1]

  if clip_tied_notes:
    # Convert each segment to NoteSequence and back to remove notes that are
    # held across segment boundaries.
    for i in range(len(segments)):
      sequence = segments[i].to_sequence()
      if isinstance(segments[i], performance_lib.MetricPerformance):
        # Performance is quantized relative to meter.
        quantized_sequence = note_seq.quantize_note_sequence(
            sequence, steps_per_quarter=segments[i].steps_per_quarter)
      else:
        # Performance is quantized with absolute timing.
        quantized_sequence = note_seq.quantize_note_sequence_absolute(
            sequence, steps_per_second=segments[i].steps_per_second)
      segments[i] = new_performance_fn(
          quantized_sequence=quantized_sequence,
          start_step=segments[i].start_step)
      segments[i].set_length(steps_per_segment)

  return segments
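A hedged usage sketch for the splitter above: `performance` is assumed to be a quantized Performance built elsewhere, and the partial mirrors the `new_performance_fn` contract from the docstring (it must accept `quantized_sequence` and `start_step` arguments):

import functools

from note_seq import performance_lib  # assumed home of the Performance class

# Bind everything except the (quantized_sequence, start_step) arguments that
# split_performance supplies when it opens a new segment.
new_perf_fn = functools.partial(performance_lib.Performance, steps_per_second=100)

segments = split_performance(
    performance, steps_per_segment=500, new_performance_fn=new_perf_fn,
    clip_tied_notes=True)
# Each segment spans at most 500 quantized steps (5 s at 100 steps/second).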
Example #4
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise sequence_generator.SequenceGeneratorError(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise sequence_generator.SequenceGeneratorError(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    if input_sequence and input_sequence.tempos:
      qpm = input_sequence.tempos[0].qpm
    else:
      qpm = note_seq.DEFAULT_QUARTERS_PER_MINUTE
    steps_per_second = note_seq.steps_per_quarter_to_steps_per_second(
        self.steps_per_quarter, qpm)

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      input_section = generator_options.input_sections[0]
      primer_sequence = note_seq.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
      input_start_step = note_seq.quantize_to_step(
          input_section.start_time, steps_per_second, quantize_cutoff=0)
    else:
      primer_sequence = input_sequence
      input_start_step = 0

    if primer_sequence.notes:
      last_end_time = max(n.end_time for n in primer_sequence.notes)
    else:
      last_end_time = 0
    if last_end_time > generate_section.start_time:
      raise sequence_generator.SequenceGeneratorError(
          'Got GenerateSection request for section that is before the end of '
          'the NoteSequence. This model can only extend sequences. Requested '
          'start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming sequence.
    quantized_sequence = note_seq.quantize_note_sequence(
        primer_sequence, self.steps_per_quarter)
    # Setting gap_bars to infinite ensures that the entire input will be used.
    extracted_melodies, _ = melody_pipelines.extract_melodies(
        quantized_sequence, search_start_step=input_start_step, min_bars=0,
        min_unique_pitches=1, gap_bars=float('inf'),
        ignore_polyphonic_notes=True)
    assert len(extracted_melodies) <= 1

    start_step = note_seq.quantize_to_step(
        generate_section.start_time, steps_per_second, quantize_cutoff=0)
    # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
    # always rounds down. This avoids generating a sequence that ends at 5.0
    # seconds when the requested end time is 4.99.
    end_step = note_seq.quantize_to_step(
        generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

    if extracted_melodies and extracted_melodies[0]:
      melody = extracted_melodies[0]
    else:
      # If no melody could be extracted, create an empty melody that starts 1
      # step before the request start_step. This will result in 1 step of
      # silence when the melody is extended below.
      steps_per_bar = int(
          note_seq.steps_per_bar_in_quantized_sequence(quantized_sequence))
      melody = note_seq.Melody([],
                               start_step=max(0, start_step - 1),
                               steps_per_bar=steps_per_bar,
                               steps_per_quarter=self.steps_per_quarter)

    # Ensure that the melody extends up to the step we want to start generating.
    melody.set_length(start_step - melody.start_step)

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    generated_melody = self._model.generate_melody(
        end_step - melody.start_step, melody, **args)
    generated_sequence = generated_melody.to_sequence(qpm=qpm)
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
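The two `quantize_cutoff` values deserve a concrete illustration: 0 rounds any fractional step up (so generation starts no earlier than requested), while 1.0 always rounds down (so the output never overshoots the requested end time). A small sketch, assuming note_seq's floor-plus-offset rounding and an arbitrary step rate:

import note_seq

steps_per_second = 20.0  # e.g. steps_per_quarter=4 at 300 qpm; value is arbitrary

# Start boundary: with cutoff=0, 4.99 s rounds up to step 100.
assert note_seq.quantize_to_step(4.99, steps_per_second, quantize_cutoff=0) == 100
# End boundary: with cutoff=1.0, 4.99 s rounds down to step 99.
assert note_seq.quantize_to_step(4.99, steps_per_second, quantize_cutoff=1.0) == 99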
Example #5
  def _process_ns(self, ns):
    if self._filters:
      if ns.total_time > self._filters['max_total_time']:
        logging.info('Skipping %s: total_time=%f', ns.id, ns.total_time)
        beam_metrics.counter('ExtractExamplesDoFn', 'filtered-too-long').inc()
        return
      if len(ns.notes) > self._filters['max_num_notes']:
        logging.info('Skipping %s: num_notes=%d', ns.id, len(ns.notes))
        beam_metrics.counter(
            'ExtractExamplesDoFn', 'filtered-too-many-notes').inc()
        return

      try:
        qns = note_seq.quantize_note_sequence(ns, steps_per_quarter=16)
      except (note_seq.BadTimeSignatureError,
              note_seq.NonIntegerStepsPerBarError, note_seq.NegativeTimeError):
        beam_metrics.counter('ExtractExamplesDoFn', 'quantize-failed').inc()
        return

      vels = set()
      metric_positions = set()
      drums_only = True
      for note in qns.notes:
        drums_only &= note.is_drum
        if ((self._filters['is_drum'] is None or
             note.is_drum == self._filters['is_drum'])
            and note.velocity > 0):
          vels.add(note.velocity)
          metric_positions.add(note.quantized_start_step % 16)

      if len(vels) < self._filters['min_velocities']:
        beam_metrics.counter(
            'ExtractExamplesDoFn', 'filtered-min-velocities').inc()
        return
      if len(metric_positions) < self._filters['min_metric_positions']:
        beam_metrics.counter(
            'ExtractExamplesDoFn', 'filtered-min-metric-positions').inc()
        return
      if self._filters['drums_only'] and not drums_only:
        beam_metrics.counter(
            'ExtractExamplesDoFn', 'filtered-drums-only').inc()
        return

    beam_metrics.counter('ExtractExamplesDoFn', 'unfiltered-sequences').inc()
    logging.info('Converting %s to tensors', ns.id)
    extracted_examples = self._config.data_converter.to_tensors(ns)
    if not extracted_examples.outputs:
      beam_metrics.counter('ExtractExamplesDoFn', 'empty-extractions').inc()
      return
    beam_metrics.counter('ExtractExamplesDoFn', 'extracted-examples').inc(
        len(extracted_examples.outputs))
    for _, outputs, controls, _ in zip(*extracted_examples):
      if controls.size:
        example_ns = self._config.data_converter.from_tensors(
            [outputs], [controls])[0]
      else:
        example_ns = self._config.data_converter.from_tensors([outputs])[0]
      # Try to re-encode.
      # TODO(adarob): For now we filter and count examples that cannot be
      # re-extracted, but ultimately the converter should filter these or avoid
      # producing them altogether.
      reextracted_examples = self._config.data_converter.to_tensors(
          example_ns).inputs
      assert len(reextracted_examples) <= 1
      if not reextracted_examples:
        logging.warning(
            'Extracted example NoteSequence does not reproduce example. '
            'Skipping: %s', example_ns)
        beam_metrics.counter('ExtractExamplesDoFn', 'empty-reextraction').inc()
        continue
      # Extra checks if the code returns multiple segments.
      # TODO(fjord): should probably make this recursive for cases with more
      # than 1 level of hierarchy.
      if isinstance(outputs, list):
        if len(outputs) != len(reextracted_examples[0]):
          logging.warning(
              'Re-extracted example tensor has different number of segments. '
              'ID: %s. original %d, reextracted %d. Skipping.', ns.id,
              len(outputs), len(reextracted_examples[0]))
          beam_metrics.counter(
              'ExtractExamplesDoFn', 'different-reextraction-count').inc()
          continue
        for i in range(len(outputs)):
          if not np.array_equal(reextracted_examples[0][i], outputs[i]):
            logging.warning(
                'Re-extracted example tensor does not equal original example. '
                'ID: %s. Index %d. NoteSequence: %s', ns.id, i, example_ns)
            beam_metrics.counter(
                'ExtractExamplesDoFn', 'different-reextraction').inc()
      yield example_ns, ns.id
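The `_filters` dict above is only ever read by key; a plausible shape, with the key names taken from the reads in `_process_ns` and the values purely illustrative:

filters = {
    'max_total_time': 180.0,       # seconds
    'max_num_notes': 10000,
    'is_drum': None,               # None disables the drum/pitched-note filter
    'min_velocities': 2,           # distinct nonzero velocities required
    'min_metric_positions': 2,     # distinct note start positions within a bar
    'drums_only': False,
}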
Example #6
    def _generate(self, input_sequence, generator_options):
        if len(generator_options.input_sections) > 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        # This sequence will be quantized later, so it is guaranteed to have only 1
        # tempo.
        qpm = note_seq.DEFAULT_QUARTERS_PER_MINUTE
        if input_sequence.tempos:
            qpm = input_sequence.tempos[0].qpm

        steps_per_second = note_seq.steps_per_quarter_to_steps_per_second(
            self.steps_per_quarter, qpm)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            input_section = generator_options.input_sections[0]
            primer_sequence = note_seq.trim_note_sequence(
                input_sequence, input_section.start_time,
                input_section.end_time)
            input_start_step = note_seq.quantize_to_step(
                input_section.start_time, steps_per_second, quantize_cutoff=0)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0

        if last_end_time > generate_section.start_time:
            raise sequence_generator.SequenceGeneratorError(
                'Got GenerateSection request for section that is before or equal to '
                'the end of the NoteSequence. This model can only extend sequences. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_primer_sequence = note_seq.quantize_note_sequence(
            primer_sequence, self.steps_per_quarter)

        extracted_seqs, _ = polyphony_lib.extract_polyphonic_sequences(
            quantized_primer_sequence, start_step=input_start_step)
        assert len(extracted_seqs) <= 1

        generate_start_step = note_seq.quantize_to_step(
            generate_section.start_time, steps_per_second, quantize_cutoff=0)
        # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
        # always rounds down. This avoids generating a sequence that ends at 5.0
        # seconds when the requested end time is 4.99.
        generate_end_step = note_seq.quantize_to_step(
            generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

        if extracted_seqs and extracted_seqs[0]:
            poly_seq = extracted_seqs[0]
        else:
            # If no track could be extracted, create an empty track that starts at the
            # requested generate_start_step. This will result in a sequence that
            # contains only the START token.
            poly_seq = polyphony_lib.PolyphonicSequence(
                steps_per_quarter=(quantized_primer_sequence.quantization_info.
                                   steps_per_quarter),
                start_step=generate_start_step)

        # Ensure that the track extends up to the step we want to start generating.
        poly_seq.set_length(generate_start_step - poly_seq.start_step)
        # Trim any trailing end events to prepare the sequence for more events to be
        # appended during generation.
        poly_seq.trim_trailing_end_events()

        # Extract generation arguments from generator options.
        arg_types = {
            'temperature': lambda arg: arg.float_value,
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
            'steps_per_iteration': lambda arg: arg.int_value
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        # Inject the priming sequence as melody in the output of the generator, if
        # requested.
        # This option starts with no_ so that if it is unspecified (as will be the
        # case when used with the midi interface), the default will be to inject the
        # primer.
        if not (generator_options.args['no_inject_primer_during_generation'].
                bool_value):
            melody_to_inject = copy.deepcopy(poly_seq)
            if generator_options.args['condition_on_primer'].bool_value:
                inject_start_step = poly_seq.num_steps
            else:
                # 0 steps because we'll overwrite poly_seq with a blank sequence below.
                inject_start_step = 0

            args['modify_events_callback'] = functools.partial(
                _inject_melody, melody_to_inject, inject_start_step)

        # If we don't want to condition on the priming sequence, then overwrite
        # poly_seq with a blank sequence to feed into the generator.
        if not generator_options.args['condition_on_primer'].bool_value:
            poly_seq = polyphony_lib.PolyphonicSequence(
                steps_per_quarter=(quantized_primer_sequence.quantization_info.
                                   steps_per_quarter),
                start_step=generate_start_step)
            poly_seq.trim_trailing_end_events()

        total_steps = poly_seq.num_steps + (generate_end_step -
                                            generate_start_step)

        while poly_seq.num_steps < total_steps:
            # Assume it takes ~5 rnn steps to generate one quantized step.
            # Can't know for sure until generation is finished because the number of
            # notes per quantized step is variable.
            steps_to_gen = total_steps - poly_seq.num_steps
            rnn_steps_to_gen = 5 * steps_to_gen
            tf.logging.info(
                'Need to generate %d more steps for this sequence, will try asking '
                'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
            poly_seq = self._model.generate_polyphonic_sequence(
                len(poly_seq) + rnn_steps_to_gen, poly_seq, **args)
        poly_seq.set_length(total_steps)

        if generator_options.args['condition_on_primer'].bool_value:
            generated_sequence = poly_seq.to_sequence(qpm=qpm)
        else:
            # Specify a base_note_sequence because the priming sequence was not
            # included in poly_seq.
            generated_sequence = poly_seq.to_sequence(
                qpm=qpm, base_note_sequence=copy.deepcopy(primer_sequence))
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
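For context, a hedged sketch of how the `generator_options` consumed above is typically assembled from the GeneratorOptions proto (section times and argument values are arbitrary):

from note_seq.protobuf import generator_pb2

generator_options = generator_pb2.GeneratorOptions()
generator_options.args['temperature'].float_value = 1.0
generator_options.args['condition_on_primer'].bool_value = True
generator_options.args['no_inject_primer_during_generation'].bool_value = False
# Ask for four seconds of new music starting where the primer ends.
generator_options.generate_sections.add(start_time=4.0, end_time=8.0)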
Example #7
    def _generate(self, input_sequence, generator_options):
        if len(generator_options.input_sections) > 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        # This sequence will be quantized later, so it is guaranteed to have only 1
        # tempo.
        qpm = note_seq.DEFAULT_QUARTERS_PER_MINUTE
        if input_sequence.tempos:
            qpm = input_sequence.tempos[0].qpm

        steps_per_second = note_seq.steps_per_quarter_to_steps_per_second(
            self.steps_per_quarter, qpm)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            input_section = generator_options.input_sections[0]
            primer_sequence = note_seq.trim_note_sequence(
                input_sequence, input_section.start_time,
                input_section.end_time)
            input_start_step = note_seq.quantize_to_step(
                input_section.start_time, steps_per_second, quantize_cutoff=0)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0

        if last_end_time > generate_section.start_time:
            raise sequence_generator.SequenceGeneratorError(
                'Got GenerateSection request for section that is before or equal to '
                'the end of the NoteSequence. This model can only extend sequences. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_primer_sequence = note_seq.quantize_note_sequence(
            primer_sequence, self.steps_per_quarter)

        extracted_seqs, _ = pianoroll_pipeline.extract_pianoroll_sequences(
            quantized_primer_sequence, start_step=input_start_step)
        assert len(extracted_seqs) <= 1

        generate_start_step = note_seq.quantize_to_step(
            generate_section.start_time, steps_per_second, quantize_cutoff=0)
        # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
        # always rounds down. This avoids generating a sequence that ends at 5.0
        # seconds when the requested end time is 4.99.
        generate_end_step = note_seq.quantize_to_step(
            generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

        if extracted_seqs and extracted_seqs[0]:
            pianoroll_seq = extracted_seqs[0]
        else:
            raise ValueError('No priming pianoroll could be extracted.')

        # Ensure that the track extends up to the step we want to start generating.
        pianoroll_seq.set_length(generate_start_step -
                                 pianoroll_seq.start_step)

        # Extract generation arguments from generator options.
        arg_types = {
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        total_steps = pianoroll_seq.num_steps + (generate_end_step -
                                                 generate_start_step)

        pianoroll_seq = self._model.generate_pianoroll_sequence(
            total_steps, pianoroll_seq, **args)
        pianoroll_seq.set_length(total_steps)

        generated_sequence = pianoroll_seq.to_sequence(qpm=qpm)
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
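Callers normally reach these `_generate` methods through the public `generate` wrapper on the sequence generator base class. A minimal hedged call sketch (`generator` is assumed to be an already-constructed generator for one of the models above, and `primer_sequence` and `generator_options` are built as in the previous sketches):

import note_seq

generator.initialize()  # load the model checkpoint
generated_sequence = generator.generate(primer_sequence, generator_options)
note_seq.sequence_proto_to_midi_file(generated_sequence, '/tmp/generated.mid')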
Example #8
  def _quantize_note_sequence(self, ns):
    return note_seq.quantize_note_sequence(ns, self._steps_per_quarter)

# See polyphony_sequence_generator.py (in magenta.models.polyphony_rnn).

# Imports needed by the script below (note_seq bundles these modules).
import note_seq
from note_seq import encoder_decoder
from note_seq import midi_io
from note_seq import polyphony_encoder_decoder
from note_seq import polyphony_lib

input_file = '/Users/nikolasborrel/github/midi_data_out/melodies/piano_poly.mid'
out_file = '/Users/nikolasborrel/github/midi_data_out/melodies/piano_poly_out.mid'
out_file_trans = '/Users/nikolasborrel/github/midi_data_out/melodies/piano_poly_trans_out.mid'
out_file_pred = '/Users/nikolasborrel/github/midi_data_out/melodies/piano_poly_pred_out.mid'

min_note = 60
max_note = 72
transpose_to_key = 2

steps_per_quarter = 4  # default, resulting in 16th note quantization

note_seq_raw = midi_io.midi_file_to_note_sequence(input_file)
note_seq_quan = note_seq.quantize_note_sequence(note_seq_raw,
                                                steps_per_quarter)
extracted_seqs, stats = polyphony_lib.extract_polyphonic_sequences(
    note_seq_quan)

assert len(extracted_seqs) <= 1  # the docs state at most one sequence is extracted
poly_seq = extracted_seqs[0]

print(poly_seq)

seq1 = poly_seq.to_sequence()  # optionally pass qpm, e.g. qpm=60.0
midi_io.sequence_proto_to_midi_file(seq1, out_file)

poly_encoder = encoder_decoder.OneHotEventSequenceEncoderDecoder(
    polyphony_encoder_decoder.PolyphonyOneHotEncoding())
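A plausible next step with `poly_encoder`, hedged because it goes beyond what the script shows: the one-hot encoder-decoder turns the event sequence into model inputs and labels.

# encode() consumes the event sequence and returns (inputs, labels): one-hot
# input vectors paired with the class index of each following event.
inputs, labels = poly_encoder.encode(poly_seq)
print(len(inputs), len(labels))  # one pair per event transition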