Example #1
def get_primer_ns(filename, max_length):
    """
    Convert a MIDI file to a note sequence for priming.
    :param filename: MIDI file name.
    :param max_length: Maximum primer length in seconds.
    :return:
        Note sequence for priming.
    """
    primer_ns = mm.midi_file_to_note_sequence(filename)

    # Handle sustain pedal in primer.
    primer_ns = mm.apply_sustain_control_changes(primer_ns)

    # Trim to desired number of seconds.
    if primer_ns.total_time > max_length:
        LOGGER.warning(
            'Primer duration %.1f is longer than max length %d seconds; truncating.',
            primer_ns.total_time, max_length)
        primer_ns = mm.extract_subsequence(primer_ns, 0, max_length)

    # Remove drums from primer if present.
    if any(note.is_drum for note in primer_ns.notes):
        LOGGER.warning('Primer contains drums; they will be removed.')
        notes = [note for note in primer_ns.notes if not note.is_drum]
        del primer_ns.notes[:]
        primer_ns.notes.extend(notes)

    # Set primer instrument and program.
    for note in primer_ns.notes:
        note.instrument = 1
        note.program = 0

    return primer_ns
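A minimal usage sketch for this helper; it assumes magenta.music is importable and a module-level LOGGER is configured (both implied but not shown by the snippet), and 'primer.mid' is a placeholder path:

import logging

import magenta.music as mm

LOGGER = logging.getLogger(__name__)

primer_ns = get_primer_ns('primer.mid', max_length=20.0)
print('Primer: %.1f seconds, %d notes' %
      (primer_ns.total_time, len(primer_ns.notes)))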
Example #2
    def _read_midi(self, input_file):
        """
        Read a MIDI file into a note sequence.
        :param input_file: String path to the MIDI file.
        :return: Note sequence.
        """
        note_sequence = mm.midi_file_to_note_sequence(input_file)

        # Handle sustain pedal in primer.
        if self.config.sustain:
            note_sequence = mm.apply_sustain_control_changes(note_sequence)

        # Trim to desired number of seconds.
        if note_sequence.total_time > self.config.max_length:
            LOGGER.warning(
                'Note sequence duration %.1f is longer than max seconds %d; truncating.',
                note_sequence.total_time, self.config.max_length)
            note_sequence = mm.extract_subsequence(note_sequence, 0,
                                                   self.config.max_length)

        # Remove drums unless the config says to keep them.
        if any(note.is_drum
               for note in note_sequence.notes) and not self.config.use_drum:
            LOGGER.warning('Midi file contains drum sounds, removing.')
            notes = [note for note in note_sequence.notes if not note.is_drum]
            del note_sequence.notes[:]
            note_sequence.notes.extend(notes)

        # Set primer instrument and program.
        for note in note_sequence.notes:
            note.instrument = 1
            note.program = 0

        return note_sequence
Example #3
def piano_continuation(primer):
    print("Does this function not run either?")  # leftover debug print
    primer_ns = mm.midi_file_to_note_sequence(primer)

    # Handle sustain pedal in the primer.
    primer_ns = mm.apply_sustain_control_changes(primer_ns)

    # Trim to desired number of seconds.
    max_primer_seconds = 20  #@param {type:"slider", min:1, max:120}
    if primer_ns.total_time > max_primer_seconds:
        print('Primer is longer than %d seconds, truncating.' %
              max_primer_seconds)
        primer_ns = mm.extract_subsequence(primer_ns, 0, max_primer_seconds)

    # Remove drums from primer if present.
    if any(note.is_drum for note in primer_ns.notes):
        print('Primer contains drums; they will be removed.')
        notes = [note for note in primer_ns.notes if not note.is_drum]
        del primer_ns.notes[:]
        primer_ns.notes.extend(notes)

    # Set primer instrument and program.
    for note in primer_ns.notes:
        note.instrument = 1
        note.program = 0

    #note_sequence_to_midi_file(primer_ns, 'modified_'+primer)

    targets = uncondi_encoders['targets'].encode_note_sequence(primer_ns)

    # Remove the end token from the encoded primer.
    targets = targets[:-1]

    # targets and decode_length are read lazily by the notebook-level input
    # generator that feeds uncondi_samples (see Example #16 for the pattern).
    decode_length = max(0, 4096 - len(targets))
    if len(targets) >= 4096:
        print(
            'Primer has more events than maximum sequence length; nothing will be generated.'
        )

    # Generate sample events.
    sample_ids = next(uncondi_samples)['outputs']

    # Decode to NoteSequence.
    midi_filename = decode(sample_ids, encoder=uncondi_encoders['targets'])
    ns = mm.midi_file_to_note_sequence(midi_filename)

    # Append continuation to primer.
    continuation_ns = mm.concatenate_sequences([primer_ns, ns])

    note_sequence_to_midi_file(continuation_ns, 'continuation_' + primer)
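This function leans on notebook-level helpers that the snippet does not show (uncondi_encoders, uncondi_samples, decode). A sketch of the assumed decode helper, mirroring the one defined in Example #16 below; note that for the performance encoder, encoder.decode returns the path of a MIDI file:

from tensor2tensor.data_generators import text_encoder

def decode(ids, encoder):
    # Drop everything from the EOS token onward, then decode the remaining
    # event IDs to a MIDI file and return its filename.
    ids = list(ids)
    if text_encoder.EOS_ID in ids:
        ids = ids[:ids.index(text_encoder.EOS_ID)]
    return encoder.decode(ids)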
Example #4
    def generate_primer(self):
        """
        Put something important here.

        """
        if self.conditioned:
            raise ValueError("Should be using an unconditioned model!")

        primer_ns = self.sequence
        primer_ns = mm.apply_sustain_control_changes(primer_ns)
        max_primer_seconds = 10

        if primer_ns.total_time > max_primer_seconds:
            print(f'Primer is longer than {max_primer_seconds} seconds, truncating.')
            # cut primer if it's too long
            primer_ns = mm.extract_subsequence(
                primer_ns, 0, max_primer_seconds)

        if any(note.is_drum for note in primer_ns.notes):
            print('Primer contains drums; they will be removed.')
            notes = [note for note in primer_ns.notes if not note.is_drum]
            del primer_ns.notes[:]
            primer_ns.notes.extend(notes)

        for note in primer_ns.notes:
            # make into piano
            note.instrument = 1
            note.program = 0

        self.targets = self.encoders['targets'].encode_note_sequence(
                        primer_ns)
        # Remove the end token from the encoded primer.
        self.targets = self.targets[:-1]
        self.decode_length = max(0, 4096 - len(self.targets))

        if len(self.targets) >= 4096:
            print('Primer has more events than maximum sequence length; nothing will be generated.')
        # Generate sample events.
        sample_ids = next(self.samples)['outputs']

        midi_filename = self.decode(
                        sample_ids,
                        encoder=self.encoders['targets'])
        ns = mm.midi_file_to_note_sequence(midi_filename)
        # Append continuation to primer.
        continuation_ns = mm.concatenate_sequences([primer_ns, ns])

        request_dict = self.put_request_dict
        generated_sequence_2_mp3(continuation_ns, f"{self.unique_id}", use_salamander=True,
                                 request_dict=request_dict)
Example #5
    def generate_basic_notes(self, qpm=160, failsafe=False):
        """
        Requires melody conditioned model.
        """
        if not self.conditioned:
            raise ValueError("Model should be conditioned!")

        if failsafe:
            self.failsafe()

        else:
            melody_ns = copy.deepcopy(self.sequence)
            try:
                melody_instrument = mm.infer_melody_for_sequence(melody_ns)
                notes = [note for note in melody_ns.notes
                         if note.instrument == melody_instrument]

                # Keep only the melody notes, in chronological order. Without
                # clearing first, extend() would duplicate every melody note.
                del melody_ns.notes[:]
                melody_ns.notes.extend(
                    sorted(notes, key=lambda note: note.start_time))
                for i in range(len(melody_ns.notes) - 1):
                    melody_ns.notes[i].end_time = melody_ns.notes[i + 1].start_time

                # Trim the sequence to one minute to keep inference time down.
                melody_ns = mm.extract_subsequence(melody_ns, 0, 60)
                self.inputs = self.encoders['inputs'].encode_note_sequence(
                            melody_ns)
                print("Melody successfully parsed and encoded!")
            except Exception as e:
                print(f"Error in encoding stage {e}")
                print("Resorting to a basic melody")
                self.failsafe()

        self.decode_length = 4096
        sample_ids = next(self.samples)['outputs']

        # Decode to NoteSequence.
        midi_filename = self.decode(
            sample_ids,
            encoder=self.encoders['targets'])
        accompaniment_ns = mm.midi_file_to_note_sequence(midi_filename)

        request_dict = self.put_request_dict
        generated_sequence_2_mp3(accompaniment_ns, f"{self.unique_id}", use_salamander=True,
                                 request_dict=request_dict)
Example #6
    def _generate(self, input_sequence, generator_options):
        if len(generator_options.input_sections) > 1:
            raise mm.SequenceGeneratorException(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise mm.SequenceGeneratorException(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        # This sequence will be quantized later, so it is guaranteed to have only 1
        # tempo.
        qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
        if input_sequence.tempos:
            qpm = input_sequence.tempos[0].qpm

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            input_section = generator_options.input_sections[0]
            primer_sequence = mm.extract_subsequence(input_sequence,
                                                     input_section.start_time,
                                                     input_section.end_time)
            input_start_step = self.seconds_to_steps(input_section.start_time,
                                                     qpm)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        last_end_time = (max(
            n.end_time
            for n in primer_sequence.notes) if primer_sequence.notes else 0)
        if last_end_time > generate_section.start_time:
            raise mm.SequenceGeneratorException(
                'Got GenerateSection request for section that is before or equal to '
                'the end of the NoteSequence. This model can only extend sequences. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_primer_sequence = mm.quantize_note_sequence(
            primer_sequence, self.steps_per_quarter)

        extracted_seqs, _ = polyphony_lib.extract_polyphonic_sequences(
            quantized_primer_sequence, start_step=input_start_step)
        assert len(extracted_seqs) <= 1

        generate_start_step = self.seconds_to_steps(
            generate_section.start_time, qpm)
        generate_end_step = self.seconds_to_steps(generate_section.end_time,
                                                  qpm)

        if extracted_seqs and extracted_seqs[0]:
            poly_seq = extracted_seqs[0]
        else:
            # If no track could be extracted, create an empty track that starts at the
            # requested generate_start_step. This will result in a sequence that
            # contains only the START token.
            poly_seq = polyphony_lib.PolyphonicSequence(
                steps_per_quarter=(quantized_primer_sequence.quantization_info.
                                   steps_per_quarter),
                start_step=generate_start_step)

        # Ensure that the track extends up to the step we want to start generating.
        poly_seq.set_length(generate_start_step - poly_seq.start_step)
        # Trim any trailing end events to prepare the sequence for more events to be
        # appended during generation.
        poly_seq.trim_trailing_end_events()

        # Extract generation arguments from generator options.
        arg_types = {
            'temperature': lambda arg: arg.float_value,
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
            'steps_per_iteration': lambda arg: arg.int_value
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        # Inject the priming sequence as melody in the output of the generator.
        # Note that start_step is 0 because we overwrite poly_seq below. If we
        # included the priming sequence in poly_seq, it would be poly_seq.num_steps.
        melody_to_inject = copy.deepcopy(poly_seq)
        args['modify_events_callback'] = partial(_inject_melody,
                                                 melody_to_inject, 0)

        # Overwrite poly_seq with a blank sequence to feed into the generator so it
        # is conditioned only on the melody events that are injected as the sequence
        # is created. Otherwise, the generator would have to determine the most
        # likely sequence to follow a monophonic line, which is something not
        # present in the current training data (Bach Chorales).
        poly_seq = polyphony_lib.PolyphonicSequence(
            steps_per_quarter=(
                quantized_primer_sequence.quantization_info.steps_per_quarter),
            start_step=generate_start_step)
        poly_seq.trim_trailing_end_events()

        # If we wanted to include the priming sequence and didn't clear poly_seq
        # above, this is how we would calculate total_steps.
        # total_steps = poly_seq.num_steps + (
        #     generate_end_step - generate_start_step)

        total_steps = generate_end_step - generate_start_step

        while poly_seq.num_steps < total_steps:
            # Assume it takes ~5 rnn steps to generate one quantized step.
            # Can't know for sure until generation is finished because the number of
            # notes per quantized step is variable.
            steps_to_gen = total_steps - poly_seq.num_steps
            rnn_steps_to_gen = 5 * steps_to_gen
            tf.logging.info(
                'Need to generate %d more steps for this sequence, will try asking '
                'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
            poly_seq = self._model.generate_polyphonic_sequence(
                len(poly_seq) + rnn_steps_to_gen, poly_seq, **args)
        poly_seq.set_length(total_steps)

        # Specify a base_note_sequence because the priming sequence is not included
        # in poly_seq. If we did not clear poly_seq above, then we would not want to
        # specify a base_note_sequence.
        generated_sequence = poly_seq.to_sequence(
            qpm=qpm, base_note_sequence=copy.deepcopy(primer_sequence))
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
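The key design choice here, spelled out in the comments above: the extracted primer is never fed to the model as context. Instead, poly_seq is reset to a blank sequence and the primer is injected event by event through modify_events_callback, so a model trained only on Bach chorales harmonizes the melody rather than trying to continue a monophonic line.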
Example #7
    def _generate(self, input_sequence, generator_options):
        if len(generator_options.input_sections) > 1:
            raise mm.SequenceGeneratorException(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise mm.SequenceGeneratorException(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        # This sequence will be quantized later, so it is guaranteed to have only 1
        # tempo.
        qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
        if input_sequence.tempos:
            qpm = input_sequence.tempos[0].qpm

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            input_section = generator_options.input_sections[0]
            primer_sequence = mm.extract_subsequence(input_sequence,
                                                     input_section.start_time,
                                                     input_section.end_time)
            input_start_step = self.seconds_to_steps(input_section.start_time,
                                                     qpm)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        last_end_time = (max(
            n.end_time
            for n in primer_sequence.notes) if primer_sequence.notes else 0)
        if last_end_time > generate_section.start_time:
            raise mm.SequenceGeneratorException(
                'Got GenerateSection request for section that is before or equal to '
                'the end of the NoteSequence. This model can only extend sequences. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_primer_sequence = mm.quantize_note_sequence(
            primer_sequence, self.steps_per_quarter)

        extracted_seqs, _ = polyphony_lib.extract_polyphonic_sequences(
            quantized_primer_sequence, start_step=input_start_step)
        assert len(extracted_seqs) <= 1

        start_step = self.seconds_to_steps(generate_section.start_time, qpm)
        end_step = self.seconds_to_steps(generate_section.end_time, qpm)

        if extracted_seqs and extracted_seqs[0]:
            poly_seq = extracted_seqs[0]
        else:
            # If no track could be extracted, create an empty track that starts
            # at the requested start_step. This will result in a sequence that
            # contains only the START token.
            poly_seq = polyphony_lib.PolyphonicSequence(
                steps_per_quarter=(quantized_primer_sequence.quantization_info.
                                   steps_per_quarter),
                start_step=start_step)

        # Ensure that the track extends up to the step we want to start generating.
        poly_seq.set_length(start_step - poly_seq.start_step)
        poly_seq.trim_trailing_end_and_step_end_events()

        # Extract generation arguments from generator options.
        arg_types = {
            'temperature': lambda arg: arg.float_value,
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
            'steps_per_iteration': lambda arg: arg.int_value
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        total_steps = end_step - start_step
        while poly_seq.num_steps < total_steps:
            # Assume it takes ~5 rnn steps to generate one quantized step.
            # Can't know for sure until generation is finished because the number of
            # notes per quantized step is variable.
            steps_to_gen = total_steps - poly_seq.num_steps
            rnn_steps_to_gen = 5 * steps_to_gen
            tf.logging.info(
                'Need to generate %d more steps for this sequence, will try asking '
                'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
            poly_seq = self._model.generate_polyphonic_sequence(
                len(poly_seq) + rnn_steps_to_gen, poly_seq, **args)
        poly_seq.set_length(total_steps)

        generated_sequence = poly_seq.to_sequence(qpm=qpm)
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
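Compare with Example #6: this variant keeps the extracted polyphonic primer in poly_seq and extends it directly, converting with a plain poly_seq.to_sequence(qpm=qpm). There is no melody injection and no base_note_sequence, because the primer is already part of the generated sequence.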
Example #8
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise mm.SequenceGeneratorException(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise mm.SequenceGeneratorException(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    qpm = (input_sequence.tempos[0].qpm
           if input_sequence and input_sequence.tempos
           else mm.DEFAULT_QUARTERS_PER_MINUTE)

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      input_section = generator_options.input_sections[0]
      primer_sequence = mm.extract_subsequence(
          input_sequence, input_section.start_time, input_section.end_time)
      input_start_step = self._seconds_to_steps(input_section.start_time, qpm)
    else:
      primer_sequence = input_sequence
      input_start_step = 0

    last_end_time = (max(n.end_time for n in primer_sequence.notes)
                     if primer_sequence.notes else 0)
    if last_end_time >= generate_section.start_time:
      raise mm.SequenceGeneratorException(
          'Got GenerateSection request for section that is before or equal to '
          'the end of the NoteSequence. This model can only extend sequences. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming sequence.
    quantized_sequence = mm.quantize_note_sequence(
        primer_sequence, self._steps_per_quarter)
    # Setting gap_bars to infinite ensures that the entire input will be used.
    extracted_melodies, _ = mm.extract_melodies(
        quantized_sequence, search_start_step=input_start_step, min_bars=0,
        min_unique_pitches=1, gap_bars=float('inf'),
        ignore_polyphonic_notes=True)
    assert len(extracted_melodies) <= 1

    start_step = self._seconds_to_steps(
        generate_section.start_time, qpm)
    end_step = self._seconds_to_steps(generate_section.end_time, qpm)

    if extracted_melodies and extracted_melodies[0]:
      melody = extracted_melodies[0]
    else:
      # If no melody could be extracted, create an empty melody that starts 1
      # step before the request start_step. This will result in 1 step of
      # silence when the melody is extended below.
      melody = mm.Melody([], start_step=max(0, start_step - 1))

    # Ensure that the melody extends up to the step we want to start generating.
    melody.set_length(start_step - melody.start_step)

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    generated_melody = self._model.generate_melody(
        end_step - melody.start_step, melody, **args)
    generated_sequence = generated_melody.to_sequence(qpm=qpm)
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
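The _seconds_to_steps helper (and the seconds_to_steps variant used in earlier examples) is not shown in these snippets. A plausible sketch, assuming the standard steps-per-quarter quantization these generators rely on:

def _seconds_to_steps(self, seconds, qpm):
    # A quarter note lasts 60 / qpm seconds, so one quantized step lasts
    # 60 / (qpm * steps_per_quarter) seconds.
    steps_per_second = self._steps_per_quarter * qpm / 60.0
    return int(seconds * steps_per_second)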
Example #9
def render_sequence_to_music_dict(midi_file,
                                  music_dict,
                                  model_string="melody_rnn"):
    sequence = mm.midi_file_to_note_sequence(midi_file)
    # scale to num steps.
    music_dict['num_steps'] = 1024 * music_dict['length']
    backup_sequence = None
    basic_models = [
        "melody_rnn", "performance_rnn", "polyphony_rnn", "pianoroll_rnn_nade"
    ]
    if model_string in basic_models:
        subsequence = mm.extract_subsequence(sequence, 0.0, C.SUBSEQUENCE_TIME)
        for note in subsequence.notes:
            # rnns can work with piano data.
            note.program = 0
            note.instrument = 1
        music_dict['sequence'] = subsequence
        if model_string == "performance_rnn":
            music_dict['num_steps'] = music_dict['num_steps'] * 4

    elif model_string == "improv_rnn" or model_string == "music_vae":
        subsequence = mm.extract_subsequence(sequence, 0.0, C.SUBSEQUENCE_TIME)
        melody_instrument = mm.infer_melody_for_sequence(subsequence)

        new_sequence = music_pb2.NoteSequence()
        backup_sequence = music_pb2.NoteSequence()
        new_val = 0.
        backup_val = 0.
        for note in subsequence.notes:
            # Keep only notes belonging to the inferred melody instrument.
            if note.instrument == melody_instrument:
                start = note.start_time
                end = note.end_time
                diff = end - start

                new_sequence.notes.add(pitch=note.pitch,
                                       start_time=new_val,
                                       end_time=new_val + diff,
                                       velocity=100)  # MIDI velocity is capped at 127
                backup_sequence.notes.add(pitch=note.pitch,
                                          start_time=backup_val,
                                          end_time=backup_val + 0.5,
                                          velocity=100)

                new_val += diff
                backup_val += 0.5
            if model_string == "improv_rnn":
                note.program = 0
                note.instrument = 1
        new_sequence.total_time = new_val
        new_sequence.tempos.add(qpm=subsequence.tempos[0].qpm)
        backup_sequence.total_time = backup_val
        backup_sequence.tempos.add(qpm=60)
        music_dict['sequence'] = subsequence
        music_dict['backup_sequence'] = backup_sequence

    elif model_string == "music_transformer":
        # model generate will take care of things
        music_dict['sequence'] = sequence

    return music_dict
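A hypothetical call, assuming the C module referenced above defines SUBSEQUENCE_TIME and that the caller seeds the dict with a 'length' value:

music_dict = {'length': 4}  # hypothetical: 'length' scales num_steps
music_dict = render_sequence_to_music_dict('input.mid', music_dict,
                                           model_string='melody_rnn')
print(music_dict['num_steps'])  # 1024 * 4 = 4096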
Example #10
# Twinkle Twinkle Little Star as a NoteSequence (music_pb2 from
# magenta.protobuf; the first three notes follow the standard melody).
twinkle_twinkle = music_pb2.NoteSequence()
twinkle_twinkle.notes.add(pitch=60, start_time=0.0, end_time=0.5, velocity=80)
twinkle_twinkle.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=80)
twinkle_twinkle.notes.add(pitch=67, start_time=1.0, end_time=1.5, velocity=80)
twinkle_twinkle.notes.add(pitch=67, start_time=1.5, end_time=2.0, velocity=80)
twinkle_twinkle.notes.add(pitch=69, start_time=2.0, end_time=2.5, velocity=80)
twinkle_twinkle.notes.add(pitch=69, start_time=2.5, end_time=3.0, velocity=80)
twinkle_twinkle.notes.add(pitch=67, start_time=3.0, end_time=4.0, velocity=80)
twinkle_twinkle.notes.add(pitch=65, start_time=4.0, end_time=4.5, velocity=80)
twinkle_twinkle.notes.add(pitch=65, start_time=4.5, end_time=5.0, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.0, end_time=5.5, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.5, end_time=6.0, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.0, end_time=6.5, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.5, end_time=7.0, velocity=80)
twinkle_twinkle.notes.add(pitch=60, start_time=7.0, end_time=8.0, velocity=80)
twinkle_twinkle.total_time = 8
twinkle_twinkle.tempos.add(qpm=60)

babyshark = mm.midi_file_to_note_sequence('./mid/babyshark.mid')
babyshark = mm.extract_subsequence(babyshark, 0, 8)

babyshark.ticks_per_quarter = 0
babyshark.time_signatures.pop()
babyshark.key_signatures.pop()
babyshark.tempos.pop()
babyshark.tempos.add(qpm=60)

for note in babyshark.notes:
    if note.pitch < 60:
        note.pitch = 60
    note.instrument = 0
    note.is_drum = False

# This gives us a list of sequences.
# (music_vae is assumed to be a MusicVAE TrainedModel; num_steps and
# length are illustrative values.)
note_sequences = music_vae.interpolate(twinkle_twinkle, babyshark,
                                       num_steps=8, length=32)
Example #11
import magenta.music as mm

babyshark = mm.midi_file_to_note_sequence('./mid/babyshark_full.mid')

# Take 8.5 seconds starting at the 14-second mark, then trim the
# re-zeroed result to its first 8 seconds.
babyshark = mm.extract_subsequence(babyshark, 14, 14 + 8.5)
babyshark = mm.extract_subsequence(babyshark, 0, 8)

babyshark.ticks_per_quarter = 0
babyshark.time_signatures.pop()
babyshark.key_signatures.pop()
babyshark.tempos.pop()
babyshark.tempos.add(qpm=60)

for note in babyshark.notes:
    if note.pitch < 60:
        note.pitch = 60
    note.instrument = 0
    note.is_drum = False

mm.sequence_proto_to_midi_file(babyshark, './mid/babyshark.mid')
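Note that Example #10 above reads './mid/babyshark.mid', the file this preprocessing script writes, so this snippet is intended to run first.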
Example #12
primer = 'C major scale'  #@param ['C major arpeggio', 'C major scale', 'Clair de Lune', 'Upload your own!']

if primer == 'Upload your own!':
  primer_ns = upload_midi()
else:
  # Use one of the provided primers.
  primer_ns = mm.midi_file_to_note_sequence(filenames[primer])

# Handle sustain pedal in the primer.
primer_ns = mm.apply_sustain_control_changes(primer_ns)

# Trim to desired number of seconds.
max_primer_seconds = 20  #@param {type:"slider", min:1, max:120}
if primer_ns.total_time > max_primer_seconds:
  print('Primer is longer than %d seconds, truncating.' % max_primer_seconds)
  primer_ns = mm.extract_subsequence(
      primer_ns, 0, max_primer_seconds)

# Remove drums from primer if present.
if any(note.is_drum for note in primer_ns.notes):
  print('Primer contains drums; they will be removed.')
  notes = [note for note in primer_ns.notes if not note.is_drum]
  del primer_ns.notes[:]
  primer_ns.notes.extend(notes)

# Set primer instrument and program.
for note in primer_ns.notes:
  note.instrument = 1
  note.program = 0

# Play and plot the primer.
# SAMPLE_RATE and SF2_PATH are assumed to be defined earlier in the notebook.
mm.play_sequence(
    primer_ns,
    synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(primer_ns)

Example #13
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise mm.SequenceGeneratorException(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise mm.SequenceGeneratorException(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      input_section = generator_options.input_sections[0]
      primer_sequence = mm.extract_subsequence(
          input_sequence, input_section.start_time, input_section.end_time)
    else:
      primer_sequence = input_sequence

    last_end_time = (max(n.end_time for n in primer_sequence.notes)
                     if primer_sequence.notes else 0)
    if last_end_time >= generate_section.start_time:
      raise mm.SequenceGeneratorException(
          'Got GenerateSection request for section that is before or equal to '
          'the end of the NoteSequence. This model can only extend sequences. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming sequence.
    quantized_sequence = mm.quantize_note_sequence(
        primer_sequence, self._steps_per_quarter)
    # Setting gap_bars to infinite ensures that the entire input will be used.
    extracted_drum_tracks, _ = mm.extract_drum_tracks(
        quantized_sequence, min_bars=0, gap_bars=float('inf'))
    assert len(extracted_drum_tracks) <= 1

    qpm = (primer_sequence.tempos[0].qpm
           if primer_sequence and primer_sequence.tempos
           else mm.DEFAULT_QUARTERS_PER_MINUTE)
    start_step = self._seconds_to_steps(
        generate_section.start_time, qpm)
    end_step = self._seconds_to_steps(generate_section.end_time, qpm)

    if extracted_drum_tracks and extracted_drum_tracks[0]:
      drums = extracted_drum_tracks[0]
    else:
      # If no drum track could be extracted, create an empty drum track that
      # starts 1 step before the request start_step. This will result in 1 step
      # of silence when the drum track is extended below.
      drums = mm.DrumTrack([], start_step=max(0, start_step - 1))

    # Ensure that the drum track extends up to the step we want to start
    # generating.
    drums.set_length(start_step - drums.start_step)

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    generated_drums = self._model.generate_drum_track(
        end_step - drums.start_step, drums, **args)
    generated_sequence = generated_drums.to_sequence(qpm=qpm)
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
Example #14
def trim_sequences(seqs, num_seconds=BAR_SECONDS):
  for i in range(len(seqs)):
    seqs[i] = mm.extract_subsequence(seqs[i], 0.0, num_seconds)
    seqs[i].total_time = num_seconds
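Presumably total_time is forced afterwards because extract_subsequence can report a shorter duration when the last note ends before the window does; pinning it keeps every trimmed clip exactly num_seconds long. Since the loop rebinds each list element, the caller's list is updated in place, e.g.:

seqs = [mm.midi_file_to_note_sequence(f)
        for f in ('a.mid', 'b.mid')]  # hypothetical file names
trim_sequences(seqs, num_seconds=2.0)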
Example #15
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise mm.SequenceGeneratorException(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise mm.SequenceGeneratorException(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    qpm = (input_sequence.tempos[0].qpm
           if input_sequence and input_sequence.tempos
           else mm.DEFAULT_QUARTERS_PER_MINUTE)

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      # Use primer melody from input section only. Take backing chords from
      # beginning of input section through end of generate section.
      input_section = generator_options.input_sections[0]
      primer_sequence = mm.extract_subsequence(
          input_sequence, input_section.start_time, input_section.end_time)
      backing_sequence = mm.extract_subsequence(
          input_sequence, input_section.start_time, generate_section.end_time)
      input_start_step = self.seconds_to_steps(input_section.start_time, qpm)
    else:
      # No input section. Take primer melody from the beginning of the sequence
      # up until the start of the generate section.
      primer_sequence = mm.extract_subsequence(
          input_sequence, 0.0, generate_section.start_time)
      backing_sequence = mm.extract_subsequence(
          input_sequence, 0.0, generate_section.end_time)
      input_start_step = 0

    last_end_time = (max(n.end_time for n in primer_sequence.notes)
                     if primer_sequence.notes else 0)
    if last_end_time >= generate_section.start_time:
      raise mm.SequenceGeneratorException(
          'Got GenerateSection request for section that is before or equal to '
          'the end of the input section. This model can only extend melodies. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming and backing sequences.
    quantized_primer_sequence = mm.quantize_note_sequence(
        primer_sequence, self._steps_per_quarter)
    quantized_backing_sequence = mm.quantize_note_sequence(
        backing_sequence, self._steps_per_quarter)

    # Setting gap_bars to infinite ensures that the entire input will be used.
    extracted_melodies, _ = mm.extract_melodies(
        quantized_primer_sequence, search_start_step=input_start_step,
        min_bars=0, min_unique_pitches=1, gap_bars=float('inf'),
        ignore_polyphonic_notes=True)
    assert len(extracted_melodies) <= 1

    start_step = self.seconds_to_steps(
        generate_section.start_time, qpm)
    end_step = self.seconds_to_steps(generate_section.end_time, qpm)

    if extracted_melodies and extracted_melodies[0]:
      melody = extracted_melodies[0]
    else:
      # If no melody could be extracted, create an empty melody that starts 1
      # step before the request start_step. This will result in 1 step of
      # silence when the melody is extended below.
      melody = mm.Melody([], start_step=max(0, start_step - 1))

    extracted_chords, _ = mm.extract_chords(quantized_backing_sequence)
    chords = extracted_chords[0]

    # Make sure that chords and melody start on the same step.
    if chords.start_step < melody.start_step:
      chords.set_length(len(chords) - melody.start_step + chords.start_step)

    assert chords.end_step == end_step

    # Ensure that the melody extends up to the step we want to start generating.
    melody.set_length(start_step - melody.start_step)

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    generated_melody = self._model.generate_melody(melody, chords, **args)
    generated_lead_sheet = mm.LeadSheet(generated_melody, chords)
    generated_sequence = generated_lead_sheet.to_sequence(qpm=qpm)
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
Example #16
def music_generator(primer='erik_gnossienne',
                    primer_begin_buffer=10,
                    primer_length=90,
                    output_path='.',
                    filename='./public/output'):
    SF2_PATH = './models/Yamaha-C5-Salamander-JNv5.1.sf2'
    SAMPLE_RATE = 16000

    # Upload a MIDI file and convert to NoteSequence.
    def upload_midi():
        data = list(files.upload().values())
        if len(data) > 1:
            print('Multiple files uploaded; using only one.')
        return mm.midi_to_note_sequence(data[0])

    # Decode a list of IDs.
    def decode(ids, encoder):
        ids = list(ids)
        if text_encoder.EOS_ID in ids:
            ids = ids[:ids.index(text_encoder.EOS_ID)]
        return encoder.decode(ids)

    model_name = 'transformer'
    hparams_set = 'transformer_tpu'
    ckpt_path = './models/checkpoints/unconditional_model_16.ckpt'

    class PianoPerformanceLanguageModelProblem(score2perf.Score2PerfProblem):
        @property
        def add_eos_symbol(self):
            return True

    problem = PianoPerformanceLanguageModelProblem()
    unconditional_encoders = problem.get_feature_encoders()

    # Set up HParams.
    hparams = trainer_lib.create_hparams(hparams_set=hparams_set)
    trainer_lib.add_problem_hparams(hparams, problem)
    hparams.num_hidden_layers = 16
    hparams.sampling_method = 'random'

    # Set up decoding HParams.
    decode_hparams = decoding.decode_hparams()
    decode_hparams.alpha = 0.0
    decode_hparams.beam_size = 1

    # Create Estimator.
    run_config = trainer_lib.create_run_config(hparams)
    estimator = trainer_lib.create_estimator(model_name,
                                             hparams,
                                             run_config,
                                             decode_hparams=decode_hparams)

    # These values are reassigned below, once the primer is encoded.
    targets = []
    decode_length = 0

    # Create input generator (so we can adjust priming and
    # decode length on the fly).
    def input_generator():
        # targets and decode_length are locals of music_generator, so they must
        # be declared nonlocal here ('global' would look them up at module
        # level and fail).
        nonlocal targets, decode_length
        while True:
            yield {
                'targets': np.array([targets], dtype=np.int32),
                'decode_length': np.array(decode_length, dtype=np.int32)
            }

    # Start the Estimator, loading from the specified checkpoint.
    input_fn = decoding.make_input_fn_from_generator(input_generator())
    unconditional_samples = estimator.predict(input_fn,
                                              checkpoint_path=ckpt_path)

    # "Burn" one.
    _ = next(unconditional_samples)

    filenames = {
        'C major arpeggio': './models/primers/c_major_arpeggio.mid',
        'C major scale': './models/primers/c_major_scale.mid',
        'Clair de Lune': './models/primers/clair_de_lune.mid',
        'Classical':
        'audio_midi/Classical_Piano_piano-midi.de_MIDIRip/bach/bach_846_format0.mid',
        'erik_gymnopedie': 'audio_midi/erik_satie/gymnopedie_1_(c)oguri.mid',
        'erik_gymnopedie_2': 'audio_midi/erik_satie/gymnopedie_2_(c)oguri.mid',
        'erik_gymnopedie_3': 'audio_midi/erik_satie/gymnopedie_3_(c)oguri.mid',
        'erik_gnossienne': 'audio_midi/erik_satie/gnossienne_1_(c)oguri.mid',
        'erik_gnossienne_2': 'audio_midi/erik_satie/gnossienne_2_(c)oguri.mid',
        'erik_gnossienne_3': 'audio_midi/erik_satie/gnossienne_3_(c)oguri.mid',
        'erik_gnossienne_dery':
        'audio_midi/erik_satie/gnossienne_1_(c)dery.mid',
        'erik_gnossienne_dery_2':
        'audio_midi/erik_satie/gnossienne_2_(c)dery.mid',
        'erik_gnossienne_dery_3':
        'audio_midi/erik_satie/gnossienne_3_(c)dery.mid',
        'erik_gnossienne_dery_5':
        'audio_midi/erik_satie/gnossienne_5_(c)dery.mid',
        'erik_gnossienne_dery_6':
        'audio_midi/erik_satie/gnossienne_6_(c)dery.mid',
        '1': 'audio_midi/erik_satie/1.mid',
        '2': 'audio_midi/erik_satie/2.mid',
        '3': 'audio_midi/erik_satie/3.mid',
        '4': 'audio_midi/erik_satie/4.mid',
        '5': 'audio_midi/erik_satie/5.mid',
        '6': 'audio_midi/erik_satie/6.mid',
        '7': 'audio_midi/erik_satie/7.mid',
        '8': 'audio_midi/erik_satie/8.mid',
        '9': 'audio_midi/erik_satie/9.mid',
        '10': 'audio_midi/erik_satie/10.mid',
    }
    # primer = 'C major scale'

    #if primer == 'Upload your own!':
    #  primer_ns = upload_midi()
    #else:
    #  # Use one of the provided primers.
    #  primer_ns = mm.midi_file_to_note_sequence(filenames[primer])
    primer_ns = mm.midi_file_to_note_sequence(filenames[primer])
    # Handle sustain pedal in the primer.
    primer_ns = mm.apply_sustain_control_changes(primer_ns)

    # Trim to desired number of seconds.
    max_primer_seconds = primer_length
    if primer_ns.total_time > max_primer_seconds:
        print('Primer is longer than %d seconds, truncating.' %
              max_primer_seconds)
        primer_ns = mm.extract_subsequence(
            primer_ns, primer_begin_buffer,
            max_primer_seconds + primer_begin_buffer)

    # Remove drums from primer if present.
    if any(note.is_drum for note in primer_ns.notes):
        print('Primer contains drums; they will be removed.')
        notes = [note for note in primer_ns.notes if not note.is_drum]
        del primer_ns.notes[:]
        primer_ns.notes.extend(notes)

    # Set primer instrument and program.
    for note in primer_ns.notes:
        note.instrument = 1
        note.program = 0

    ## Play and plot the primer.
    #mm.play_sequence(
    #    primer_ns,
    #    synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
    #mm.plot_sequence(primer_ns)
    mm.sequence_proto_to_midi_file(
        primer_ns, join(output_path, 'primer_{}.mid'.format(filename)))

    targets = unconditional_encoders['targets'].encode_note_sequence(primer_ns)

    # Remove the end token from the encoded primer.
    targets = targets[:-1]

    decode_length = max(0, 10000 - len(targets))
    if len(targets) >= 4096:
        print(
            'Primer has more events than maximum sequence length; nothing will be generated.'
        )

    # Generate sample events.
    sample_ids = next(unconditional_samples)['outputs']

    # Decode to NoteSequence.
    midi_filename = decode(sample_ids,
                           encoder=unconditional_encoders['targets'])
    ns = mm.midi_file_to_note_sequence(midi_filename)
    print('Sample IDs: {}'.format(sample_ids))
    print('Sample IDs length: {}'.format(len(sample_ids)))
    print('Encoder: {}'.format(unconditional_encoders['targets']))
    print('Unconditional Samples: {}'.format(unconditional_samples))
    # print('{}'.format(ns))

    # continuation_ns = mm.concatenate_sequences([primer_ns, ns])
    continuation_ns = ns
    # mm.play_sequence(
    #     continuation_ns,
    #     synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
    # mm.plot_sequence(continuation_ns)
    # try:
    audio = mm.fluidsynth(continuation_ns,
                          sample_rate=SAMPLE_RATE,
                          sf2_path=SF2_PATH)

    normalizer = float(np.iinfo(np.int16).max)
    array_of_ints = np.array(np.asarray(audio) * normalizer, dtype=np.int16)

    wavfile.write(join(output_path, filename + '.wav'), SAMPLE_RATE,
                  array_of_ints)
    print('[+] Output stored as {}'.format(filename + '.wav'))
    mm.sequence_proto_to_midi_file(
        continuation_ns,
        join(output_path, 'continuation_{}.mid'.format(filename)))
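A hypothetical invocation, assuming the checkpoint, SoundFont, and primer MIDI paths hard-coded above exist on disk:

music_generator(primer='erik_gnossienne',
                primer_begin_buffer=10,
                primer_length=90,
                output_path='./public',
                filename='output')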
Example #17
    def trim_sequence(self, seq, num_seconds=12.0):
        # mm.extract_subsequence returns a new NoteSequence, so return it;
        # rebinding the local name does not affect the caller's sequence.
        seq = mm.extract_subsequence(seq, 0.0, num_seconds)
        seq.total_time = num_seconds
        return seq
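Unlike trim_sequences in Example #14, which rebinds elements of the caller's list in place, this method works on a local name, so callers must use the returned value:

trimmed = helper.trim_sequence(seq, num_seconds=12.0)  # 'helper' is a hypothetical owner object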