Exemplo n.º 1
0
 def _to_notesequences(self, samples):
     """Convert model output samples back into NoteSequence protos.

     Args:
         samples: Iterable of 2-D arrays, one per sample. Each row is either
             a multi-hot drum roll (when ``self._roll_output`` is set) or a
             one-hot event distribution to be arg-maxed and decoded.

     Returns:
         A list of NoteSequence protos, one per input sample, rendered at
         velocity 80.
     """
     output_sequences = []
     for s in samples:
         if self._roll_output:
             if self.end_token is not None:
                 # np.where returns a tuple of index arrays; take the first
                 # axis so the emptiness test and slice bound below operate
                 # on actual indices. (The original tested len() on the
                 # tuple itself — always 1, hence always truthy — and then
                 # sliced with an index *array* rather than a scalar.)
                 end_i = np.where(s[:, self.end_token])[0]
                 if len(end_i):  # pylint: disable=g-explicit-length-test
                     # Truncate the roll at the first end token.
                     s = s[:end_i[0]]
             # Each step becomes the set of active (non-zero) drum classes.
             events_list = [frozenset(np.where(e)[0]) for e in s]
         else:
             s = np.argmax(s, axis=-1)
             if self.end_token is not None and self.end_token in s:
                 s = s[:s.tolist().index(self.end_token)]
             events_list = [
                 self._oh_encoder_decoder.decode_event(e) for e in s
             ]
         # Map classes to exemplars.
         events_list = [
             frozenset(self._pitch_classes[c][0] for c in e)
             for e in events_list
         ]
         track = mm.DrumTrack(events=events_list,
                              steps_per_bar=self._steps_per_bar,
                              steps_per_quarter=self._steps_per_quarter)
         output_sequences.append(track.to_sequence(velocity=80))
     return output_sequences
Exemplo n.º 2
0
def midiToMatrix(midi_file, steps_per_quarter):
    """Convert a MIDI drum file into a binary piano-roll style matrix.

    Args:
        midi_file: Path of the MIDI file to read.
        steps_per_quarter: Quantization resolution: 4 divides each quarter
            note into sixteenth notes, 8 into thirty-second notes and 16
            into sixty-fourth notes.

    Returns:
        A tuple ``(aa, jump, steps_per_bar, steps_per_quarter)`` where ``aa``
        is a binary matrix with one row per quantization step and one column
        per MIDI pitch, ``jump`` is the duration in seconds of each row,
        ``steps_per_bar`` is the number of rows per bar and
        ``steps_per_quarter`` the number of rows per quarter note.

    Raises:
        IndexError: If the extracted drum track has no notes (the step
            duration is derived from the first note).
    """
    print(midi_file)

    sequence = mm.midi_file_to_sequence_proto(midi_file)
    quantized_sequence = sequences_lib.quantize_note_sequence(
        sequence, steps_per_quarter=steps_per_quarter)
    drum_song = mm.DrumTrack()
    # A large gap_bars keeps the whole file in a single drum track.
    drum_song.from_quantized_sequence(quantized_sequence, gap_bars=50)

    steps_per_bar = drum_song.steps_per_bar
    steps_per_quarter = drum_song.steps_per_quarter

    print('steps_per_bar: ', drum_song.steps_per_bar)
    print('steps_per_quarter: ', drum_song.steps_per_quarter)

    drum_song = drum_song.to_sequence()

    print("total_time", drum_song.total_time)

    # Every note rendered from a quantized drum track has the same duration,
    # so the first note's length is the duration of one quantization step.
    jump = drum_song.notes[0].end_time - drum_song.notes[0].start_time

    print("step_duration", jump)

    # Precompute a (start_time, end_time) -> row-index map so that each note
    # is placed with an O(1) lookup. The original scanned a DataFrame row by
    # row for every note, which was O(notes * rows).
    row_by_times = {}
    count = 0
    fila = 0
    while count + jump <= drum_song.total_time:
        row_by_times[(count, count + jump)] = fila
        count += jump
        fila += 1

    aa = np.zeros((fila, PITCH_LIMIT))
    print()
    print("WaitMidiToMatrix...........:(")
    for note in drum_song.notes:
        # Notes whose times do not exactly match a step boundary are dropped,
        # preserving the exact float comparison of the original code.
        row = row_by_times.get((note.start_time, note.end_time))
        if row is not None:
            aa[row, note.pitch] = 1

    print("Done.......................:)")
    print()
    return aa, jump, steps_per_bar, steps_per_quarter
Exemplo n.º 3
0
def generate(unused_argv):
  """Generates a basic drum sequence of 4 seconds based on a hard coded
  primer, plots it, and plays it back through a MIDI synthesizer port.

  Args:
    unused_argv: Unused command-line arguments.

  Returns:
    0 on completion, including interruption via Ctrl-C during playback.
  """

  # Download and load the trained model bundle, then build and initialize
  # the matching "drum_kit" generator.
  mm.notebook_utils.download_bundle("drum_kit_rnn.mag", "bundles")
  bundle = mm.sequence_generator_bundle.read_bundle_file(
    os.path.join("bundles", "drum_kit_rnn.mag"))

  generator_map = drums_rnn_sequence_generator.get_generator_map()
  generator = generator_map["drum_kit"](checkpoint=None, bundle=bundle)
  generator.initialize()

  # Wall-clock timing for steps and bars at the chosen tempo.
  qpm = 120
  num_bars = 3
  seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
  num_steps_per_bar = constants.DEFAULT_STEPS_PER_BAR
  seconds_per_bar = num_steps_per_bar * seconds_per_step

  # Creates a primer sequence that is fed into the model for the generator,
  # which will generate a sequence based on this one
  # A DrumTrack models a drum sequence by step, so you have step 1 being the
  # midi note 36 (bass drum), followed by 3 steps of silence (those four steps
  # constitutes the first beat or quarter), followed by both notes 36 and 41
  # being struck at the same time (followed by silence by these are optional)
  primer_drums = mm.DrumTrack(
    [frozenset(pitches) for pitches in
     [(38, 51), (), (36,), (),
      (38, 44, 51), (), (36,), (),
      (), (), (38,), (),
      (38, 44), (), (36, 51), (), ]])
  primer_sequence = primer_drums.to_sequence(qpm=qpm)
  primer_start_time = 0
  primer_end_time = primer_start_time + seconds_per_bar

  # Defines the start and end of the generation, which starts at the step
  # after the end of the primer and ends at total seconds.
  # The complete generation will thus contain the primer and the total length
  # needs to be at least the size of the primer.
  generation_start_time = primer_end_time
  generation_end_time = generation_start_time + (seconds_per_bar * num_bars)

  generator_options = generator_pb2.GeneratorOptions()
  generator_options.args['temperature'].float_value = 1.1
  generator_options.generate_sections.add(
    start_time=generation_start_time,
    end_time=generation_end_time)

  # We are using the primer sequence here instead of an empty sequence
  sequence = generator.generate(primer_sequence, generator_options)

  # Write an HTML plot of the generated sequence.
  plot_file = os.path.join("output", "out.html")
  print("Generated plot file: " + str(os.path.abspath(plot_file)))
  pretty_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence)
  plotter = Plotter()
  plotter.show(pretty_midi, plot_file)

  # Find a software synthesizer port to play through. NOTE(review): these
  # are mido *output* port names despite the "input_ports" variable name.
  input_ports = [name for name in mido.get_output_names()
                 if "VirtualMIDISynth" in name
                 or "FLUID Synth" in name]
  if not input_ports:
    print("Cannot find proper input port in "
          + str(mido.get_output_names()))
  print("Playing generated MIDI in input port names: "
        + str(input_ports))
  midi_hub = mh.MidiHub([], input_ports, None)

  # Start a playback thread on an empty sequence that we then update.
  empty_sequence = music_pb2.NoteSequence()
  player = midi_hub.start_playback(empty_sequence,
                                   allow_updates=True)
  # Channel 10 (0-indexed 9) is the General MIDI percussion channel.
  player._channel = 9

  # Shift the sequence's note times so playback starts "now" in wall time.
  wall_start_time = time.time()
  sequence_adjusted = music_pb2.NoteSequence()
  sequence_adjusted.CopyFrom(sequence)
  sequence_adjusted = adjust_sequence_times(sequence_adjusted,
                                            wall_start_time)
  player.update_sequence(sequence_adjusted,
                         start_time=wall_start_time)

  # Wait for playback to finish. The original returned from a `finally`
  # block, which silently swallowed every exception raised in the `try`
  # (pylint lost-exception); return after the handler instead.
  try:
    player.join(generation_end_time)
  except KeyboardInterrupt:
    pass
  return 0
Exemplo n.º 4
0
    def _generate(self, input_sequence, generator_options):
        """Extends `input_sequence` with generated drums over the requested
        generate section.

        Args:
            input_sequence: NoteSequence proto used as the primer; may be
                empty.
            generator_options: GeneratorOptions proto with at most one
                `input_sections` entry and exactly one `generate_sections`
                entry.

        Returns:
            A NoteSequence containing the primer drums followed by the
            generated drum track, rendered at the primer's tempo.

        Raises:
            sequence_generator.SequenceGeneratorError: If the section counts
                are invalid or the generate section starts before the end of
                the input notes.
        """
        if len(generator_options.input_sections) > 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        # Use the primer's tempo when available so generated timing lines up.
        if input_sequence and input_sequence.tempos:
            qpm = input_sequence.tempos[0].qpm
        else:
            qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
        steps_per_second = mm.steps_per_quarter_to_steps_per_second(
            self.steps_per_quarter, qpm)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            # Only the requested slice of the input serves as the primer.
            input_section = generator_options.input_sections[0]
            primer_sequence = mm.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
            input_start_step = mm.quantize_to_step(input_section.start_time,
                                                   steps_per_second,
                                                   quantize_cutoff=0.0)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        # This model can only extend a sequence: the generate section must
        # begin at or after the end of the last primer note.
        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0
        if last_end_time > generate_section.start_time:
            raise sequence_generator.SequenceGeneratorError(
                'Got GenerateSection request for section that is before the end of '
                'the NoteSequence. This model can only extend sequences. Requested '
                'start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_sequence = mm.quantize_note_sequence(primer_sequence,
                                                       self.steps_per_quarter)
        # Setting gap_bars to infinite ensures that the entire input will be used.
        extracted_drum_tracks, _ = drum_pipelines.extract_drum_tracks(
            quantized_sequence,
            search_start_step=input_start_step,
            min_bars=0,
            gap_bars=float('inf'),
            ignore_is_drum=True)
        assert len(extracted_drum_tracks) <= 1

        start_step = mm.quantize_to_step(generate_section.start_time,
                                         steps_per_second,
                                         quantize_cutoff=0.0)
        # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
        # always rounds down. This avoids generating a sequence that ends at 5.0
        # seconds when the requested end time is 4.99.
        end_step = mm.quantize_to_step(generate_section.end_time,
                                       steps_per_second,
                                       quantize_cutoff=1.0)

        if extracted_drum_tracks and extracted_drum_tracks[0]:
            drums = extracted_drum_tracks[0]
        else:
            # If no drum track could be extracted, create an empty drum track that
            # starts 1 step before the request start_step. This will result in 1 step
            # of silence when the drum track is extended below.
            steps_per_bar = int(
                mm.steps_per_bar_in_quantized_sequence(quantized_sequence))
            drums = mm.DrumTrack([],
                                 start_step=max(0, start_step - 1),
                                 steps_per_bar=steps_per_bar,
                                 steps_per_quarter=self.steps_per_quarter)

        # Ensure that the drum track extends up to the step we want to start
        # generating.
        drums.set_length(start_step - drums.start_step)

        # Extract generation arguments from generator options.
        arg_types = {
            'temperature': lambda arg: arg.float_value,
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
            'steps_per_iteration': lambda arg: arg.int_value
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        generated_drums = self._model.generate_drum_track(
            end_step - drums.start_step, drums, **args)
        generated_sequence = generated_drums.to_sequence(qpm=qpm)
        # Sanity check: the rendered sequence must not overshoot the
        # requested end time by more than float tolerance.
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
    def _generate(self, input_sequence, generator_options):
        """Extends `input_sequence` with generated drums over the requested
        generate section.

        NOTE(review): this is an older-API variant of the same generator
        logic (uses `mm.SequenceGeneratorException` and
        `self.seconds_to_steps` rather than `quantize_to_step`).

        Args:
            input_sequence: NoteSequence proto used as the primer; may be
                empty.
            generator_options: GeneratorOptions proto with at most one
                `input_sections` entry and exactly one `generate_sections`
                entry.

        Returns:
            A NoteSequence containing the primer drums followed by the
            generated drum track, rendered at the primer's tempo.

        Raises:
            mm.SequenceGeneratorException: If the section counts are invalid
                or the generate section starts before the end of the input
                notes.
        """
        if len(generator_options.input_sections) > 1:
            raise mm.SequenceGeneratorException(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise mm.SequenceGeneratorException(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        # Use the primer's tempo when available so generated timing lines up.
        qpm = (input_sequence.tempos[0].qpm if input_sequence
               and input_sequence.tempos else mm.DEFAULT_QUARTERS_PER_MINUTE)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            # Only the requested slice of the input serves as the primer.
            input_section = generator_options.input_sections[0]
            primer_sequence = mm.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
            input_start_step = self.seconds_to_steps(input_section.start_time,
                                                     qpm)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        # This model can only extend a sequence: the generate section must
        # begin at or after the end of the last primer note.
        last_end_time = (max(
            n.end_time
            for n in primer_sequence.notes) if primer_sequence.notes else 0)
        if last_end_time > generate_section.start_time:
            raise mm.SequenceGeneratorException(
                'Got GenerateSection request for section that is before the end of '
                'the NoteSequence. This model can only extend sequences. Requested '
                'start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_sequence = mm.quantize_note_sequence(primer_sequence,
                                                       self.steps_per_quarter)
        # Setting gap_bars to infinite ensures that the entire input will be used.
        extracted_drum_tracks, _ = mm.extract_drum_tracks(
            quantized_sequence,
            search_start_step=input_start_step,
            min_bars=0,
            gap_bars=float('inf'))
        assert len(extracted_drum_tracks) <= 1

        start_step = self.seconds_to_steps(generate_section.start_time, qpm)
        end_step = self.seconds_to_steps(generate_section.end_time, qpm)

        if extracted_drum_tracks and extracted_drum_tracks[0]:
            drums = extracted_drum_tracks[0]
        else:
            # If no drum track could be extracted, create an empty drum track that
            # starts 1 step before the request start_step. This will result in 1 step
            # of silence when the drum track is extended below.
            drums = mm.DrumTrack([], start_step=max(0, start_step - 1))

        # Ensure that the drum track extends up to the step we want to start
        # generating.
        drums.set_length(start_step - drums.start_step)

        # Extract generation arguments from generator options.
        arg_types = {
            'temperature': lambda arg: arg.float_value,
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
            'steps_per_iteration': lambda arg: arg.int_value
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        generated_drums = self._model.generate_drum_track(
            end_step - drums.start_step, drums, **args)
        generated_sequence = generated_drums.to_sequence(qpm=qpm)
        # Sanity check: the rendered sequence must not overshoot the
        # requested end time by more than float tolerance.
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
Exemplo n.º 6
0
 def drumtrack_fn():
     # Factory for an empty DrumTrack. `steps_per_bar` and
     # `steps_per_quarter` are free variables captured from the enclosing
     # scope, which is not visible in this chunk — presumably extractor
     # setup; confirm against the surrounding code.
     return mm.DrumTrack(steps_per_bar=steps_per_bar,
                         steps_per_quarter=steps_per_quarter)
def generate(unused_argv):
    """Generate four bars of drums (one hard-coded primer bar plus three
    generated bars), writing a MIDI file and an HTML plot to `output/`."""
    # A bundle (.mag file) is a trained model packaged for magenta; fetch
    # it from the magenta website and load it.
    mm.notebook_utils.download_bundle("drum_kit_rnn.mag", "bundles")
    bundle = mm.sequence_generator_bundle.read_bundle_file(
        os.path.join("bundles", "drum_kit_rnn.mag"))

    # Build and initialize the "drum_kit" generator matching that bundle.
    generator_map = drums_rnn_sequence_generator.get_generator_map()
    generator = generator_map["drum_kit"](checkpoint=None, bundle=bundle)
    generator.initialize()

    # Three generated bars on top of a one-bar primer -> four bars total.
    num_bars = 3
    qpm = 120

    # This generator works at 4 steps per quarter; derive wall-clock
    # durations from the tempo.
    seconds_per_step = 60.0 / qpm / generator.steps_per_quarter

    # Default 16 steps per bar: 4/4 time sampled at 4 steps per quarter.
    num_steps_per_bar = constants.DEFAULT_STEPS_PER_BAR
    seconds_per_bar = num_steps_per_bar * seconds_per_step

    print(f"Seconds per step: {seconds_per_step}")
    print(f"Seconds per bar: {seconds_per_bar}")

    # One bar of primer fed to the model: each frozenset holds the MIDI drum
    # pitches struck on that step (e.g. 36 bass drum, 38 snare), an empty
    # set is a step of silence.
    primer_events = [(38, 51), (), (36,), (),
                     (38, 44, 51), (), (36,), (),
                     (), (), (38,), (),
                     (38, 44), (), (36, 51), ()]
    primer_drums = mm.DrumTrack([frozenset(p) for p in primer_events])
    primer_sequence = primer_drums.to_sequence(qpm=qpm)

    # Generation begins right where the primer ends and runs for num_bars.
    primer_start_time = 0
    primer_end_time = primer_start_time + seconds_per_bar
    generation_start_time = primer_end_time
    generation_end_time = generation_start_time + seconds_per_bar * num_bars

    print(f"Primer start and end:[{primer_start_time}, {primer_end_time}]")
    print(f"Generation start and end:"
          f"[{generation_start_time}, {generation_end_time}]")

    # The generator interface is shared by all magenta models.
    generator_options = generator_pb2.GeneratorOptions()

    # A bit of temperature adds variety to the output.
    temperature = 1.1
    print(f"Temperature: {temperature}")
    generator_options.args['temperature'].float_value = temperature

    # Declare the section to be generated.
    generator_options.generate_sections.add(start_time=generation_start_time,
                                            end_time=generation_end_time)

    # The primer is included in the resulting NoteSequence.
    sequence = generator.generate(primer_sequence, generator_options)

    # Persist the result as a MIDI file.
    midi_file = os.path.join("output", "out.mid")
    mm.midi_io.note_sequence_to_midi_file(sequence, midi_file)
    print(f"Generated midi file: {os.path.abspath(midi_file)}")

    # And as an HTML plot.
    plot_file = os.path.join("output", "out.html")
    pretty_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence)
    plotter = Plotter()
    plotter.show(pretty_midi, plot_file)
    print(f"Generated plot file: {os.path.abspath(plot_file)}")

    return 0
def generate(unused_argv):
    """Generate a basic four-second drum sequence from a hard-coded one-bar
    primer, writing a MIDI file and an HTML plot to `output/`."""

    # Fetch and load the trained drum model bundle.
    mm.notebook_utils.download_bundle("drum_kit_rnn.mag", "bundles")
    bundle = mm.sequence_generator_bundle.read_bundle_file(
        os.path.join("bundles", "drum_kit_rnn.mag"))

    # Build the "drum_kit" generator that matches this bundle.
    generator_map = drums_rnn_sequence_generator.get_generator_map()
    generator = generator_map["drum_kit"](checkpoint=None, bundle=bundle)
    generator.initialize()

    # Wall-clock seconds per step and per bar at the chosen tempo.
    qpm = 120
    num_bars = 3
    seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
    num_steps_per_bar = constants.DEFAULT_STEPS_PER_BAR
    seconds_per_bar = num_steps_per_bar * seconds_per_step

    # One bar of primer: each frozenset holds the MIDI drum pitches struck
    # on that sixteenth-note step; an empty set is a step of silence.
    beats = [(38, 51), (), (36,), (),
             (38, 44, 51), (), (36,), (),
             (), (), (38,), (),
             (38, 44), (), (36, 51), ()]
    primer_drums = mm.DrumTrack([frozenset(b) for b in beats])
    primer_sequence = primer_drums.to_sequence(qpm=qpm)

    # Generation starts at the end of the primer and runs for num_bars bars;
    # the resulting sequence contains the primer as well.
    primer_start_time = 0
    primer_end_time = primer_start_time + seconds_per_bar
    generation_start_time = primer_end_time
    generation_end_time = generation_start_time + seconds_per_bar * num_bars

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = 1.1
    generator_options.generate_sections.add(start_time=generation_start_time,
                                            end_time=generation_end_time)

    # Generate from the primer rather than an empty sequence.
    sequence = generator.generate(primer_sequence, generator_options)

    # Write the MIDI file.
    midi_file = os.path.join("output", "out.mid")
    mm.midi_io.note_sequence_to_midi_file(sequence, midi_file)
    print("Generated midi file: " + str(os.path.abspath(midi_file)))

    # Write the HTML plot.
    plot_file = os.path.join("output", "out.html")
    print("Generated plot file: " + str(os.path.abspath(plot_file)))
    pretty_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence)
    plotter = Plotter()
    plotter.show(pretty_midi, plot_file)

    return 0