Пример #1
0
def create_song_prototype(song_path,
                          start_time,
                          stop_time,
                          model_used='attention_rnn',
                          temperature=1.0):
    """Continue a slice of a MIDI song with a melody RNN.

    Loads the ``model_used`` bundle from ``MEDIA_ROOT/magenta_models``,
    primes it with the [start_time, stop_time] subsequence of the song,
    generates a continuation of equal length, and returns only the newly
    generated part (the primer is stripped off).
    """
    # Load the pre-trained bundle and build the matching generator.
    bundle_path = '%s/magenta_models/%s.mag' % (MEDIA_ROOT, model_used)
    bundle = mm.sequence_generator_bundle.read_bundle_file(bundle_path)
    rnn = melody_rnn_sequence_generator.get_generator_map()[model_used](
        checkpoint=None, bundle=bundle)
    rnn.initialize()

    # Prime with the requested slice of the input song.
    full_sequence = midi_file_to_note_sequence(song_path)
    primer = extract_subsequence(full_sequence, start_time, stop_time)

    options = generator_pb2.GeneratorOptions()
    options.args['temperature'].float_value = temperature
    # Generate from the end of the primer, doubling its length.
    options.generate_sections.add(
        start_time=primer.total_time,
        end_time=2 * primer.total_time)
    generated = rnn.generate(primer, options)

    # Keep only the continuation, dropping the primer portion.
    return extract_subsequence(generated,
                               primer.total_time,
                               2 * primer.total_time)
Пример #2
0
def generateNewSequence(input_sequence, num_steps, temperature, write_to_file=False):
    """Continue a drum sequence with the bundled drum_kit RNN.

    Args:
        input_sequence: NoteSequence primer; it is re-quantized at 8 steps
            per quarter before generation.
        num_steps: size of the generation window, in quantized steps.
        temperature: sampling temperature; higher is more random.
        write_to_file: when True, also write the primer and the result to
            'oldSequence.mid' and 'newSequence.mid'.

    Returns:
        The generated NoteSequence.
    """
    input_sequence = mm.quantize_note_sequence(input_sequence, 8)

    # Load the bundled drum_kit model and build its generator.
    bundle = sequence_generator_bundle.read_bundle_file('drum_kit_rnn.mag')
    generator_map = drums_rnn_sequence_generator.get_generator_map()
    drum_rnn = generator_map['drum_kit'](checkpoint=None, bundle=bundle)
    drum_rnn.initialize()

    # End of the primer (0 for an empty sequence).
    last_end_time = (max(n.end_time for n in input_sequence.notes)
                     if input_sequence.notes else 0)
    # NOTE(review): despite the name, this multiplies steps by
    # steps-per-quarter rather than by seconds-per-step; preserved as-is to
    # keep output timing identical — confirm the intended units.
    total_seconds = num_steps * input_sequence.quantization_info.steps_per_quarter

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(start_time=last_end_time,
                                            end_time=total_seconds)

    new_sequence = drum_rnn.generate(input_sequence, generator_options)
    new_sequence.quantization_info.steps_per_quarter = 8
    # NOTE(review): the return value of this call was discarded in the
    # original as well; assign it if callers expect a quantized sequence.
    mm.quantize_note_sequence(new_sequence, 8)

    if write_to_file:  # fixed: was 'write_to_file == True'
        mm.sequence_proto_to_midi_file(input_sequence, 'oldSequence.mid')
        mm.sequence_proto_to_midi_file(new_sequence, 'newSequence.mid')

    return new_sequence
Пример #3
0
def ns_performance(temperature, output_path):
    """Generate a 120-second performance and render it to a WAV file.

    Loads the performance_with_dynamics bundle from a fixed local path,
    generates two minutes of music from an empty primer at the given
    temperature, synthesizes the result with fluidsynth, and writes the
    audio to ``output_path`` at 44.1 kHz.
    """
    MODEL_NAME = 'performance_with_dynamics'
    BUNDLE_ABS_PATH = ('/Users/bregy/PycharmProjects/neurospace/bundles/'
                       'performance_with_dynamics.mag')

    # Build and initialize the generator from the bundle file.
    bundle = mm.sequence_generator_bundle.read_bundle_file(BUNDLE_ABS_PATH)
    generator = performance_sequence_generator.get_generator_map()[MODEL_NAME](
        checkpoint=None, bundle=bundle)
    generator.initialize()

    # Generate 120 seconds from an empty primer sequence.
    options = generator_pb2.GeneratorOptions()
    options.args['temperature'].float_value = temperature
    options.generate_sections.add(start_time=0, end_time=120)
    sequence = generator.generate(music_pb2.NoteSequence(), options)

    # Render the sequence to audio and write it out.
    midi = mm.sequence_proto_to_pretty_midi(sequence)
    audio = midi.fluidsynth()
    write(output_path, 44100, audio)
Пример #4
0
def extendTwinkle():
    """Extend the Twinkle Twinkle primer with basic_rnn and save the result."""
    primer = createTwinkle()
    print("Initializing Melody RNN...")
    bundle = sequence_generator_bundle.read_bundle_file('./basic_rnn.mag')
    melody_rnn = melody_rnn_sequence_generator.get_generator_map()['basic_rnn'](
        checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    input_sequence = primer  # change this to teapot if you want
    num_steps = 128  # change this for shorter or longer sequences
    temperature = 1.0  # the higher the temperature the more random the sequence.

    # Generation begins on the next step after the last primer note ends.
    if input_sequence.notes:
        last_end_time = max(note.end_time for note in input_sequence.notes)
    else:
        last_end_time = 0
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    options = generator_pb2.GeneratorOptions()
    options.args['temperature'].float_value = temperature
    options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    # Ask the model to continue the sequence, then write it as MIDI.
    extended = melody_rnn.generate(input_sequence, options)
    mm.sequence_proto_to_midi_file(extended, 'twinkleExtended.mid')
Пример #5
0
def generateNewSequence(input_sequence, temperature, write_to_file):
    """Generate a new 2-second drum pattern from a primer via the drum_kit RNN.

    Args:
        input_sequence: NoteSequence primer; re-quantized at 8 steps per
            quarter before generation.
        temperature: sampling temperature; higher is more random.
        write_to_file: when True, write 'primer.mid' and 'new_sequence.mid'.

    Returns:
        The generated NoteSequence, trimmed to [2.0, 4.0) seconds and
        quantized at 4 steps per quarter.
    """
    input_sequence = mm.quantize_note_sequence(input_sequence, 8)

    # Load the bundled drum_kit model from its installed location.
    bundle = sequence_generator_bundle.read_bundle_file(
        '/Library/Application Support/Quin Scacheri/Magenta Beats/drum_kit_rnn.mag'
    )
    generator_map = drums_rnn_sequence_generator.get_generator_map()
    drum_rnn = generator_map['drum_kit'](checkpoint=None, bundle=bundle)
    drum_rnn.initialize()

    # End of the primer (0 for an empty sequence).
    last_end_time = (max(
        n.end_time
        for n in input_sequence.notes) if input_sequence.notes else 0)

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time, end_time=8.0)

    new_sequence = drum_rnn.generate(input_sequence, generator_options)
    # Keep only the 2.0-4.0s window of the result and re-quantize it.
    new_sequence = mm.trim_note_sequence(new_sequence, 2.0, 4.0)
    new_sequence = mm.quantize_note_sequence(new_sequence, 4)

    # Fixed: the original used 'if (True):', unconditionally writing the
    # files and ignoring the write_to_file parameter.
    if write_to_file:
        mm.sequence_proto_to_midi_file(input_sequence, 'primer.mid')
        mm.sequence_proto_to_midi_file(new_sequence, 'new_sequence.mid')

    return new_sequence
Пример #6
0
def generate_one_bar_sequence(generator,
                              qpm=120.0,
                              temp=1.0,
                              number_of_steps=16,
                              beam_size=4,
                              steps_per_quarter=4,
                              primer=None):
    """Generate one bar (number_of_steps steps) with the given generator.

    Args:
        generator: an initialized Magenta sequence generator.
        qpm: tempo in quarter notes per minute.
        temp: sampling temperature; higher is more random, 1.0 is default.
        beam_size: beam-search width.
        steps_per_quarter: quantization resolution used by compute_time.
        primer: optional list of drum-pitch collections used as the priming
            drum track; an empty/None primer starts from an empty sequence.

    Returns:
        The generated NoteSequence.
    """
    # Fixed: the original used a mutable default argument (primer=[]).
    if primer is None:
        primer = []

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args[
        'temperature'].float_value = temp  # Higher is more random; 1.0 is default.
    generator_options.args['beam_size'].int_value = beam_size

    if primer:
        print("Using primer" + str(primer))
        primer_drums = magenta.music.DrumTrack(
            [frozenset(pitches) for pitches in primer])
        primer_sequence = primer_drums.to_sequence(qpm=qpm)
    else:
        print("no primer")
        primer_sequence = music_pb2.NoteSequence()

    # Generate from the end of the primer up to one bar's worth of time.
    total_seconds = compute_time(qpm, number_of_steps, steps_per_quarter)
    last_end_time = compute_time(qpm, len(primer), steps_per_quarter)
    generator_options.generate_sections.add(
        start_time=last_end_time, end_time=total_seconds)

    return generator.generate(primer_sequence, generator_options)
Пример #7
0
 def _generate_melody(self):
     """Generate a melody continuation of the accumulated primer melody.

     Builds a MelodyRnnSequenceGenerator from self.melody_bundle, primes it
     with self.accumulated_primer_melody, asks for a continuation three
     times as long as the primer, stores the new pitches (primer removed,
     capped at self.max_robot_length) in self.generated_melody, and clears
     the primer accumulator.
     """
     # Resolve the model config from the bundle's generator id.
     melody_config_id = self.melody_bundle.generator_details.id
     melody_config = melody_rnn_model.default_configs[melody_config_id]
     generator = melody_rnn_sequence_generator.MelodyRnnSequenceGenerator(
         model=melody_rnn_model.MelodyRnnModel(melody_config),
         details=melody_config.details,
         steps_per_quarter=melody_config.steps_per_quarter,
         checkpoint=melody_rnn_generate.get_checkpoint(),
         bundle=self.melody_bundle)
     generator_options = generator_pb2.GeneratorOptions()
     # Greedy decoding: beam/branch/steps-per-iteration all fixed to 1.
     generator_options.args['temperature'].float_value = self.temperature
     generator_options.args['beam_size'].int_value = 1
     generator_options.args['branch_factor'].int_value = 1
     generator_options.args['steps_per_iteration'].int_value = 1
     primer_melody = magenta.music.Melody(self.accumulated_primer_melody)
     qpm = magenta.music.DEFAULT_QUARTERS_PER_MINUTE
     primer_sequence = primer_melody.to_sequence(qpm=qpm)
     seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
     # Set the start time to begin on the next step after the last note ends.
     last_end_time = (max(
         n.end_time
         for n in primer_sequence.notes) if primer_sequence.notes else 0)
     # Continuation runs until 3x the primer length.
     melody_total_seconds = last_end_time * 3
     generate_section = generator_options.generate_sections.add(
         start_time=last_end_time + seconds_per_step,
         end_time=melody_total_seconds)
     generated_sequence = generator.generate(primer_sequence,
                                             generator_options)
     self.generated_melody = [n.pitch for n in generated_sequence.notes]
     # Get rid of primer melody.
     self.generated_melody = self.generated_melody[
         len(self.accumulated_primer_melody):]
     # Make sure generated melody is not too long.
     self.generated_melody = self.generated_melody[:self.max_robot_length]
     self.accumulated_primer_melody = []
Пример #8
0
def generate_midi(midi_data, total_seconds=10):
    """Continue midi_data with the module-level generator.

    Returns a seekable NamedTemporaryFile containing the generated MIDI.
    """
    primer_sequence = magenta.music.midi_io.midi_to_sequence_proto(midi_data)

    # Estimate the tempo only when there are enough notes to trust it.
    if len(primer_sequence.notes) > 4:
        estimated_tempo = midi_data.estimate_tempo()
        qpm = estimated_tempo / 2 if estimated_tempo > 240 else estimated_tempo
    else:
        qpm = 120
    primer_sequence.tempos[0].qpm = qpm

    options = generator_pb2.GeneratorOptions()
    # Generation begins one step after the final primer note ends.
    if primer_sequence.notes:
        last_end_time = max(note.end_time for note in primer_sequence.notes)
    else:
        last_end_time = 0
    options.generate_sections.add(
        start_time=last_end_time + _steps_to_seconds(1, qpm),
        end_time=total_seconds)

    # generate the output sequence
    generated_sequence = generator.generate(primer_sequence, options)

    # Hand the result back as a rewound temporary MIDI file.
    output = tempfile.NamedTemporaryFile()
    magenta.music.midi_io.sequence_proto_to_midi_file(generated_sequence,
                                                      output.name)
    output.seek(0)
    return output
Пример #9
0
def synthesize(midi_file,
               model='basic',
               num_steps=2000,
               max_primer_notes=32,
               temperature=1.0,
               beam_size=1,
               branch_factor=1,
               steps_per_quarter=16,
               **kwargs):
    """Continue a primer MIDI file with a melody RNN and save the results.

    Loads the '<model>_rnn' bundle, primes it with the parsed MIDI file,
    generates num_steps steps past the end of the primer, and writes
    'primer.mid' (the primer) and 'synthesis.mid' (the generation).

    Parameters
    ----------
    midi_file : str
        Path of the primer MIDI file.
    model : str, optional
        Model flavor; resolves to the '{model}_rnn' config and bundle file.
    num_steps : int, optional
        Total number of generation steps.
    max_primer_notes : int, optional
        Not referenced in this function; kept for interface compatibility.
    temperature : float, optional
        Sampling temperature; higher is more random.
    beam_size : int, optional
        Beam-search width.
    branch_factor : int, optional
        Beam-search branch factor.
    steps_per_quarter : int, optional
        Quantization resolution for the generator.
    **kwargs
        Extra options forwarded to parse_midi_file.
    """
    config = melody_rnn_model.default_configs['{}_rnn'.format(model)]
    bundle_file = '{}_rnn.mag'.format(model)
    generator = melody_rnn_sequence_generator.MelodyRnnSequenceGenerator(
        model=melody_rnn_model.MelodyRnnModel(config),
        details=config.details,
        steps_per_quarter=steps_per_quarter,
        bundle=magenta.music.read_bundle_file(bundle_file))

    # Get a protobuf of the MIDI file.
    seq, qpm = parse_midi_file(midi_file, **kwargs)

    opt = generator_pb2.GeneratorOptions()
    seconds_per_step = 60.0 / qpm / steps_per_quarter
    total_seconds = num_steps * seconds_per_step
    # Robustness fix: the original raised ValueError on a primer with no notes.
    last_end_time = (max(n.end_time for n in seq.notes) if seq.notes else 0)
    opt.generate_sections.add(start_time=last_end_time + seconds_per_step,
                              end_time=total_seconds)
    opt.args['temperature'].float_value = temperature
    opt.args['beam_size'].int_value = beam_size
    opt.args['branch_factor'].int_value = branch_factor
    opt.args['steps_per_iteration'].int_value = 1

    print(opt)
    generated = generator.generate(seq, opt)
    magenta.music.sequence_proto_to_midi_file(seq, 'primer.mid')
    magenta.music.sequence_proto_to_midi_file(generated, 'synthesis.mid')
Пример #10
0
    def create_midi_test(self, midi_data):
        """Generate 3 seconds of melody with attention_rnn and save it as MIDI.

        Reads the tempo from ``midi_data`` when one is present, generates
        from an empty primer at that tempo, and writes the result to
        ./midifile/<timestamp>_1.mid.

        Args:
            midi_data: in-memory MIDI object accepted by
                magenta.music.midi_to_sequence_proto; only its tempo is used.
        """
        BUNDLE_NAME = 'attention_rnn'

        config = magenta.models.melody_rnn.melody_rnn_model.default_configs[
            BUNDLE_NAME]
        bundle_file = magenta.music.read_bundle_file(
            os.path.abspath(BUNDLE_NAME + '.mag'))
        steps_per_quarter = 4

        generator = melody_rnn_sequence_generator.MelodyRnnSequenceGenerator(
            model=melody_rnn_model.MelodyRnnModel(config),
            details=config.details,
            steps_per_quarter=steps_per_quarter,
            bundle=bundle_file)

        qpm = 120
        generator_options = generator_pb2.GeneratorOptions()
        total_seconds = 3

        # Only the tempo of the incoming MIDI is used; generation itself is
        # primed with an empty sequence below. (The original also computed
        # last_end_time/input_sequence here and then discarded them.)
        primer_sequence = magenta.music.midi_to_sequence_proto(midi_data)
        if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
            qpm = primer_sequence.tempos[0].qpm

        input_sequence = music_pb2.NoteSequence()
        input_sequence.tempos.add().qpm = qpm
        generator_options.generate_sections.add(
            start_time=0, end_time=total_seconds)

        # Generate one sequence and save it with a timestamped name.
        date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
        digits = 1

        generated_sequence = generator.generate(input_sequence,
                                                generator_options)
        midi_filename = '%s_%s.mid' % (date_and_time, str(1).zfill(digits))
        midi_path = os.path.join("./midifile", midi_filename)
        magenta.music.sequence_proto_to_midi_file(generated_sequence,
                                                  midi_path)

        # Fixed: %d requires an int; the original passed the string "1".
        tf.logging.info('Wrote %d MIDI files to %s', 1, "midi folder")
Пример #11
0
def generate(primer=example,
             qpm=120,
             num_steps=120,
             temperature=1,
             branch_factor=1,
             beam_size=2,
             steps_per_iteration=1):
    """Generate a drum continuation of `primer` and encode its best fragment.

    Python 2 code (uses print statements). Relies on module-level names:
    `example`, `generator`, `binNotes`, `fragment`, `getDistance`,
    `encondeFragment`, `np`, `tf`, `ast`, `magenta`.

    Args:
        primer: string literal of a drum-track list, parsed with
            ast.literal_eval; defaults to the module-level `example`.
        qpm: tempo in quarter notes per minute.
        num_steps: total number of steps requested (primer + generation).
        temperature: sampling temperature; higher is more random.
        branch_factor, beam_size, steps_per_iteration: beam-search settings.

    Returns:
        The encoded best-matching fragment, or -1 when no usable fragment
        is found.
    """
    # Parse the primer string into a DrumTrack and a priming NoteSequence.
    primer_drums = magenta.music.DrumTrack(
        [frozenset(pitches) for pitches in ast.literal_eval(primer)])

    primer_sequence = primer_drums.to_sequence(qpm=qpm)
    seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
    total_seconds = num_steps * seconds_per_step
    generator_options = generator_pb2.GeneratorOptions()

    input_sequence = primer_sequence
    # End of the primer (0 for an empty sequence).
    last_end_time = (max(
        n.end_time
        for n in primer_sequence.notes) if primer_sequence.notes else 0)

    # Generation starts one step after the primer ends.
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step, end_time=total_seconds)

    if generate_section.start_time >= generate_section.end_time:
        tf.logging.fatal(
            'Priming sequence is longer than the total number of steps '
            'requested: Priming sequence length: %s, Generation length '
            'requested: %s', generate_section.start_time, total_seconds)
    else:
        generator_options.args['temperature'].float_value = temperature
        generator_options.args['beam_size'].int_value = beam_size
        generator_options.args['branch_factor'].int_value = branch_factor
        generator_options.args[
            'steps_per_iteration'].int_value = steps_per_iteration

        generated_sequence = generator.generate(input_sequence,
                                                generator_options)
        generated_midi = magenta.music.sequence_proto_to_pretty_midi(
            generated_sequence)
        instrument = generated_midi.instruments[1]

    # NOTE(review): if the fatal branch above was taken, `instrument` is
    # undefined here and this line raises NameError — confirm intended flow.
    bins = binNotes(instrument.notes)

    # Score every 12-step rotation of the bins against the primer.
    distances = []
    for i in range(12):
        distances.append(getDistance(fragment(bins, i), primer_drums))

    print "MAX distance: ", max(distances)
    if max(distances) == 0: return -1

    # print "MAX fragment: ", np.argmax(distances)
    # print "AVG distance: ", np.average(distances)

    # Reject fragments that contain no notes at all.
    nNotes = 0
    for n in fragment(bins, np.argmax(distances)):
        nNotes += len(n)
    if nNotes == 0: return -1
    return encondeFragment(fragment(bins, np.argmax(distances)))
Пример #12
0
    def create_midi(self, midi_data):
        """Generate 3 seconds of melody with attention_rnn; return a MIDI temp file.

        Reads the tempo from ``midi_data`` when one is present, generates
        from an empty primer at that tempo, and returns a rewound
        NamedTemporaryFile containing the generated MIDI.
        """
        BUNDLE_NAME = 'attention_rnn'

        # Resolve the config and bundle file for the attention model.
        config = magenta.models.melody_rnn.melody_rnn_model.default_configs[
            BUNDLE_NAME]
        bundle_file = magenta.music.read_bundle_file(
            os.path.abspath(BUNDLE_NAME + '.mag'))
        steps_per_quarter = 4

        generator = melody_rnn_sequence_generator.MelodyRnnSequenceGenerator(
            model=melody_rnn_model.MelodyRnnModel(config),
            details=config.details,
            steps_per_quarter=steps_per_quarter,
            # checkpoint=get_checkpoint(),
            bundle=bundle_file)

        qpm = 120
        generator_options = generator_pb2.GeneratorOptions()
        # seconds_per_step is only used by the commented-out section below.
        seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
        total_seconds = 3

        # Only the tempo of the incoming MIDI is used: input_sequence is
        # unconditionally replaced with an empty sequence below, so the
        # last_end_time/input_sequence assignments here are dead code.
        primer_sequence = magenta.music.midi_to_sequence_proto(midi_data)
        if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
            qpm = primer_sequence.tempos[0].qpm
        if primer_sequence:
            input_sequence = primer_sequence
            # Set the start time to begin on the next step after the last note ends.
            last_end_time = (max(n.end_time for n in primer_sequence.notes)
                             if primer_sequence.notes else 0)
            # generate_section = generator_options.generate_sections.add(
            #     start_time=last_end_time + seconds_per_step,
            #     end_time=total_seconds)

        input_sequence = music_pb2.NoteSequence()
        input_sequence.tempos.add().qpm = qpm
        generate_section = generator_options.generate_sections.add(
            start_time=0, end_time=total_seconds)
        # generator_options.args['temperature'].float_value = FLAGS.temperature
        # generator_options.args['beam_size'].int_value = FLAGS.beam_size
        # generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
        # generator_options.args[
        #       'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
        # tf.logging.debug('input_sequence: %s', input_sequence)
        # tf.logging.debug('generator_options: %s', generator_options)

        generated_sequence = generator.generate(input_sequence,
                                                generator_options)

        # Return the result as a rewound temporary MIDI file.
        output = tempfile.NamedTemporaryFile()
        magenta.music.midi_io.sequence_proto_to_midi_file(
            generated_sequence, output.name)
        output.seek(0)
        return output
Пример #13
0
def _generator_options(input_sequence, num_steps, rnn_model, temperature):
    """Build GeneratorOptions continuing input_sequence for num_steps model steps."""
    options = generator_pb2.GeneratorOptions()
    options.args['temperature'].float_value = temperature

    # End of the primer (0 for an empty sequence).
    if input_sequence.notes:
        last_end_time = max(note.end_time for note in input_sequence.notes)
    else:
        last_end_time = 0
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / rnn_model.steps_per_quarter

    # Start one step past the primer and generate num_steps worth of time.
    start_time = last_end_time + seconds_per_step
    end_time = start_time + num_steps * seconds_per_step
    options.generate_sections.add(start_time=start_time, end_time=end_time)
    return options
Пример #14
0
def run_with_config(generator, config, primer_sequence=None):
    """Generates melodies and adds them to a RheingoldGraph instance.

    TF & Magenta interaction based on Magenta melody_rnn_generate.py

    Args:
        generator: The MelodyRnnSequenceGenerator to use for melody generation
        config: RheingoldMagentaConfig
        primer_sequence: ProtoBuf NoteSequence

    Yields:
        Generated NoteSequence protos, config.num_outputs of them.
    """
    if not primer_sequence:
        tf.logging.warning(
            'No priming sequence specified. Defaulting to a single middle C.')
        primer_melody = magenta.music.Melody([60])
        primer_sequence = primer_melody.to_sequence(config.qpm)

    # Derive the total number of seconds to generate based on the QPM of the
    # priming sequence and the num_steps flag
    seconds_per_step = 60.0 / config.qpm / generator.steps_per_quarter
    total_seconds = config.num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at the
    # end of the priming sequence and continuing until the sequence is num_steps
    # long.
    generator_options = generator_pb2.GeneratorOptions()
    last_end_time = (max(
        n.end_time
        for n in primer_sequence.notes) if primer_sequence.notes else 0)
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step, end_time=total_seconds)

    if generate_section.start_time >= generate_section.end_time:
        # Fixed log-message typo: 'number of step' -> 'number of steps'.
        tf.logging.fatal(
            'Priming sequence is longer than the total number of steps '
            'requested: Priming sequence length: %s, Generation length '
            'requested: %s', generate_section.start_time, total_seconds)
        return

    generator_options.args['temperature'].float_value = 1.0
    generator_options.args['beam_size'].int_value = 1
    generator_options.args['branch_factor'].int_value = 1
    generator_options.args['steps_per_iteration'].int_value = 1
    tf.logging.debug('primer_sequence: %s', primer_sequence)
    tf.logging.debug('generator_options: %s', generator_options)

    # Make the generate request num_outputs times and return the output
    # to RheingoldGraph
    for i in range(config.num_outputs):
        generated_sequence = generator.generate(primer_sequence,
                                                generator_options)
        yield generated_sequence

    tf.logging.info('Generated %d sequences.' % config.num_outputs)
Пример #15
0
    def test_create_midi_default(self):
        """Continue '180827_02_midi.mid' with attention_rnn and save 'new2.mid'."""
        BUNDLE_NAME = 'attention_rnn'

        config = magenta.models.melody_rnn.melody_rnn_model.default_configs[
            BUNDLE_NAME]
        bundle_file = magenta.music.read_bundle_file(
            os.path.abspath(BUNDLE_NAME + '.mag'))
        steps_per_quarter = 4

        generator = melody_rnn_sequence_generator.MelodyRnnSequenceGenerator(
            model=melody_rnn_model.MelodyRnnModel(config),
            details=config.details,
            steps_per_quarter=steps_per_quarter,
            bundle=bundle_file)

        qpm = 120
        total_seconds = 4.0

        # Prime with the fixture MIDI; prefer its tempo when available.
        primer_sequence = magenta.music.midi_file_to_sequence_proto(
            '180827_02_midi.mid')
        if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
            qpm = primer_sequence.tempos[0].qpm

        # Build the options once (the original instantiated GeneratorOptions
        # twice and discarded the first instance).
        generator_options = generator_pb2.GeneratorOptions()
        # Set the start time to begin on the next step after the last note ends.
        last_end_time = (max(
            n.end_time
            for n in primer_sequence.notes) if primer_sequence.notes else 0)
        generator_options.generate_sections.add(start_time=last_end_time +
                                                _steps_to_seconds(1, qpm),
                                                end_time=total_seconds)
        generated_sequence = generator.generate(primer_sequence,
                                                generator_options)
        magenta.music.sequence_proto_to_midi_file(generated_sequence,
                                                  'new2.mid')

        # Fixed: %d requires an int; the original passed the string "1".
        tf.logging.info('Wrote %d MIDI files to %s', 1, "midi folder")
Пример #16
0
def generate(sequence: NoteSequence, name: str, bundle_filename: str,
             config_name: str, generation_start_time: float,
             generation_end_time: float):
    """Generate into [generation_start_time, generation_end_time] and return
    only that window of the result."""
    options = generator_pb2.GeneratorOptions()
    options.args['temperature'].float_value = 1
    options.generate_sections.add(start_time=generation_start_time,
                                  end_time=generation_end_time)
    gen = get_sequence_generator(name, bundle_filename, config_name)
    generated = gen.generate(sequence, options)
    # Drop everything outside the requested generation window.
    return ss.trim_note_sequence(generated, generation_start_time,
                                 generation_end_time)
Пример #17
0
    def generate_1(self, duration):
        """Generate `duration` seconds continuing self.notes and enqueue them."""
        # Build the primer note sequence from the recorded notes.
        midi = notes_to_midi(self.notes, self.t0)
        primer_seq = magenta.music.midi_io.midi_to_note_sequence(midi)

        # Predict the tempo; only trust the estimate with enough notes.
        if len(primer_seq.notes) > 4:
            estimated_tempo = midi.estimate_tempo()
            qpm = estimated_tempo / 2 if estimated_tempo > 240 else estimated_tempo
        else:
            qpm = 120
        primer_seq.tempos[0].qpm = qpm

        # Generate `duration` seconds starting one step past the primer.
        gen_options = generator_pb2.GeneratorOptions()
        if primer_seq.notes:
            last_end_time = max(note.end_time for note in primer_seq.notes)
        else:
            last_end_time = 0
        gen_start_time = last_end_time + steps_to_seconds(1, qpm)
        gen_end_time = gen_start_time + duration
        gen_options.generate_sections.add(start_time=gen_start_time,
                                          end_time=gen_end_time)

        gen_seq = self.generator.generate(primer_seq, gen_options)
        gen_midi = magenta.music.midi_io.note_sequence_to_pretty_midi(gen_seq)

        # The primer is included in the generated data, so strip it and
        # rebase the remaining notes to start at time zero.
        stripped = [
            pretty_midi.Note(note.velocity, note.pitch,
                             note.start - gen_start_time,
                             note.end - gen_start_time)
            for note in gen_midi.instruments[0].notes
            if note.start >= gen_start_time
        ]
        gen_midi.instruments[0].notes = stripped
        gen_notes = midi_to_notes(gen_midi)

        # Add the new notes to the play queue.
        self.queue.put(gen_notes)
Пример #18
0
def run_with_flags(generator):
    """Generate melodies per FLAGS and write them as MIDI files.

    Primes from FLAGS.primer_melody or FLAGS.primer_midi (defaulting to a
    single middle C when neither is given), generates FLAGS.num_steps steps,
    and writes FLAGS.num_outputs MIDI files into FLAGS.output_dir.

    NOTE(review): the original body was syntactically invalid — an `elif`
    (and the rest of the function) was nested inside the preceding `if`
    body. Restored to the standard melody_rnn_generate control flow.
    """
    FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
    primer_midi = None
    if FLAGS.primer_midi:
        primer_midi = os.path.expanduser(FLAGS.primer_midi)
    if not tf.gfile.Exists(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)

    # Choose the priming sequence: explicit melody > primer MIDI > middle C.
    primer_sequence = None
    qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE
    if FLAGS.primer_melody:
        primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))
        primer_sequence = primer_melody.to_sequence(qpm=qpm)
    elif primer_midi:
        primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
        if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
            qpm = primer_sequence.tempos[0].qpm
    else:
        tf.logging.warning('No priming sequence specified.')
        primer_melody = magenta.music.Melody([60])
        primer_sequence = primer_melody.to_sequence(qpm=qpm)

    # Total seconds to generate, derived from QPM and the num_steps flag.
    seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
    total_seconds = FLAGS.num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    if primer_sequence:
        input_sequence = primer_sequence
        # Start on the next step after the last primer note ends.
        last_end_time = (max(n.end_time for n in primer_sequence.notes)
                         if primer_sequence.notes else 0)
        generate_section = generator_options.generate_sections.add(
            start_time=last_end_time + seconds_per_step,
            end_time=total_seconds)
        if generate_section.start_time >= generate_section.end_time:
            tf.logging.fatal(
                'requested: Priming sequence length: %s, Generation length '
                'requested: %s', generate_section.start_time, total_seconds)
            return
    else:
        input_sequence = music_pb2.NoteSequence()
        input_sequence.tempos.add().qpm = qpm
        generator_options.generate_sections.add(start_time=0,
                                                end_time=total_seconds)

    generator_options.args['temperature'].float_value = FLAGS.temperature
    generator_options.args['beam_size'].int_value = FLAGS.beam_size
    generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
    generator_options.args['steps_per_iteration'].int_value = FLAGS.steps_per_iteration
    tf.logging.debug('input_sequence: %s', input_sequence)
    tf.logging.debug('generator_options: %s', generator_options)

    # Generate and save num_outputs MIDI files with timestamped names.
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    digits = len(str(FLAGS.num_outputs))
    for i in range(FLAGS.num_outputs):
        generated_sequence = generator.generate(input_sequence, generator_options)
        midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
        midi_path = os.path.join(FLAGS.output_dir, midi_filename)
        magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)
    tf.logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs, FLAGS.output_dir)
Пример #19
0
def performance_rnn(record_file):
    """Builds a primer melody from a note-name file and extends it with
    Performance RNN.

    Args:
      record_file: Path to a text file with one note name per line; each name
          is looked up in the module-level ``pitch_dict`` to get a MIDI pitch.

    Returns:
      The basename of the MIDI file written under
      /home/ubuntu/team15/midi/performance_rnn/.
    """
    # Lay the recorded notes out on a fixed 0.5 s grid.
    start_time = 0.0
    timestep = 0.5
    end_time = 0.5
    melody_sequence = music_pb2.NoteSequence()
    with open(record_file, 'r') as record:
        for line in record:
            melody_sequence.notes.add(pitch=pitch_dict[line.strip()],
                                      start_time=start_time,
                                      end_time=end_time,
                                      velocity=80)
            start_time += timestep
            end_time += timestep
    melody_sequence.total_time = end_time
    melody_sequence.tempos.add(qpm=60)

    input_sequence = melody_sequence
    num_steps = 8192  # change this for shorter or longer sequences
    temperature = 1.0  # the higher the temperature the more random the sequence.

    bundle = mm.sequence_generator_bundle.read_bundle_file(
        '/home/ubuntu/team15/bundle/performance_with_dynamics.mag')
    generator_map = performance_sequence_generator.get_generator_map()
    generator = generator_map['performance_with_dynamics'](checkpoint=None,
                                                           bundle=bundle)
    generator.initialize()

    # Derive the total number of seconds to generate.
    seconds_per_step = 1.0 / generator.steps_per_second
    generate_end_time = num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at the
    # end of the priming sequence and continuing until the sequence is num_steps
    # long.
    generator_options = generator_pb2.GeneratorOptions()
    # Set the start time to begin when the last note ends.
    generate_section = generator_options.generate_sections.add(
        start_time=input_sequence.total_time, end_time=generate_end_time)

    # Bug fix: use the `temperature` local defined above instead of a
    # duplicated literal, so changing the knob actually takes effect.
    generator_options.args['temperature'].float_value = temperature

    sequence = generator.generate(input_sequence, generator_options)

    new_file_name = record_file.split('/')[-1].split(
        '.')[0] + '_performance_rnn.mid'
    mm.sequence_proto_to_midi_file(
        sequence, '/home/ubuntu/team15/midi/performance_rnn/' + new_file_name)
    return new_file_name
Пример #20
0
  def _generate(self, input_sequence, zero_time, response_start_time,
                response_end_time):
    """Generates a response sequence with the currently-selected generator.

    Args:
      input_sequence: The NoteSequence to use as a generation seed.
      zero_time: The float time in seconds to treat as the start of the input.
      response_start_time: The float time in seconds for the start of
          generation.
      response_end_time: The float time in seconds for the end of generation.

    Returns:
      The generated NoteSequence.
    """
    # Work in a time frame where the input starts at 0; that keeps the
    # section bookkeeping below simple.
    start = response_start_time - zero_time
    end = response_end_time - zero_time

    options = generator_pb2.GeneratorOptions()
    options.input_sections.add(start_time=0, end_time=start)
    options.generate_sections.add(start_time=start, end_time=end)
    # Apply the current temperature setting.
    options.args['temperature'].float_value = self._temperature

    # Run the generator and log what we asked of it.
    tf.logging.info(
        "Generating sequence using '%s' generator.",
        self._sequence_generator.details.id)
    tf.logging.debug('Generator Details: %s', self._sequence_generator.details)
    tf.logging.debug('Bundle Details: %s',
                     self._sequence_generator.bundle_details)
    tf.logging.debug('Generator Options: %s', options)
    shifted_input = adjust_sequence_times(input_sequence, -zero_time)
    response = self._sequence_generator.generate(shifted_input, options)

    # Trim to the requested window, then shift back to wall-clock time.
    response = magenta.music.trim_note_sequence(response, start, end)
    return adjust_sequence_times(response, zero_time)
Пример #21
0
    def generate_melody(self, input_sequence):
        """Calls the SequenceGenerator and returns the generated NoteSequence."""
        # TODO(fjord): Align generation time on a measure boundary.
        last_end_time = max(
            (n.end_time for n in input_sequence.notes), default=0)

        # Assume 4/4 time signature and a single tempo.
        qpm = input_sequence.tempos[0].qpm
        quarter_seconds = 60.0 / qpm
        seconds_to_generate = quarter_seconds * 4 * self._num_bars_to_generate

        generator_options = generator_pb2.GeneratorOptions()
        section = generator_options.generate_sections.add()
        # Start generating 1 quarter note after the sequence ends.
        section.start_time_seconds = last_end_time + quarter_seconds
        section.end_time_seconds = section.start_time_seconds + seconds_to_generate

        return self._generator.generate(input_sequence, generator_options)
Пример #22
0
def extendDrums():
    """Creates a primer drum track and asks the Drums RNN to continue it.

    Returns:
      The NoteSequence produced by the model (primer plus continuation).
    """
    dt = createDrumTrack()
    print("Initializing Drums RNN...")
    bundle = sequence_generator_bundle.read_bundle_file('drum_kit_rnn.mag')
    generator_map = drums_rnn_sequence_generator.get_generator_map()
    drum_rnn = generator_map['drum_kit'](checkpoint=None, bundle=bundle)
    drum_rnn.initialize()

    # Bug fix: the original referenced an undefined global `drums`; use the
    # track we just created as the primer instead.
    input_sequence = dt
    num_steps = 128  # change this for shorter or longer sequences
    temperature = 1.0  # the higher the temperature the more random the sequence.

    # Bug fix: `last_end_time`, `seconds_per_step` and `total_seconds` were
    # never defined in the original; derive them from the primer's tempo the
    # same way the other examples in this file do.
    qpm = input_sequence.tempos[0].qpm if input_sequence.tempos else 120
    seconds_per_step = 60.0 / qpm / drum_rnn.steps_per_quarter
    last_end_time = (max(n.end_time for n in input_sequence.notes)
                     if input_sequence.notes else 0)
    total_seconds = num_steps * seconds_per_step

    # Set the start time to begin on the next step after the last note ends.
    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step, end_time=total_seconds)

    # Ask the model to continue the sequence (the original called a
    # nonexistent `generateDrumTrack` method and dropped the result).
    return drum_rnn.generate(input_sequence, generator_options)
Пример #23
0
def generate_midi(midi_data, total_seconds=10):
    """Continues a primer MIDI with the module-level `generator`.

    Args:
      midi_data: MIDI input accepted by `midi_to_sequence_proto`, used as the
          priming sequence.
      total_seconds: Number of seconds of new material to generate.

    Returns:
      A `pretty_midi.PrettyMIDI` built from the generated sequence.
    """
    primer_sequence = magenta.music.midi_io.midi_to_sequence_proto(midi_data)

    # For now, assume 120 quavers per minute
    qpm = 120
    # Bug fix: the original defined an unused local `steps = 1` while reading
    # an undefined global `steps_per_quarter` below — they are clearly the
    # same quantity, so name it properly and use it.
    steps_per_quarter = 1
    primer_sequence.tempos[0].qpm = qpm

    generator_options = generator_pb2.GeneratorOptions()
    last_end_time = (max(
        n.end_time
        for n in primer_sequence.notes) if primer_sequence.notes else 0)

    # Start generation one step past the end of the primer.
    starting_time = last_end_time + 1 * 60.0 / qpm / steps_per_quarter
    generator_options.generate_sections.add(start_time=starting_time,
                                            end_time=starting_time +
                                            total_seconds)
    # NOTE(review): `generator` is assumed to be a module-level sequence
    # generator initialized elsewhere in this file — confirm.
    generated_sequence = generator.generate(primer_sequence, generator_options)

    # Round-trip through a temp file because the MIDI writer wants a path.
    output = tempfile.NamedTemporaryFile()
    magenta.music.midi_io.sequence_proto_to_midi_file(generated_sequence,
                                                      output.name)
    output.seek(0)
    return pretty_midi.PrettyMIDI(output)
Пример #24
0
def generate_sequence(input_sequence):
    """Asks the RNN model to continue `input_sequence` and returns the result."""
    rnn_model = _init_generator()

    num_steps = 20  # change this for shorter or longer sequences
    temperature = 1.2  # the higher the temperature the more random the sequence.

    options = generator_pb2.GeneratorOptions()
    options.args['temperature'].float_value = temperature

    # Begin generating one step after the final note of the input.
    if input_sequence.notes:
        last_end_time = max(n.end_time for n in input_sequence.notes)
    else:
        last_end_time = 0
    qpm = input_sequence.tempos[0].qpm
    step_seconds = 60.0 / qpm / rnn_model.steps_per_quarter
    begin = last_end_time + step_seconds
    length = num_steps * step_seconds

    options.generate_sections.add(start_time=begin, end_time=begin + length)

    return rnn_model.generate(input_sequence, options)
Пример #25
0
    def run(self):
        """The main loop for a real-time call and response interaction.

        Alternates between receiving input from the MidiHub ("call" phase) and
        playing generated sequences ("response" phase). During the call phase,
        the input is captured and used to generate the response, which is then
        played back during the response phase.
        """
        # We measure time in units of quarter notes.
        quarter_duration = 60.0 / self._qpm
        # Start time in quarter notes from the epoch, rounded down to a whole
        # quarter. The +1.0 offsets the start one second into the future,
        # presumably to leave scheduling headroom — TODO confirm.
        start_quarters = (time.time() + 1.0) // quarter_duration

        # The number of notes before call phase ends to start generation for
        # response phase. Will be automatically adjusted to be as small as possible
        # while avoiding late response starts.
        predictahead_quarters = 1

        # Offset to beginning of call phase from start_quarters.
        call_offset_quarters = 0

        while not self._stop_signal.is_set():
            # Call phase.

            # Call phase start in quarter notes from the epoch.
            call_start_quarters = start_quarters + call_offset_quarters
            # Start the metronome at the beginning of the call phase.
            self._midi_hub.start_metronome(
                self._qpm, call_start_quarters * quarter_duration)

            # Start a captor at the beginning of the call phase.
            captor = self._midi_hub.start_capture(
                self._qpm, call_start_quarters * quarter_duration)

            if self._phase_bars is not None:
                # Fixed-length phases: duration of the call phase in quarters.
                call_quarters = self._phase_bars * self._quarters_per_bar
                # The duration of the capture in quarter notes. Capture stops
                # `predictahead_quarters` early so generation can begin before
                # the response phase starts.
                capture_quarters = call_quarters - predictahead_quarters
            else:
                # Variable-length phases: wait for the end-call signal.
                self._midi_hub.wait_for_event(self._end_call_signal)
                # The duration of the call phase in quarter notes.
                # We end the call phase at the end of the next bar that is at least
                # `predictahead_quarters` in the future.
                call_quarters = time.time(
                ) // quarter_duration - call_start_quarters
                remaining_call_quarters = -call_quarters % self._quarters_per_bar
                if remaining_call_quarters < predictahead_quarters:
                    remaining_call_quarters += self._quarters_per_bar
                call_quarters += remaining_call_quarters
                # The duration of the capture in quarter notes.
                capture_quarters = call_quarters - predictahead_quarters

            # Set the metronome to stop at the appropriate time.
            self._midi_hub.stop_metronome(
                (call_quarters + call_start_quarters) * quarter_duration,
                block=False)

            # Stop the captor at the appropriate time.
            captor.stop(stop_time=((call_start_quarters + capture_quarters) *
                                   quarter_duration))
            captured_sequence = captor.captured_sequence()

            # Check to see if a stop has been requested during capture.
            if self._stop_signal.is_set():
                break

            # Set times in `captured_sequence` so that the call start is at 0.
            adjust_times(captured_sequence,
                         -(call_start_quarters * quarter_duration))

            # The response occupies the window immediately after the call and
            # lasts just as long.
            response_start_quarters = call_quarters
            response_end_quarters = 2 * call_quarters

            generator_options = generator_pb2.GeneratorOptions()
            generator_options.generate_sections.add(
                start_time_seconds=response_start_quarters * quarter_duration,
                end_time_seconds=response_end_quarters * quarter_duration)

            # Generate response.
            response_sequence = self._sequence_generator.generate(
                captured_sequence, generator_options)

            # Set times in `response_sequence` back to the wall times.
            adjust_times(response_sequence,
                         call_start_quarters * quarter_duration)

            # Check to see if a stop has been requested during generation.
            if self._stop_signal.is_set():
                break

            # Response phase.
            # Start response playback.
            self._midi_hub.start_playback(response_sequence)

            # Adapt `predictahead_quarters`: compare when generation finished
            # against when the response must start, and widen or narrow the
            # lead time accordingly.
            remaining_time = ((response_start_quarters + call_start_quarters) *
                              quarter_duration - time.time())
            if remaining_time > (predictahead_quarters * quarter_duration):
                predictahead_quarters -= 1
                tf.logging.info(
                    'Generator is ahead by %.3f seconds. '
                    'Decreasing predictahead_quarters to %d.', remaining_time,
                    predictahead_quarters)
            elif remaining_time < 0:
                predictahead_quarters += 1
                tf.logging.info(
                    'Generator is lagging by %.3f seconds. '
                    'Increasing predictahead_quarters to %d.', -remaining_time,
                    predictahead_quarters)

            # Advance past the call and response windows just used.
            call_offset_quarters += response_end_quarters
def run_with_flags(generator):
  """Generates melodies and saves them as MIDI files.

  Uses the options specified by the flags defined in this module. If
  --record_a is set, an initial state is built from precomputed embeddings
  (optionally averaged with --record_b's) and passed to the generator.

  Args:
    generator: The MelodyRnnSequenceGenerator to use for generation.
  """
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)

  primer_midi = None
  if FLAGS.primer_midi:
    primer_midi = os.path.expanduser(FLAGS.primer_midi)

  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)

  primer_sequence = None
  qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE
  if FLAGS.primer_melody:
    primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))
    primer_sequence = primer_melody.to_sequence(qpm=qpm)
  elif primer_midi:
    primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
    if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
      qpm = primer_sequence.tempos[0].qpm
  else:
    tf.logging.warning(
        'No priming sequence specified. Defaulting to MELODY_START.')
    primer_melody = magenta.music.Melody([constants.MELODY_START])
    primer_sequence = primer_melody.to_sequence(qpm=qpm)

  # Derive the total number of seconds to generate based on the QPM of the
  # priming sequence and the num_steps flag.
  total_seconds = _steps_to_seconds(FLAGS.num_steps, qpm)

  # Specify start/stop time for generation based on starting generation at the
  # end of the priming sequence and continuing until the sequence is num_steps
  # long.
  generator_options = generator_pb2.GeneratorOptions()
  if primer_sequence:
    input_sequence = primer_sequence
    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(n.end_time for n in primer_sequence.notes)
                     if primer_sequence.notes else 0)
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + _steps_to_seconds(1, qpm),
        end_time=total_seconds)

    if generate_section.start_time >= generate_section.end_time:
      tf.logging.fatal(
          'Priming sequence is longer than the total number of steps '
          'requested: Priming sequence length: %s, Generation length '
          'requested: %s',
          generate_section.start_time, total_seconds)
      return
  else:
    input_sequence = music_pb2.NoteSequence()
    input_sequence.tempos.add().qpm = qpm
    generate_section = generator_options.generate_sections.add(
        start_time=0,
        end_time=total_seconds)
  generator_options.args['temperature'].float_value = FLAGS.temperature
  generator_options.args['beam_size'].int_value = FLAGS.beam_size
  generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
  generator_options.args[
      'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
  tf.logging.debug('input_sequence: %s', input_sequence)
  tf.logging.debug('generator_options: %s', generator_options)

  # Prepare initial state
  initial_state = None
  if FLAGS.record_a is not None:
    id_file = os.path.expanduser(FLAGS.id_file)
    # Bug fix: decode check_output's bytes so the str split below works on
    # Python 3 (no behavior change on Python 2).
    last_line = subprocess.check_output(['tail', '-1', id_file]).decode()
    num_records = int(last_line.split(',')[0]) + 1
    # Bug fix: Python 2 `print` statement replaced with a call, consistent
    # with Python 3 and the rest of the file.
    print('%d records in total' % num_records)

    embedding_file = os.path.join(os.path.join(os.path.expanduser(FLAGS.run_dir), 'train'), 'embedding.npy')
    embedding = np.memmap(embedding_file, dtype='float32', mode='r')
    assert(embedding.shape[0] % num_records == 0)
    # Bug fix: floor division — `/` yields a float on Python 3, which is not
    # a valid shape dimension. Identical result for ints on Python 2.
    embedding.shape = (num_records, embedding.shape[0] // num_records)

    initial_state = np.copy(embedding[FLAGS.record_a, :])
    if FLAGS.record_b is not None:
      # Blend two records by averaging their embeddings.
      initial_state += embedding[FLAGS.record_b, :]
      initial_state /= 2.

  # Make the generate request num_outputs times and save the output as midi
  # files.
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
  digits = len(str(FLAGS.num_outputs))
  for i in range(FLAGS.num_outputs):
    generated_sequence = generator.generate(input_sequence, generator_options, initial_state=initial_state)

    midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
    midi_path = os.path.join(FLAGS.output_dir, midi_filename)
    magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)

  tf.logging.info('Wrote %d MIDI files to %s',
                  FLAGS.num_outputs, FLAGS.output_dir)
def run_with_flags(generator):
    """Generates pianoroll tracks and saves them as MIDI files.

    Uses the options specified by the flags defined in this module.

    Args:
      generator: The PianorollRnnNadeSequenceGenerator to use for generation.
    """
    if not FLAGS.output_dir:
        tf.logging.fatal('--output_dir required')
        return
    output_dir = os.path.expanduser(FLAGS.output_dir)

    primer_midi = os.path.expanduser(FLAGS.primer_midi) if FLAGS.primer_midi else None

    if not tf.gfile.Exists(output_dir):
        tf.gfile.MakeDirs(output_dir)

    qpm = FLAGS.qpm or 60
    # Build the priming sequence from whichever primer flag is set, falling
    # back to an empty sequence.
    if FLAGS.primer_pitches:
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.tempos.add().qpm = qpm
        primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ
        note_end = 60.0 / qpm
        for pitch in ast.literal_eval(FLAGS.primer_pitches):
            primer_sequence.notes.add(
                start_time=0, end_time=note_end, pitch=pitch, velocity=100)
        primer_sequence.total_time = primer_sequence.notes[-1].end_time
    elif FLAGS.primer_pianoroll:
        pianoroll = magenta.music.PianorollSequence(
            events_list=ast.literal_eval(FLAGS.primer_pianoroll),
            steps_per_quarter=4,
            shift_range=True)
        primer_sequence = pianoroll.to_sequence(qpm=qpm)
    elif primer_midi:
        primer_sequence = magenta.music.midi_file_to_sequence_proto(
            primer_midi)
        if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
            qpm = primer_sequence.tempos[0].qpm
    else:
        tf.logging.warning(
            'No priming sequence specified. Defaulting to empty sequence.')
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.tempos.add().qpm = qpm
        primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ

    # Total run time: num_steps model steps at the current tempo.
    step_seconds = 60.0 / qpm / generator.steps_per_quarter
    generate_end_time = FLAGS.num_steps * step_seconds

    # Generation picks up exactly where the primer ends and runs until the
    # whole sequence is num_steps long.
    generator_options = generator_pb2.GeneratorOptions()
    generate_section = generator_options.generate_sections.add(
        start_time=primer_sequence.total_time, end_time=generate_end_time)

    if generate_section.start_time >= generate_section.end_time:
        tf.logging.fatal(
            'Priming sequence is longer than the total number of steps '
            'requested: Priming sequence length: %s, Total length '
            'requested: %s', generate_section.start_time, generate_end_time)
        return

    generator_options.args['beam_size'].int_value = FLAGS.beam_size
    generator_options.args['branch_factor'].int_value = FLAGS.branch_factor

    tf.logging.info('primer_sequence: %s', primer_sequence)
    tf.logging.info('generator_options: %s', generator_options)

    # Generate num_outputs sequences, writing each as a timestamped MIDI file.
    timestamp = time.strftime('%Y-%m-%d_%H%M%S')
    pad = len(str(FLAGS.num_outputs))
    for index in range(FLAGS.num_outputs):
        generated_sequence = generator.generate(primer_sequence,
                                                generator_options)
        midi_path = os.path.join(
            output_dir, '%s_%s.mid' % (timestamp, str(index + 1).zfill(pad)))
        magenta.music.sequence_proto_to_midi_file(generated_sequence,
                                                  midi_path)

    tf.logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs, output_dir)
Пример #28
0
def generate(unused_argv):
  """Generates a basic drum sequence of 4 seconds based on a hard coded
  primer, plots it, and plays it back through a MIDI output port.

  Returns:
    0 in all cases (also on KeyboardInterrupt during playback).
  """
  # Download and load the drum-kit bundle into a Drums RNN generator.
  mm.notebook_utils.download_bundle("drum_kit_rnn.mag", "bundles")
  bundle = mm.sequence_generator_bundle.read_bundle_file(
    os.path.join("bundles", "drum_kit_rnn.mag"))

  generator_map = drums_rnn_sequence_generator.get_generator_map()
  generator = generator_map["drum_kit"](checkpoint=None, bundle=bundle)
  generator.initialize()

  qpm = 120
  num_bars = 3
  seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
  num_steps_per_bar = constants.DEFAULT_STEPS_PER_BAR
  seconds_per_bar = num_steps_per_bar * seconds_per_step

  # Creates a primer sequence that is fed into the model for the generator,
  # which will generate a sequence based on this one
  # A DrumTrack models a drum sequence by step, so you have step 1 being the
  # midi note 36 (bass drum), followed by 3 steps of silence (those four steps
  # constitutes the first beat or quarter), followed by both notes 36 and 41
  # being struck at the same time (followed by silence by these are optional)
  primer_drums = mm.DrumTrack(
    [frozenset(pitches) for pitches in
     [(38, 51), (), (36,), (),
      (38, 44, 51), (), (36,), (),
      (), (), (38,), (),
      (38, 44), (), (36, 51), (), ]])
  primer_sequence = primer_drums.to_sequence(qpm=qpm)
  primer_start_time = 0
  primer_end_time = primer_start_time + seconds_per_bar

  # Defines the start and end of the generation, which starts at the step
  # after the end of the primer (we'll see in 03.py this calculation makes
  # it harder to fall on proper beats) and ends at total seconds
  # The complete generation will thus contain the primer and the total length
  # needs to be at least the size of the primer
  generation_start_time = primer_end_time
  generation_end_time = generation_start_time + (seconds_per_bar * num_bars)

  generator_options = generator_pb2.GeneratorOptions()
  generator_options.args['temperature'].float_value = 1.1
  generator_options.generate_sections.add(
    start_time=generation_start_time,
    end_time=generation_end_time)

  # We are using the primer sequence here instead of an empty sequence
  sequence = generator.generate(primer_sequence, generator_options)

  # Render the generated sequence to an HTML piano-roll plot.
  plot_file = os.path.join("output", "out.html")
  print("Generated plot file: " + str(os.path.abspath(plot_file)))
  pretty_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence)
  plotter = Plotter()
  plotter.show(pretty_midi, plot_file)

  # Find a software-synth output port to play the result through.
  input_ports = [name for name in mido.get_output_names()
                 if "VirtualMIDISynth" in name
                 or "FLUID Synth" in name]
  if not input_ports:
    print("Cannot find proper input port in "
          + str(mido.get_output_names()))
  print("Playing generated MIDI in input port names: "
        + str(input_ports))
  midi_hub = mh.MidiHub([], input_ports, None)

  # Start playback of an (initially empty) sequence that we can update.
  empty_sequence = music_pb2.NoteSequence()
  player = midi_hub.start_playback(empty_sequence,
                                   allow_updates=True)
  player._channel = 9  # channel 10 (0-indexed 9) is the MIDI drum channel

  # Shift the sequence so playback starts "now" in wall-clock time.
  wall_start_time = time.time()
  sequence_adjusted = music_pb2.NoteSequence()
  sequence_adjusted.CopyFrom(sequence)
  sequence_adjusted = adjust_sequence_times(sequence_adjusted,
                                            wall_start_time)
  player.update_sequence(sequence_adjusted,
                         start_time=wall_start_time)

  # Bug fix: the original `finally: return 0` swallowed *every* exception
  # (not just KeyboardInterrupt) and made the `except` clause dead code.
  # Only KeyboardInterrupt is intentionally absorbed here.
  try:
    player.join(generation_end_time)
  except KeyboardInterrupt:
    pass
  return 0
Пример #29
0
def generate_music():
    """Endlessly generates performance-RNN audio, alternating between two
    temp WAV files (tempBass1/tempBass2) gated by the PLAYING_MUSIC globals.

    Never returns: after an initial from-scratch generation it loops forever,
    each iteration extending the running sequence by DURATION seconds and
    rendering the new portion to a WAV file.
    """
    global PLAYING_MUSIC1
    global PLAYING_MUSIC2
    mm.musicxml_parser.DEFAULT_MIDI_PROGRAM = 3
    # Download/load the bundle and build the initial generator.
    mm.notebook_utils.download_bundle(BUNDLE_NAME, BUNDLE_DIR)
    bundle = mm.sequence_generator_bundle.read_bundle_file(
        os.path.join(BUNDLE_DIR, BUNDLE_NAME))
    generator_map = performance_sequence_generator.get_generator_map()
    generator = generator_map[MODEL_NAME](checkpoint=None, bundle=bundle)
    generator.initialize()
    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args[
        'temperature'].float_value = settings.TEMP  # Higher is more random; 1.0 is default.
    generate_section = generator_options.generate_sections.add(
        start_time=0, end_time=DURATION)
    # Initial generation from an empty sequence.
    sequence = generator.generate(music_pb2.NoteSequence(), generator_options)

    # Play and view this masterpiece.
    # mm.plot_sequence(sequence)
    # audio_object = modified_play_sequence(sequence, mm.midi_synth.fluidsynth, sample_rate=DEFAULT_SAMPLE_RATE * SAMPLE_MULT)
    # mm.play_sequence(sequence, mm.midi_synth.fluidsynth, sample_rate=DEFAULT_SAMPLE_RATE * SAMPLE_MULT)

    # Synthesize the initial sequence and write the first WAV.
    sample_rate = DEFAULT_SAMPLE_RATE * SAMPLE_MULT
    array_of_floats = mm.midi_synth.fluidsynth(sequence,
                                               sample_rate=sample_rate,
                                               sf2_path=SF2_PATH)
    write('/Users/wangan/Documents/calhacks2017/temp_wav/tempBass1.wav', 44100,
          array_of_floats)
    # audio_killed = int(sample_rate * 1)
    # array_of_floats = array_of_floats[audio_killed:]

    # IPython.display.Audio(array_of_floats, rate=sample_rate, autoplay=True)

    # Main loop: each pass produces one extension for tempBass1 and one for
    # tempBass2, waiting on the corresponding PLAYING_MUSIC flag first.
    i = 1
    while (True):
        print("Size of NoteSequence: ", sys.getsizeof(sequence))
        # Push the current user settings into the generator module's defaults.
        performance_sequence_generator.DEFAULT_NOTE_DENSITY = settings.NOTE_DENSITY / REDUCTION
        performance_sequence_generator.DEFAULT_PITCH_HISTOGRAM = settings.PITCH
        # Busy-wait until the first playback slot is free.
        while PLAYING_MUSIC1:
            time.sleep(0.00001)
        array_of_floats = []
        # Rebuild a fresh generator each iteration (shares the loaded bundle).
        generator_map2 = performance_sequence_generator.get_generator_map()
        generator2 = generator_map2[MODEL_NAME](checkpoint=None, bundle=bundle)
        generator2.initialize()
        generator_options2 = generator_pb2.GeneratorOptions()
        generator_options2.args[
            'temperature'].float_value = settings.TEMP  # Higher is more random; 1.0 is default.
        # Extend by one DURATION window starting where the timeline stands.
        generate_section = generator_options2.generate_sections.add(
            start_time=(i * DURATION), end_time=(i + 1) * DURATION)
        sequenceNew = generator2.generate(sequence, generator_options2)

        # Play and view this masterpiece.
        mm.plot_sequence(sequenceNew)
        # audio_object = modified_play_sequence(sequence, mm.midi_synth.fluidsynth, sample_rate=DEFAULT_SAMPLE_RATE * SAMPLE_MULT)
        # mm.play_sequence(sequence, mm.midi_synth.fluidsynth, sample_rate=DEFAULT_SAMPLE_RATE * SAMPLE_MULT)

        sample_rate = DEFAULT_SAMPLE_RATE * SAMPLE_MULT
        array_of_floats = mm.midi_synth.fluidsynth(sequenceNew,
                                                   sample_rate=sample_rate,
                                                   sf2_path=SF2_PATH)
        sequence = sequenceNew
        # HACK: mutates the protobuf's private `_values` list to drop the
        # oldest FRACTION of notes and bound memory growth — relies on
        # protobuf internals; fragile across protobuf versions.
        sequence.notes._values = sequence.notes._values[
            int(FRACTION * len(sequence.notes)):len(sequence.notes)]

        # audio_killed = int(sample_rate * 1.5)
        # Keep only the newly generated audio (skip samples already rendered).
        old_array_size = int(sample_rate * DURATION * i)
        array_of_floats = array_of_floats[old_array_size:]
        # array_of_floats = array_of_floats[:-audio_killed]
        write('/Users/wangan/Documents/calhacks2017/temp_wav/tempBass1.wav',
              44100, array_of_floats)
        del generator_map2
        del generator2
        del generator_options2
        del generate_section
        del sequenceNew
        i += 1

        # Second half: same procedure, but gated on PLAYING_MUSIC2 and
        # written to tempBass2.wav.
        performance_sequence_generator.DEFAULT_NOTE_DENSITY = settings.NOTE_DENSITY / REDUCTION
        performance_sequence_generator.DEFAULT_PITCH_HISTOGRAM = settings.PITCH
        while PLAYING_MUSIC2:
            time.sleep(0.00001)
        array_of_floats2 = []
        generator_map2 = performance_sequence_generator.get_generator_map()
        generator2 = generator_map2[MODEL_NAME](checkpoint=None, bundle=bundle)
        generator2.initialize()
        generator_options2 = generator_pb2.GeneratorOptions()
        generator_options2.args[
            'temperature'].float_value = settings.TEMP  # Higher is more random; 1.0 is default.
        generate_section = generator_options2.generate_sections.add(
            start_time=(i * DURATION), end_time=(i + 1) * DURATION)
        sequenceNew = generator2.generate(sequence, generator_options2)

        sample_rate = DEFAULT_SAMPLE_RATE * SAMPLE_MULT
        array_of_floats2 = mm.midi_synth.fluidsynth(sequenceNew,
                                                    sample_rate=sample_rate,
                                                    sf2_path=SF2_PATH)
        sequence = sequenceNew
        # HACK: same private-internals trim as above.
        sequence.notes._values = sequence.notes._values[
            int(FRACTION * len(sequence.notes)):len(sequence.notes)]

        # audio_killed = int(sample_rate * 1.5)
        old_array_size = int(sample_rate * DURATION * i)
        array_of_floats2 = array_of_floats2[old_array_size:]
        # array_of_floats = array_of_floats[:-audio_killed]

        write('/Users/wangan/Documents/calhacks2017/temp_wav/tempBass2.wav',
              44100, array_of_floats2)
        del generator_map2
        del generator2
        del generator_options2
        del generate_section
        del sequenceNew
        i += 1
Пример #30
0
def run_with_flags(generator):
  """Generates melodies and saves them as MIDI files.

  Uses the options specified by the flags defined in this module.

  Args:
    generator: The MelodyRnnSequenceGenerator to use for generation.
  """
  # An output directory is mandatory; bail out early if it is missing.
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)

  primer_midi = (os.path.expanduser(FLAGS.primer_midi)
                 if FLAGS.primer_midi else None)

  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)

  # Build the priming sequence: an explicit --primer_melody wins, then a
  # primer MIDI file, and finally a single middle C as the fallback.
  qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE
  if FLAGS.primer_melody:
    melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))
    primer_sequence = melody.to_sequence(qpm=qpm)
  elif primer_midi:
    primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
    # Prefer the tempo embedded in the primer MIDI, if it has one.
    if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
      qpm = primer_sequence.tempos[0].qpm
  else:
    tf.logging.warning(
        'No priming sequence specified. Defaulting to a single middle C.')
    primer_sequence = magenta.music.Melody([60]).to_sequence(qpm=qpm)

  # Convert the requested step count into wall-clock seconds using the
  # primer's tempo and the generator's step resolution.
  seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
  total_seconds = FLAGS.num_steps * seconds_per_step

  # Generation begins one step after the primer's last note and continues
  # until the combined sequence reaches num_steps in length.
  generator_options = generator_pb2.GeneratorOptions()
  if primer_sequence:
    input_sequence = primer_sequence
    notes = primer_sequence.notes
    last_end_time = max(n.end_time for n in notes) if notes else 0
    section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    # If the primer alone already covers the requested length there is
    # nothing left to generate.
    if section.start_time >= section.end_time:
      tf.logging.fatal(
          'Priming sequence is longer than the total number of steps '
          'requested: Priming sequence length: %s, Generation length '
          'requested: %s',
          section.start_time, total_seconds)
      return
  else:
    input_sequence = music_pb2.NoteSequence()
    input_sequence.tempos.add().qpm = qpm
    generator_options.generate_sections.add(
        start_time=0,
        end_time=total_seconds)
  generator_options.args['temperature'].float_value = FLAGS.temperature
  generator_options.args['beam_size'].int_value = FLAGS.beam_size
  generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
  generator_options.args[
      'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
  tf.logging.debug('input_sequence: %s', input_sequence)
  tf.logging.debug('generator_options: %s', generator_options)

  # Run the generator num_outputs times, writing each result to a
  # timestamped, zero-padded MIDI filename in the output directory.
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
  digits = len(str(FLAGS.num_outputs))
  for idx in range(FLAGS.num_outputs):
    generated_sequence = generator.generate(input_sequence, generator_options)

    midi_filename = '%s_%s.mid' % (date_and_time, str(idx + 1).zfill(digits))
    magenta.music.sequence_proto_to_midi_file(
        generated_sequence, os.path.join(FLAGS.output_dir, midi_filename))

  tf.logging.info('Wrote %d MIDI files to %s',
                  FLAGS.num_outputs, FLAGS.output_dir)