Example #1
def extendTwinkle():
    twinkle_twinkle = createTwinkle()
    print("Initializing Melody RNN...")
    bundle = sequence_generator_bundle.read_bundle_file('./basic_rnn.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    input_sequence = twinkle_twinkle # change this to teapot if you want
    num_steps = 128 # change this for shorter or longer sequences
    temperature = 1.0 # the higher the temperature the more random the sequence.

    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(n.end_time for n in input_sequence.notes)
                      if input_sequence.notes else 0)
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step, end_time=total_seconds)

    # Ask the model to continue the sequence.
    sequence = melody_rnn.generate(input_sequence, generator_options)

    mm.sequence_proto_to_midi_file(sequence, 'twinkleExtended.mid')
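The createTwinkle() helper used above is not shown in this example. A minimal sketch of what it might return, assuming the usual note_seq NoteSequence protobuf (the pitches, timings, and tempo here are illustrative):

from note_seq.protobuf import music_pb2

def createTwinkle():
    # Illustrative seed melody; the real helper may use different notes.
    seq = music_pb2.NoteSequence()
    for i, pitch in enumerate([60, 60, 67, 67, 69, 69, 67]):
        seq.notes.add(pitch=pitch,
                      start_time=0.5 * i,
                      end_time=0.5 * (i + 1),
                      velocity=80)
    seq.total_time = 3.5
    seq.tempos.add(qpm=60)
    return seq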
Example #2
def continueByModel(input, model):
    #download model
    if not (path.isfile(f'./content/{model}.mag')):
        print(
            f'Downloading {model} bundle. This will take less than a minute...'
        )
        notebook_utils.download_bundle(f'{model}.mag', './content/')
    #init
    bundle = sequence_generator_bundle.read_bundle_file(
        f'./content/{model}.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map[model](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    # do
    input_sequence = input
    num_steps = 128  # change this for shorter or longer sequences
    temperature = 10.0  # the higher the temperature the more random the sequence.

    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(
        n.end_time
        for n in input_sequence.notes) if input_sequence.notes else 0)
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step, end_time=total_seconds)

    # Ask the model to continue the sequence.
    return melody_rnn.generate(input_sequence, generator_options)
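A possible way to call continueByModel, assuming note_seq is importable and using a made-up three-note seed:

import note_seq
from note_seq.protobuf import music_pb2

seed = music_pb2.NoteSequence()
seed.notes.add(pitch=60, start_time=0.0, end_time=0.5, velocity=80)
seed.notes.add(pitch=62, start_time=0.5, end_time=1.0, velocity=80)
seed.notes.add(pitch=64, start_time=1.0, end_time=1.5, velocity=80)
seed.total_time = 1.5
seed.tempos.add(qpm=120)

extended = continueByModel(seed, 'basic_rnn')
note_seq.sequence_proto_to_midi_file(extended, 'extended.mid')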
Example #3
def create_song_prototype(song_path,
                          start_time,
                          stop_time,
                          model_used='attention_rnn',
                          temperature=1.0):
    magenta_model_path = '%s/magenta_models/%s.mag' % (MEDIA_ROOT, model_used)
    bundle = mm.sequence_generator_bundle.read_bundle_file(magenta_model_path)
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map[model_used](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    base_sequence = midi_file_to_note_sequence(song_path)
    target_sequence = extract_subsequence(base_sequence, start_time, stop_time)

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=target_sequence.total_time,
        end_time=2 * target_sequence.total_time)
    generated_sequence = melody_rnn.generate(target_sequence,
                                             generator_options)

    proceed_sequence = extract_subsequence(generated_sequence,
                                           target_sequence.total_time,
                                           2 * target_sequence.total_time)

    return proceed_sequence
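A hedged usage sketch for create_song_prototype; the input MIDI path and time window are made up, and MEDIA_ROOT plus the attention_rnn bundle are assumed to exist on disk (note_seq, or the mm alias above, is assumed importable for saving):

continuation = create_song_prototype('input.mid',
                                     start_time=0.0,
                                     stop_time=8.0,
                                     model_used='attention_rnn',
                                     temperature=1.0)
note_seq.sequence_proto_to_midi_file(continuation, 'continuation.mid')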
Example #4
def generate_next():
    # Initialize the model.
    current_user_sequence.total_time = 8
    current_user_sequence.tempos.add(qpm=60)

    print("Initializing Melody RNN...")
    bundle = sequence_generator_bundle.read_bundle_file('basic_rnn.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    print('🎉 Done!')

    input_sequence = current_user_sequence  # change this to teapot if you want
    num_steps = 128  # change this for shorter or longer sequences
    temperature = 1.0  # the higher the temperature the more random the sequence.

    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(
        n.end_time
        for n in input_sequence.notes) if input_sequence.notes else 0)
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step, end_time=total_seconds)

    # Ask the model to continue the sequence.
    sequence = melody_rnn.generate(input_sequence, generator_options)
    toplay = sequence.notes[len(current_user_sequence.notes):]
    return toplay
Example #5
def setup_model():
    # Initialize the model.
    print("Initializing attention Melody RNN...")
    bundle = sequence_generator_bundle.read_bundle_file(
        'content/attention_rnn.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    app.melody_rnn = generator_map['attention_rnn'](checkpoint=None,
                                                    bundle=bundle)
    app.melody_rnn.initialize()
Example #6
def init_melody_rnn(model_name):
    print("initializing generator...")
    bundle = sequence_generator_bundle.read_bundle_file('./content/' +
                                                        model_name + '.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map[model_name](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()
    print('🎉initializing done!')
    return melody_rnn
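init_melody_rnn expects the bundle under ./content/. A brief usage sketch, assuming the bundle is fetched with note_seq.notebook_utils as in Example #2:

from note_seq import notebook_utils

notebook_utils.download_bundle('attention_rnn.mag', './content/')
melody_rnn = init_melody_rnn('attention_rnn')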
Example #7
    def __init__(self):
        """
        Loads and initializes the rnn
        """
        print('Downloading model bundle. This will take less than a minute...')
        note_seq.notebook_utils.download_bundle('basic_rnn.mag', './content/')

        # Initialize the model.
        print("Initializing Melody RNN...")
        bundle = sequence_generator_bundle.read_bundle_file(
            './content/basic_rnn.mag')
        generator_map = melody_rnn_sequence_generator.get_generator_map()
        self.melody_rnn = generator_map['basic_rnn'](checkpoint=None,
                                                     bundle=bundle)
        self.melody_rnn.initialize()

        print('🎉 Done!')
Example #8
tf.app.flags.DEFINE_string(
    'bundle_files',
    None,
    'A comma-separated list of the location of the bundle files to use.')
tf.app.flags.DEFINE_integer(
    'generator_select_control_number',
    None,
    'The control number to use for selecting between generators when multiple '
    'bundle files are specified. Required unless only a single bundle file is '
    'specified.')
tf.app.flags.DEFINE_string(
    'log', 'WARN',
    'The threshold for what messages will be logged. DEBUG, INFO, WARN, ERROR, '
    'or FATAL.')

# A map from a string generator name to its class.
_GENERATOR_MAP = melody_rnn_sequence_generator.get_generator_map()


def _validate_flags():
  """Returns True if flag values are valid or prints error and returns False."""
  if FLAGS.list_ports:
    print "Input ports: '%s'" % (
        "', '".join(midi_hub.get_available_input_ports()))
    print "Ouput ports: '%s'" % (
        "', '".join(midi_hub.get_available_output_ports()))
    return False

  if FLAGS.bundle_files is None:
    print('--bundle_files must be specified.')
    return False
Example #9
def listen_and_extend(chunk_duration,
                      min_volume,
                      min_rest,
                      rest_threshold,
                      mel_min=4,
                      rest_max=3,
                      sampling_rate=44100):
    chunksize = int(chunk_duration * sampling_rate)
    min_note_size = float(chunk_duration * 1.05)

    p = pyaudio.PyAudio()  # Initialize PyAudio object

    print(f"Recording audio in {chunk_duration} second chunks.")
    input("Press enter to proceed.")

    # Open stream with standard parameters
    stream = p.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=sampling_rate,
                    input=True,
                    frames_per_buffer=chunksize)

    # Run 4 processing steps: condense octaves, smooth repeats, remove errors, add rests
    pre_seq, full_raw = find_melody(chunksize, chunk_duration, sampling_rate,
                                    min_volume, stream)
    oct_seq = condense_octaves(copy.deepcopy(pre_seq))

    res = process_MIDI(copy.deepcopy(oct_seq), min_note_size)
    while not res[1]:
        res = process_MIDI(res[0], min_note_size)
    final_seq = res[0]

    samp_rest = find_rests(full_raw, rest_threshold)
    sec_rests = [(round(tup[0] / sampling_rate,
                        2), round(tup[1] / sampling_rate, 2))
                 for tup in samp_rest]
    sec_rests = [tup for tup in sec_rests if tup[1] - tup[0] > min_rest]

    rest_seq = []
    for note in final_seq:
        rest_seq = note.add_rests(sec_rests, rest_seq)

    # Cleanup
    stream.stop_stream()
    stream.close()
    p.terminate()

    # Plots the waveform and saves the result
    plt.plot(full_raw)
    plt.axhline(min_volume, color='r')
    plt.axhline(-min_volume, color='r')
    plt.title("Raw Microphone Input")
    plt.savefig("Output/Waveform.png")

    # Save MIDI plots and MIDI files
    save_sequence(pre_seq, 'pre')
    save_sequence(oct_seq, 'oct')
    save_sequence(final_seq, 'post')
    rest_mel = save_sequence(rest_seq, 'rest')

    # Initialize Model
    bundle = sequence_generator_bundle.read_bundle_file('Src/basic_rnn.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    # Model Parameters
    end_time = (max(note.end_time for note in rest_mel.notes))
    qpm = rest_mel.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    steps = ((rest_mel.total_time * qpm * melody_rnn.steps_per_quarter) / 60)
    total = steps * seconds_per_step
    tmp = 1.0

    # Initialize Generator
    gen_options = generator_pb2.GeneratorOptions()
    gen_options.args['temperature'].float_value = tmp
    gen_section = gen_options.generate_sections.add(start_time=end_time +
                                                    seconds_per_step,
                                                    end_time=total)

    out = melody_rnn.generate(rest_mel, gen_options)

    note_seq.sequence_proto_to_midi_file(out, 'Output/ext_out.mid')
    ext = pretty_midi.PrettyMIDI('Output/ext_out.mid')
    visual_midi.Plotter().save(ext, 'Output/ext_plotted.html')

    return ext
Example #10
import time

import mido
from pyfiglet import Figlet

# Import dependencies.
import magenta
from magenta import music
from magenta.models.melody_rnn import melody_rnn_sequence_generator
from magenta.models.shared import sequence_generator_bundle
from note_seq.protobuf import generator_pb2
from note_seq.protobuf import music_pb2

global f
f = Figlet(font='slant')

# Initialize the model.
print("Initializing Melody RNN...")
bundle = sequence_generator_bundle.read_bundle_file('basic_rnn.mag')
generator_map = melody_rnn_sequence_generator.get_generator_map()
melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
melody_rnn.initialize()
global temperature
temperature = 1.0

print('🎉 Done!')

inport = mido.open_input('Axoloti Core:Axoloti Core MIDI 1 20:0')
# inport = mido.open_input('Midi Through:Midi Through Port-0 14:0')
outport = mido.open_output('Axoloti Core:Axoloti Core MIDI 1 20:0')
# outport = mido.open_output('Midi Through:Midi Through Port-0 14:0')
lasttime = time.time()

global mid
global track