def synth(self, i):
    """Synthesizes audio from the sequence indexed at `i`.

    Args:
        i: an integer indicating which sequence to load
            (from a line number).
    """
    # Lower sound quality than `fluidsynth` with the Yamaha C5 (or another
    # good SoundFont), but a usable fallback if the SoundFont is missing:
    # mm.notebook_utils.play_sequence(sequence)

    mm.play_sequence(self.as_note_seq[i], mm.midi_synth.fluidsynth,
                     sf2_path='./assets/Yamaha-C5-Salamander-JNv5.1.sf2')
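A minimal standalone sketch of the fallback the comment above describes, assuming a NoteSequence `ns`; the helper name is illustrative, not part of the original:

import os
import magenta.music as mm

def play_with_fallback(ns, sf2_path='./assets/Yamaha-C5-Salamander-JNv5.1.sf2'):
    # Prefer FluidSynth with the SoundFont; fall back to the default
    # notebook synth when the SoundFont file is not available.
    if os.path.exists(sf2_path):
        mm.play_sequence(ns, mm.midi_synth.fluidsynth, sf2_path=sf2_path)
    else:
        mm.notebook_utils.play_sequence(ns)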
Example #2
# (The snippet is cut off here: the creation of `twinkle_twinkle` as a
# music_pb2.NoteSequence and the first four seconds of the melody are not shown.)
twinkle_twinkle.notes.add(pitch=65, start_time=4.0, end_time=4.5, velocity=80)
twinkle_twinkle.notes.add(pitch=65, start_time=4.5, end_time=5.0, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.0, end_time=5.5, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.5, end_time=6.0, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.0, end_time=6.5, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.5, end_time=7.0, velocity=80)
twinkle_twinkle.notes.add(pitch=60, start_time=7.0, end_time=8.0, velocity=80)
twinkle_twinkle.total_time = 8

twinkle_twinkle.tempos.add(qpm=60)

# This is a colab utility method that visualizes a NoteSequence.
mm.plot_sequence(twinkle_twinkle)

# This is a colab utility method that plays a NoteSequence.
mm.play_sequence(twinkle_twinkle, synth=mm.fluidsynth)

# mm.sequence_proto_to_midi_file(twinkle_twinkle, 'twinkle_twinkle.mid')

# Here's another NoteSequence!
teapot = music_pb2.NoteSequence()
teapot.notes.add(pitch=69, start_time=0, end_time=0.5, velocity=80)
teapot.notes.add(pitch=71, start_time=0.5, end_time=1, velocity=80)
teapot.notes.add(pitch=73, start_time=1, end_time=1.5, velocity=80)
teapot.notes.add(pitch=74, start_time=1.5, end_time=2, velocity=80)
teapot.notes.add(pitch=76, start_time=2, end_time=2.5, velocity=80)
teapot.notes.add(pitch=81, start_time=3, end_time=4, velocity=80)
teapot.notes.add(pitch=78, start_time=4, end_time=5, velocity=80)
teapot.notes.add(pitch=81, start_time=5, end_time=6, velocity=80)
teapot.notes.add(pitch=76, start_time=6, end_time=8, velocity=80)
teapot.total_time = 8
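The snippet builds `teapot` but stops before using it; under the same `mm` alias it could be plotted and played just like `twinkle_twinkle` above (a sketch, not part of the original):

mm.plot_sequence(teapot)
mm.play_sequence(teapot, synth=mm.fluidsynth)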
Example #3
def play(note_sequence):
    mm.play_sequence(note_sequence, synth=mm.fluidsynth)
Example #4
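The snippet assumes a `melody_rnn` generator object already exists; a minimal sketch of that setup, assuming the basic_rnn bundle (bundle name, generator key, and paths are assumptions, not from the original):

import magenta.music as mm
from magenta.models.melody_rnn import melody_rnn_sequence_generator
from magenta.protobuf import generator_pb2  # note_seq.protobuf in newer releases

# Assumed setup for the `melody_rnn` object initialized below.
mm.notebook_utils.download_bundle('basic_rnn.mag', '/tmp/')
bundle = mm.sequence_generator_bundle.read_bundle_file('/tmp/basic_rnn.mag')
generator_map = melody_rnn_sequence_generator.get_generator_map()
melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)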
melody_rnn.initialize()

# print('🎉 Done!')

# `twinkle_twinkle` is the NoteSequence built in Example #2, imported here
# from a local module named `test`.
from test import twinkle_twinkle

# Model options. Change these to get different generated sequences!
input_sequence = twinkle_twinkle # change this to teapot if you want
num_steps = 128 # change this for shorter or longer sequences
temperature = 1.0 # the higher the temperature the more random the sequence.

# Set the start time to begin on the next step after the last note ends.
last_end_time = (max(n.end_time for n in input_sequence.notes)
                  if input_sequence.notes else 0)
qpm = input_sequence.tempos[0].qpm
seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
total_seconds = num_steps * seconds_per_step

generator_options = generator_pb2.GeneratorOptions()
generator_options.args['temperature'].float_value = temperature
generate_section = generator_options.generate_sections.add(
  start_time=last_end_time + seconds_per_step,
  end_time=total_seconds)

# Ask the model to continue the sequence.
sequence = melody_rnn.generate(input_sequence, generator_options)

mm.plot_sequence(sequence)
mm.play_sequence(sequence, synth=mm.fluidsynth)

mm.sequence_proto_to_midi_file(sequence, 'melody_sample_output.mid')
Example #5
    def _generate(self, input_sequence, zero_time, response_start_time,
                  response_end_time):
        """Generates a response sequence with the currently-selected generator.

    Args:
      input_sequence: The NoteSequence to use as a generation seed.
      zero_time: The float time in seconds to treat as the start of the input.
      response_start_time: The float time in seconds for the start of
          generation.
      response_end_time: The float time in seconds for the end of generation.

    Returns:
      The generated NoteSequence.
    """
        print('zero_time:' + str(zero_time))
        print('response_start_time:' + str(response_start_time))
        print('response_end_time:' + str(response_end_time))
        time_adjusted_input_sequence = adjust_sequence_times(
            input_sequence, -zero_time)
        MidiInteraction.count = MidiInteraction.count + 1
        # Generation is simplified if we always start at 0 time.
        response_start_time -= zero_time
        response_end_time -= zero_time

        generator_options = generator_pb2.GeneratorOptions()
        generator_options.input_sections.add(start_time=0,
                                             end_time=response_start_time)
        generator_options.generate_sections.add(start_time=response_start_time,
                                                end_time=response_end_time)

        # Get current temperature setting.
        generator_options.args['temperature'].float_value = self._temperature
        print('####################self._sequence_generator:' +
              str(self._sequence_generator) + '####################')

        if self._sequence_generator == 'Trio':
            Rootpath = '/Users/inhyukyee/repo/pregenerated/'
            midRootpath = ''
            wavFilepath = ''
            num_mid_file = 0
            if self._should_short:
                print('SHORT 8s')
                midRootpath = '/Users/inhyukyee/repo/pregenerated/8s/mid/'
                wavFilepath = '/Users/inhyukyee/repo/pregenerated/8s/wav/trio_merged.wav'
                num_mid_file = 4392
            else:
                print('LONG 32s')
                midRootpath = '/Users/inhyukyee/repo/pregenerated/32s/mid/'
                wavFilepath = '/Users/inhyukyee/repo/pregenerated/32s/wav/trio_merged.wav'
                num_mid_file = 10000

            targetNum = random.randint(1, num_mid_file)
            midFilepath = os.path.join(midRootpath, str(targetNum) + '.mid')
            print(midFilepath)
            #wavFilepath = os.path.join(wavRootpath, targetNum + '.wav')

            #command = 'sleep 1 && afplay ' + audio_file
            #proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

            response_sequence = self._getPianoTrackFromTrio(
                time_adjusted_input_sequence, midFilepath, midRootpath)

            # If the first pre-generated note starts before the requested
            # response window, shift every note forward by that offset so the
            # response begins at response_start_time.
            diff = 0
            for i in range(0, len(response_sequence.notes)):
                if response_sequence.notes[
                        i].start_time < response_start_time and i == 0:
                    diff = response_start_time - response_sequence.notes[
                        i].start_time
                if diff == 0:
                    break
                response_sequence.notes[
                    i].start_time = response_sequence.notes[i].start_time + diff
                response_sequence.notes[
                    i].end_time = response_sequence.notes[i].end_time + diff

            f_response_sequence = adjust_sequence_times(
                response_sequence, zero_time)
            # for note in f_response_sequence.notes:
            #     print(note)
            midi_data = pretty_midi.PrettyMIDI(midRootpath +
                                               'temp/trio_merged.mid')
            note_sequence = mm.midi_to_sequence_proto(midi_data)
            mm.play_sequence(note_sequence,
                             synth=mm.fluidsynth,
                             wavpath=wavFilepath)
            #proc = subprocess.Popen(['afplay', wavFilepath])
            command = 'sleep 1 && afplay ' + wavFilepath
            proc = subprocess.Popen(command,
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    preexec_fn=os.setsid)
            #proc = self._popenAndCall(self._onSubProcessExit, command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid)
            pe = ProcElement(proc, time.time())
            self._procsQ.append(pe)
            return f_response_sequence

        elif self._sequence_generator == 'Multitracks':
            print('Multitracks!')

            #just get any melody track from Trio
            midRootpath = '/Users/inhyukyee/repo/pregenerated/32s/mid/'
            num_mid_file = 1458

            targetNum = random.randint(1, num_mid_file)
            midFilepath = os.path.join(midRootpath, str(targetNum) + '.mid')
            print(midFilepath)

            response_sequence = self._getPianoTrackFromTrio(
                time_adjusted_input_sequence, midFilepath, midRootpath)

            f_response_sequence = adjust_sequence_times(
                response_sequence, zero_time)
            #END:just get any melody track from Trio

            wavRootpath = '/Users/inhyukyee/repo/pregenerated/32s/multitracks_wav/'
            wavFilepath = wavRootpath + str(random.randint(
                0, num_mid_file)) + '.wav'
            print('wavFilepath:' + wavFilepath)
            #wavFilepath = os.path.join(wavRootpath, str(random.randint(0, num_mid_file)), '.wav')
            #wavFilepath = os.path.join(wavRootpath, '99.wav')
            # midi_data = pretty_midi.PrettyMIDI(
            #     '/Users/inhyukyee/repo/pregenerated/32s/4tracks_mid/' + str(targetNum) + '.mid')
            # note_sequence = mm.midi_to_sequence_proto(midi_data)
            # mm.play_sequence(note_sequence, synth=mm.fluidsynth,
            #                  sf2_path='/Users/inhyukyee/Downloads/SGM-v2.01-Sal-Guit-Bass-V1.3.sf2',
            #                  wavpath=wavFilepath)
            command = 'sleep 1 && afplay ' + wavFilepath
            proc = subprocess.Popen(command,
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    preexec_fn=os.setsid)
            #proc = self._popenAndCall(self._onSubProcessExit, command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid)
            pe = ProcElement(proc, time.time())
            self._procsQ.append(pe)
            return f_response_sequence

        else:
            # Generate response.
            tf.logging.info("Generating sequence using '%s' generator.",
                            self._sequence_generator.details.id)
            tf.logging.debug('Generator Details: %s',
                             self._sequence_generator.details)
            tf.logging.debug('Bundle Details: %s',
                             self._sequence_generator.bundle_details)
            tf.logging.debug('Generator Options: %s', generator_options)
            response_sequence = self._sequence_generator.generate(
                adjust_sequence_times(input_sequence, -zero_time),
                generator_options)
            response_sequence = magenta.music.trim_note_sequence(
                response_sequence, response_start_time, response_end_time)
            final_response_sequence = adjust_sequence_times(
                response_sequence, zero_time)
            # print('##############################final_response_sequence##############################')
            # print('response_start_time:' + str(response_start_time))
            # print('response_end_time:' + str(response_end_time))
            # print('zero_time:' + str(zero_time))
            # print(final_response_sequence)
            # print('##############################final_response_sequence##############################')
            return final_response_sequence
Example #6
babyshark = mm.midi_file_to_note_sequence('./mid/babyshark.mid')
babyshark = mm.extract_subsequence(babyshark, 0, 8)

babyshark.ticks_per_quarter = 0
babyshark.time_signatures.pop()
babyshark.key_signatures.pop()
babyshark.tempos.pop()
babyshark.tempos.add(qpm=60)

for note in babyshark.notes:
    if note.pitch < 60:
        note.pitch = 60
    note.instrument = 0
    note.is_drum = False
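
The interpolation below relies on a MusicVAE model object and a `num_steps` value that the snippet does not define; a minimal sketch of that setup, assuming the `cat-mel_2bar_big` melody configuration (the checkpoint path and step count are assumptions):

from magenta.models.music_vae import configs
from magenta.models.music_vae.trained_model import TrainedModel

# Assumed MusicVAE setup for the `music_vae` object used below.
music_vae = TrainedModel(
    configs.CONFIG_MAP['cat-mel_2bar_big'],
    batch_size=4,
    checkpoint_dir_or_path='/tmp/mel_2bar_big.ckpt')  # illustrative path

num_steps = 8  # assumed: how many sequences to interpolate, including both endpoints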

# This gives us a list of sequences.
note_sequences = music_vae.interpolate(twinkle_twinkle,
                                       babyshark,
                                       num_steps=num_steps,
                                       length=8)

# Concatenate them into one long sequence, with the start and
# end sequences at each end.
interp_seq = mm.sequences_lib.concatenate_sequences(note_sequences)

mm.play_sequence(interp_seq, synth=mm.fluidsynth)
mm.plot_sequence(interp_seq)

mm.sequence_proto_to_midi_file(interp_seq, 'twinkle_shark.mid')
Example #7
targets = []
decode_length = 1024

# Generate sample events.
sample_ids = next(unconditional_samples)['outputs']

# Decode to NoteSequence.
midi_filename = decode(
    sample_ids,
    encoder=unconditional_encoders['targets'])
unconditional_ns = mm.midi_file_to_note_sequence(midi_filename)

# Play and plot.
mm.play_sequence(
    unconditional_ns,
    synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(unconditional_ns)

#@title Download Performance as MIDI
#@markdown Download generated performance as MIDI (optional).

mm.sequence_proto_to_midi_file(
    unconditional_ns, '/tmp/unconditional.mid')
files.download('/tmp/unconditional.mid')

#@title Choose Priming Sequence
#@markdown Here you can choose a priming sequence to be continued
#@markdown by the model.  We have provided a few, or you can
#@markdown upload your own MIDI file.
#@markdown
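The example is cut off before the priming-sequence cell itself; a minimal sketch of what that step could look like, reusing the `files`, `SAMPLE_RATE`, and `SF2_PATH` names from above (the primer path is illustrative):

primer_path = '/tmp/primer.mid'  # assumed path to a provided primer

# Or upload a MIDI file through the colab widget instead:
# uploaded = files.upload()
# primer_path = list(uploaded.keys())[0]

primer_ns = mm.midi_file_to_note_sequence(primer_path)
mm.play_sequence(primer_ns, synth=mm.fluidsynth,
                 sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)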
Example #8
def play(note_sequences):
  if not isinstance(note_sequences, list):
    note_sequences = [note_sequences]
  for ns in note_sequences:
    mm.play_sequence(ns, synth=mm.fluidsynth, sf2_path=SF2_PATH)
Example #9
import os

import magenta.music as mm
from magenta.models.performance_rnn import performance_sequence_generator
# Depending on the Magenta version, these protos live in magenta.protobuf or note_seq.protobuf.
from magenta.protobuf import generator_pb2, music_pb2

# Necessary until pyfluidsynth is updated (>1.2.5).
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)

# Constants.
BUNDLE_DIR = '/tmp/'
MODEL_NAME = 'performance_with_dynamics'
BUNDLE_NAME = MODEL_NAME + '.mag'

mm.notebook_utils.download_bundle(BUNDLE_NAME, BUNDLE_DIR)

"""# Generate a sequence"""

bundle = mm.sequence_generator_bundle.read_bundle_file(os.path.join(BUNDLE_DIR, BUNDLE_NAME))
generator_map = performance_sequence_generator.get_generator_map()
generator = generator_map[MODEL_NAME](checkpoint=None, bundle=bundle)
generator.initialize()
generator_options = generator_pb2.GeneratorOptions()
generator_options.args['temperature'].float_value = 1.6 
generate_section = generator_options.generate_sections.add(start_time=0, end_time=60)
sequence = generator.generate(music_pb2.NoteSequence(), generator_options)

# Play and view this masterpiece.
mm.plot_sequence(sequence)
mm.play_sequence(sequence, mm.midi_synth.fluidsynth,
                 sf2_path='/tmp/Yamaha-C5-Salamander-JNv5.1.sf2')
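
As in the earlier examples, the generated performance could also be written out as a MIDI file (the filename is illustrative):

mm.sequence_proto_to_midi_file(sequence, '/tmp/performance_sample_output.mid')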