def decode(self, event_ids, save_path=None):
        """
        Transform a sequence of event indices into a performance MIDI file.
        Args:
          event_ids: List of performance event indices.
        Returns:
          Path to the temporary file where the MIDI was saved.
        """
        performance = note_seq.performance_lib.Performance(
            quantized_sequence=None,
            steps_per_second=self._steps_per_second,
            num_velocity_bins=self._num_velocity_bins)

        tokens = []
        for event_id in event_ids:
            # Skip a third (or later) consecutive TIME_SHIFT_100 event so that
            # long silences are capped at two full time shifts. (The original
            # checked tokens[-1] twice; tokens[-2] is clearly intended.)
            if (len(tokens) >= 2
                    and self.ids_to_events[tokens[-1]] == 'TIME_SHIFT_100'
                    and self.ids_to_events[tokens[-2]] == 'TIME_SHIFT_100'
                    and self.ids_to_events[event_id] == 'TIME_SHIFT_100'):
                continue
            tokens.append(event_id)

            # Ids 0 and 1 are presumably reserved (padding/EOS) rather than
            # performance events, so they are not decoded.
            if event_id > 1:
                performance.append(self.decode_event(event_id))
            
        ns = performance.to_sequence(max_note_duration=3)
        note_seq.sequence_proto_to_midi_file(ns, save_path)

        return save_path
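For reference, the note_seq performance API used above can also be driven directly. A minimal, self-contained sketch (assuming only that the note_seq package is installed) that builds a one-note performance and writes it to MIDI:

import note_seq
from note_seq import performance_lib

# Empty performance at 100 steps/second with velocity bins disabled.
performance = performance_lib.Performance(
    quantized_sequence=None, steps_per_second=100, num_velocity_bins=0)

# One-second middle C: NOTE_ON, a 100-step TIME_SHIFT, then NOTE_OFF.
PerformanceEvent = performance_lib.PerformanceEvent
performance.append(PerformanceEvent(PerformanceEvent.NOTE_ON, 60))
performance.append(PerformanceEvent(PerformanceEvent.TIME_SHIFT, 100))
performance.append(PerformanceEvent(PerformanceEvent.NOTE_OFF, 60))

ns = performance.to_sequence()
note_seq.sequence_proto_to_midi_file(ns, 'middle_c.mid')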
Example #2
  def decode(self, ids, strip_extraneous=False):
    """Transform a sequence of event indices into a performance MIDI file.

    Args:
      ids: List of performance event indices.
      strip_extraneous: Whether to strip EOS and padding from the end of `ids`.

    Returns:
      Path to the temporary file where the MIDI was saved.
    """
    if strip_extraneous:
      ids = text_encoder.strip_ids(ids, list(range(self.num_reserved_ids)))

    # Decode indices corresponding to event n-grams back into the n-grams.
    event_ids = []
    for i in ids:
      if i >= self.unigram_vocab_size:
        event_ids += self._ngrams[i - self.unigram_vocab_size]
      else:
        event_ids.append(i)

    performance = note_seq.Performance(
        quantized_sequence=None,
        steps_per_second=self._steps_per_second,
        num_velocity_bins=self._num_velocity_bins)
    for i in event_ids:
      performance.append(self._encoding.decode_event(i - self.num_reserved_ids))

    ns = performance.to_sequence()

    _, tmp_file_path = tempfile.mkstemp('_decode.mid')
    note_seq.sequence_proto_to_midi_file(ns, tmp_file_path)

    return tmp_file_path
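For intuition on the n-gram expansion loop above: with the MidiPerformanceEncoder configuration from the test further down this page (pitch range 21-108, 100 time-shift bins, 32 velocity bins, ngrams=[(277, 129)]), the unigram vocabulary holds 2 reserved ids plus 88 NOTE-ON, 88 NOTE-OFF, 100 TIME-SHIFT, and 32 VELOCITY events, i.e. 310 ids, so id 310 is the first n-gram id. A self-contained sketch of the loop's effect under those assumed constants:

# Assumed constants, mirroring the test configuration below.
UNIGRAM_VOCAB_SIZE = 310
NGRAMS = [(277, 129)]  # id 310 maps to this event pair

ids = [302, 41, 310]   # VELOCITY(25), NOTE-ON(60), n-gram id
event_ids = []
for i in ids:
    if i >= UNIGRAM_VOCAB_SIZE:
        event_ids += NGRAMS[i - UNIGRAM_VOCAB_SIZE]  # expand composite id
    else:
        event_ids.append(i)
assert event_ids == [302, 41, 277, 129]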
Example #3
def save_sequence(seq, prefix):
    seq.sort(key=operator.attrgetter('start'))
    mel = note_seq.protobuf.music_pb2.NoteSequence()

    for note in seq:
        mel.notes.add(pitch=note.midi, start_time=note.start,
                      end_time=note.end, velocity=80)
    mel.tempos.add(qpm=85)
    mel.total_time = seq[-1].end

    note_seq.sequence_proto_to_midi_file(mel, f'Output/{prefix}_out.mid')
    pre = pretty_midi.PrettyMIDI(f'Output/{prefix}_out.mid')
    visual_midi.Plotter().save(pre, f'Output/{prefix}_plotted.html')

    return mel
Example #4
def midi_quantizer(self, input_midi, output_midi):
    """
    Apply sustain-pedal control changes to a MIDI file and save the result.

    Args:
      input_midi: Path to the source MIDI file; if falsy, an empty sequence
        is written instead.
      output_midi: Path where the processed MIDI file will be saved.

    Returns:
      output_midi: Path to the saved MIDI file.
    """
    if input_midi:
        ns = note_seq.midi_file_to_sequence_proto(input_midi)
        ns = note_seq.sequences_lib.apply_sustain_control_changes(ns)
        # The sustain events are now baked into note durations; drop them.
        del ns.control_changes[:]
    else:
        ns = note_seq.protobuf.music_pb2.NoteSequence()
    note_seq.sequence_proto_to_midi_file(ns, output_midi)
    return output_midi
Example #5
def main():
    test_target = './../midi_input/Anchor.mid'
    ns = note_seq.midi_file_to_note_sequence(test_target)
    target_instrument = skyline(ns)
    if target_instrument is None:
        print('No track selected')
    else:
        seq = get_new_ns(target_instrument, ns)
        out_path = './../midi_output/preprocess_output/out1.mid'
        # Dump the sequence before and after the MIDI round trip so the two
        # text files can be diffed.
        with open('./../1.txt', 'w+') as file1:
            file1.write(str(seq))
        note_seq.sequence_proto_to_midi_file(seq, out_path)
        with open('./../2.txt', 'w+') as file2:
            file2.write(str(note_seq.midi_file_to_note_sequence(out_path)))
Example #6
def sequence_midi_files_generative(input_dir: str, output_dir: str,
                                   model_file: str):
    model = utils.load_model(model_file)
    files = [
        file for file in os.listdir(input_dir) if file.lower().endswith('.mid')
    ]
    for file in files:
        print(f'Sequencing {file}...')
        notesequence = midi_file_to_note_sequence(f'{input_dir}/{file}')
        seq_arr = utils.seq_to_arr(notesequence, 8)
        num_features = len(seq_arr[0][1:])
        results = sequence_midi_file(seq_arr, model, num_features, 10)
        integrate_output(notesequence, results)
        sequence_proto_to_midi_file(notesequence,
                                    f'{output_dir}/{file[:-4]}_gen.mid')

    print('Done')
Example #7
    def decode(self, ids, strip_extraneous=False):
        """Transform a sequence of event indices into a performance MIDI file.

    Args:
      ids: List of performance event indices.
      strip_extraneous: Whether to strip EOS and padding from the end of `ids`.

    Returns:
      Path to the temporary file where the MIDI was saved.
    """
        ns = self.decode_to_note_sequence(ids,
                                          strip_extraneous=strip_extraneous)

        _, tmp_file_path = tempfile.mkstemp('_decode.mid')
        note_seq.sequence_proto_to_midi_file(ns, tmp_file_path)

        return tmp_file_path
Example #8
def sequence_midi_files_transform(input_dir: str, output_dir: str,
                                  model_file: str):
    model = utils.load_model(model_file)
    files = [
        file for file in os.listdir(input_dir) if file.lower().endswith('.mid')
    ]
    for file in files:
        print(f'Sequencing {file}...')
        notesequence = midi_file_to_note_sequence(f'{input_dir}/{file}')
        seq_arr = utils.seq_to_arr(notesequence, 8)
        num_features = len(seq_arr[0][1:])
        inputs = torch.tensor(np.array(seq_arr)[:,
                                                1:]).view(1, -1, num_features)
        inputs = inputs.cuda().float()
        with torch.no_grad():
            results = model(inputs)
        integrate_output(notesequence, results.view(-1).tolist())
        sequence_proto_to_midi_file(notesequence,
                                    f'{output_dir}/{file[:-4]}_trans.mid')
Example #9
  def testEncode(self):
    encoder = music_encoders.MidiPerformanceEncoder(
        steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,
        ngrams=[(277, 129)])

    ns = note_seq.NoteSequence()
    testing_lib.add_track_to_sequence(ns, 0, [(60, 97, 0.0, 1.0)])

    # Write NoteSequence to MIDI file as encoder takes in filename.
    with tempfile.NamedTemporaryFile(suffix='.mid') as f:
      note_seq.sequence_proto_to_midi_file(ns, f.name)
      ids = encoder.encode(f.name)

    expected_ids = [
        302,  # VELOCITY(25)
        41,   # NOTE-ON(60)
        310   # TIME-SHIFT(100), NOTE-OFF(60)
    ]

    self.assertEqual(expected_ids, ids)
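A natural round trip (not part of the original test) is to feed the ids back through the encoder's decode method shown in Examples #2 and #7, which writes a temporary MIDI file; a sketch reusing the names above, with the expected note count stated as an assumption:

    # Hypothetical continuation of the test: decode back to MIDI and reload.
    midi_path = encoder.decode(ids)
    decoded_ns = note_seq.midi_file_to_sequence_proto(midi_path)
    assert len(decoded_ns.notes) == 1  # the single middle C should survive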
Example #10
def _check_extract_examples(input_ns, path, input_number):
  """Make sure each input returns exactly one example from the converter."""
  tensors = config.data_converter.to_tensors(input_ns).outputs
  if not tensors:
    print(
        'MusicVAE configs have very specific input requirements. Could not '
        'extract any valid inputs from `%s`. Try another MIDI file.' % path)
    sys.exit()
  elif len(tensors) > 1:
    basename = os.path.join(
        FLAGS.output_dir,
        '%s_input%d-extractions_%s-*-of-%03d.mid' %
        (FLAGS.config, input_number, date_and_time, len(tensors)))
    for i, ns in enumerate(config.data_converter.from_tensors(tensors)):
      note_seq.sequence_proto_to_midi_file(
          ns, basename.replace('*', '%03d' % i))
    print(
        '%d valid inputs extracted from `%s`. Outputting these potential '
        'inputs as `%s`. Call script again with one of these instead.' %
        (len(tensors), path, basename))
    sys.exit()
Example #11
def decode_to_midi(target_directory,
                   trained_model,
                   length=256,
                   z_batch=None,
                   samples_per_batch=1,
                   temperature=0.5,
                   file_name=''):
    """ decode the generated z into note sequences

    Args:
        target_directory: the directory to hold the generated piece.
        trained_model: the trained model, may be loaded from checkpoints or
          use the music_vae.trained_model.TrainedModel method.
        length: pass
        z_batch: the input batch of z, each np.array generates one output.
        samples_per_batch: how many note sequences to generate for each z.
        temperature: softmax temperature used in model.decode.
        file_name: the file_name to attach to the front

    Return:
        a batch of note sequences.
    """
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    if z_batch is None:
        z_batch = []
    note_seq_batch = decode(trained_model, length, z_batch, samples_per_batch,
                            temperature)
    basename = os.path.join(
        target_directory, '%s_vae_output_%s_%03d_*.mid' %
        (file_name, date_and_time, samples_per_batch))
    output_file_paths = []
    i = 0  # One counter across batches so earlier files are not overwritten.
    for noteseq in note_seq_batch:
        for ns in noteseq:
            i += 1
            file_path = basename.replace('*', '%03d' % i)
            note_seq.sequence_proto_to_midi_file(ns, file_path)
            output_file_paths.append(file_path)
    return output_file_paths
Example #12
def extract_track(input_directory, file_name, output_directory):
    path_mid = os.path.join(input_directory, file_name)
    dump_path = os.path.join(output_directory, file_name + ".tmp")
    save_path = os.path.join(output_directory, file_name)
    try:
        # Extract the melody with the time-first skyline algorithm.
        ns = note_seq.midi_file_to_note_sequence(path_mid)
        new_ns = pm.get_new_ns(pm.skyline(ns, mode='time_first'), ns)
        note_seq.sequence_proto_to_midi_file(new_ns, save_path)
    except Exception:
        try:
            # Fall back to the variance-first skyline algorithm.
            ns = note_seq.midi_file_to_note_sequence(path_mid)
            new_ns = pm.get_new_ns(pm.skyline(ns, mode='variance_first'), ns)
            note_seq.sequence_proto_to_midi_file(new_ns, save_path)
        except Exception:
            pass
    if os.path.exists(dump_path):
        os.remove(dump_path)
Example #13
print("Starting...")

# Creating Sequence (Melody A: C# Minor 4/4)
mel = note_seq.protobuf.music_pb2.NoteSequence()  # Initialize NoteSequence object
note_list = ((61, 0, 1), (61, 1, 1.5), (64, 1.5, 2), (66, 2, 2.5), (69, 2.5, 3),
             (68, 3, 4), (64, 4, 4.5), (66, 4.5, 5), (64, 5, 5.5), (63, 5.5, 6),
             (61, 6, 7), (60, 7, 8))  # Notes in the form (MIDI pitch, start, end)

for note in note_list:  # Add all the notes
    mel.notes.add(pitch=note[0], start_time=note[1], end_time=note[2],
                  velocity=80)

mel.tempos.add(qpm=90)

#  Convert note_seq to MIDI for storage and playback
note_seq.sequence_proto_to_midi_file(mel, 'Input/in.mid')

# Import Dependencies
from magenta.models.melody_rnn import melody_rnn_sequence_generator
from magenta.models.shared import sequence_generator_bundle
from note_seq.protobuf import generator_pb2
from note_seq.protobuf import music_pb2

# Initialize Model
bundle = sequence_generator_bundle.read_bundle_file('Src/basic_rnn.mag')  # Loads model for use
generator_map = melody_rnn_sequence_generator.get_generator_map()
melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
melody_rnn.initialize()

# Model Parameters
steps = 16
Example #14
def run_with_flags(generator):
  """Generates polyphonic tracks and saves them as MIDI files.

  Uses the options specified by the flags defined in this module.

  Args:
    generator: The PolyphonyRnnSequenceGenerator to use for generation.
  """
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return
  output_dir = os.path.expanduser(FLAGS.output_dir)

  primer_midi = None
  if FLAGS.primer_midi:
    primer_midi = os.path.expanduser(FLAGS.primer_midi)

  if not tf.gfile.Exists(output_dir):
    tf.gfile.MakeDirs(output_dir)

  primer_sequence = None
  qpm = FLAGS.qpm if FLAGS.qpm else note_seq.DEFAULT_QUARTERS_PER_MINUTE
  if FLAGS.primer_pitches:
    primer_sequence = music_pb2.NoteSequence()
    primer_sequence.tempos.add().qpm = qpm
    primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ
    for pitch in ast.literal_eval(FLAGS.primer_pitches):
      note = primer_sequence.notes.add()
      note.start_time = 0
      note.end_time = 60.0 / qpm
      note.pitch = pitch
      note.velocity = 100
    primer_sequence.total_time = primer_sequence.notes[-1].end_time
  elif FLAGS.primer_melody:
    primer_melody = note_seq.Melody(ast.literal_eval(FLAGS.primer_melody))
    primer_sequence = primer_melody.to_sequence(qpm=qpm)
  elif primer_midi:
    primer_sequence = note_seq.midi_file_to_sequence_proto(primer_midi)
    if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
      qpm = primer_sequence.tempos[0].qpm
  else:
    tf.logging.warning(
        'No priming sequence specified. Defaulting to empty sequence.')
    primer_sequence = music_pb2.NoteSequence()
    primer_sequence.tempos.add().qpm = qpm
    primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ

  # Derive the total number of seconds to generate.
  seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
  generate_end_time = FLAGS.num_steps * seconds_per_step

  # Specify start/stop time for generation based on starting generation at the
  # end of the priming sequence and continuing until the sequence is num_steps
  # long.
  generator_options = generator_pb2.GeneratorOptions()
  # Set the start time to begin when the last note ends.
  generate_section = generator_options.generate_sections.add(
      start_time=primer_sequence.total_time,
      end_time=generate_end_time)

  if generate_section.start_time >= generate_section.end_time:
    tf.logging.fatal(
        'Priming sequence is longer than the total number of steps '
        'requested: Priming sequence length: %s, Total length '
        'requested: %s',
        generate_section.start_time, generate_end_time)
    return

  generator_options.args['temperature'].float_value = FLAGS.temperature
  generator_options.args['beam_size'].int_value = FLAGS.beam_size
  generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
  generator_options.args[
      'steps_per_iteration'].int_value = FLAGS.steps_per_iteration

  generator_options.args['condition_on_primer'].bool_value = (
      FLAGS.condition_on_primer)
  generator_options.args['no_inject_primer_during_generation'].bool_value = (
      not FLAGS.inject_primer_during_generation)

  tf.logging.debug('primer_sequence: %s', primer_sequence)
  tf.logging.debug('generator_options: %s', generator_options)

  # Make the generate request num_outputs times and save the output as midi
  # files.
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
  digits = len(str(FLAGS.num_outputs))
  for i in range(FLAGS.num_outputs):
    generated_sequence = generator.generate(primer_sequence, generator_options)

    midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
    midi_path = os.path.join(output_dir, midi_filename)
    note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

  tf.logging.info('Wrote %d MIDI files to %s',
                  FLAGS.num_outputs, output_dir)
Example #15
    def run(self):
        """Generates polyphonic tracks and saves them as MIDI files.
        Uses the options specified by the flags defined in this module.
        Args:
            generator: The PolyphonyRnnSequenceGenerator to use for generation.
        """
        primer_melody = self._get_primer_melody()

        output_dir = os.path.expanduser(MusicGeneratorSettings.output_dir)

        if not tf.gfile.Exists(output_dir):
            tf.gfile.MakeDirs(output_dir)

        for i in os.listdir(output_dir):
            os.remove(os.path.join(output_dir, i))

        primer_sequence = None
        qpm = MusicGeneratorSettings.qpm

        primer_melody = note_seq.Melody(ast.literal_eval(primer_melody))
        primer_sequence = primer_melody.to_sequence(qpm=qpm)

        # Derive the total number of seconds to generate.
        seconds_per_step = 60.0 / qpm / self.generator.steps_per_quarter
        generate_end_time = MusicGeneratorSettings.num_steps * seconds_per_step

        # Specify start/stop time for generation based on starting generation at the
        # end of the priming sequence and continuing until the sequence is num_steps
        # long.
        generator_options = generator_pb2.GeneratorOptions()
        # Set the start time to begin when the last note ends.
        generator_options.generate_sections.add(
            start_time=primer_sequence.total_time, end_time=generate_end_time)

        generator_options.args[
            'temperature'].float_value = MusicGeneratorSettings.temperature
        generator_options.args[
            'beam_size'].int_value = MusicGeneratorSettings.beam_size
        generator_options.args[
            'branch_factor'].int_value = MusicGeneratorSettings.branch_factor
        generator_options.args[
            'steps_per_iteration'].int_value = MusicGeneratorSettings.steps_per_iteration
        generator_options.args[
            'condition_on_primer'].bool_value = MusicGeneratorSettings.condition_on_primer
        generator_options.args[
            'no_inject_primer_during_generation'].bool_value = not MusicGeneratorSettings.inject_primer_during_generation

        # Make the generate request num_outputs times and save the output as midi
        # files.
        digits = len(str(MusicGeneratorSettings.num_outputs))
        for i in range(MusicGeneratorSettings.num_outputs):
            generated_sequence = self.generator.generate(
                primer_sequence, generator_options)
            midi_filename = str(i + 1).zfill(digits) + ".mid"
            midi_path = os.path.join(output_dir, midi_filename)
            note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

            if i == 0:
                threading.Thread(target=self.player.play).start()
            elif i == 1:
                self.player.enqueue(midi_path)

        tf.logging.info('Wrote %d MIDI files to %s',
                        MusicGeneratorSettings.num_outputs, output_dir)
Example #16
def generate_midi(prime_loc, partial_loc, total_loc):

    # Create input generator (so we can adjust priming and
    # decode length on the fly).
    def input_generator():
        print('inside input_gen')
        # These values will be changed by subsequent cells.
        while True:
            yield {
                'targets': np.array([targets], dtype=np.int32),
                'decode_length': np.array(decode_length, dtype=np.int32)
            }

    # Initialize targets and decode_length; these are updated before sampling.
    targets = []
    decode_length = 0

    # Start the Estimator, loading from the specified checkpoint.
    input_fn = decoding.make_input_fn_from_generator(input_generator())

    unconditional_samples = estimator.predict(input_fn,
                                              checkpoint_path=ckpt_path)

    # "Burn" one.
    _ = next(unconditional_samples)

    # convert our input midi to note sequence.
    prime_ns = note_seq.midi_file_to_note_sequence(prime_loc)

    # Handle sustain pedal in the primer.
    primer_ns = note_seq.apply_sustain_control_changes(prime_ns)

    targets = unconditional_encoders['targets'].encode_note_sequence(primer_ns)

    # Remove the end token from the encoded primer.
    targets = targets[:-1]

    decode_length = max(0, np.random.randint(0, 10) + len(targets))
    if len(targets) >= 4096:
        print(
            'Primer has more events than maximum sequence length; nothing will be generated.'
        )

    print('generating the continuation of the input midi')
    # Generate sample events.
    sample_ids = next(unconditional_samples)['outputs']

    # Decode to NoteSequence.
    midi_filename = decode(sample_ids,
                           encoder=unconditional_encoders['targets'])
    ns = note_seq.midi_file_to_note_sequence(midi_filename)

    # Append continuation to primer.
    total_ns = note_seq.concatenate_sequences([primer_ns, ns])

    # saving our generated music for future reference
    note_seq.sequence_proto_to_midi_file(ns, partial_loc)
    note_seq.sequence_proto_to_midi_file(total_ns, total_loc)

    print('finished generating.... returning the final file')

    return partial_loc
Example #17
def listen_and_extend(chunk_duration,
                      min_volume,
                      min_rest,
                      rest_threshold,
                      mel_min=4,
                      rest_max=3,
                      sampling_rate=44100):
    chunksize = int(chunk_duration * sampling_rate)
    min_note_size = float(chunk_duration * 1.05)

    p = pyaudio.PyAudio()  # Initialize PyAudio object

    print(f"Recording audio in {chunk_duration} second chunks.")
    input("Press enter to proceed.")

    # Open stream with standard parameters
    stream = p.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=sampling_rate,
                    input=True,
                    frames_per_buffer=chunksize)

    # Run 4 processing steps: condense octaves, smooth repeats, remove errors, add rests
    pre_seq, full_raw = find_melody(chunksize, chunk_duration, sampling_rate,
                                    min_volume, stream)
    oct_seq = condense_octaves(copy.deepcopy(pre_seq))

    res = process_MIDI(copy.deepcopy(oct_seq), min_note_size)
    while not res[1]:
        res = process_MIDI(res[0], min_note_size)
    final_seq = res[0]

    samp_rest = find_rests(full_raw, rest_threshold)
    sec_rests = [(round(tup[0] / sampling_rate,
                        2), round(tup[1] / sampling_rate, 2))
                 for tup in samp_rest]
    sec_rests = [tup for tup in sec_rests if tup[1] - tup[0] > min_rest]

    rest_seq = []
    for note in final_seq:
        rest_seq = note.add_rests(sec_rests, rest_seq)

    # Cleanup
    stream.stop_stream()
    stream.close()
    p.terminate()

    # Plots the waveform and saves the result
    plt.plot(full_raw)
    plt.axhline(min_volume, color='r')
    plt.axhline(-min_volume, color='r')
    plt.title("Raw Microphone Input")
    plt.savefig("Output/Waveform.png")

    # Save MIDI plots and MIDI files
    save_sequence(pre_seq, 'pre')
    save_sequence(oct_seq, 'oct')
    save_sequence(final_seq, 'post')
    rest_mel = save_sequence(rest_seq, 'rest')

    # Initialize Model
    bundle = sequence_generator_bundle.read_bundle_file('Src/basic_rnn.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    # Model Parameters
    end_time = (max(note.end_time for note in rest_mel.notes))
    qpm = rest_mel.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    steps = ((rest_mel.total_time * qpm * melody_rnn.steps_per_quarter) / 60)
    total = steps * seconds_per_step
    tmp = 1.0

    # Initialize Generator
    gen_options = generator_pb2.GeneratorOptions()
    gen_options.args['temperature'].float_value = tmp
    gen_section = gen_options.generate_sections.add(start_time=end_time +
                                                    seconds_per_step,
                                                    end_time=total)

    out = melody_rnn.generate(rest_mel, gen_options)

    note_seq.sequence_proto_to_midi_file(out, 'Output/ext_out.mid')
    ext = pretty_midi.PrettyMIDI('Output/ext_out.mid')
    visual_midi.Plotter().save(ext, 'Output/ext_plotted.html')

    return ext
Example #18
def run_with_flags(generator):
    """Generates performance tracks and saves them as MIDI files.

    Uses the options specified by the flags defined in this module.

    Args:
      generator: The PerformanceRnnSequenceGenerator to use for generation.
    """
    if not FLAGS.output_dir:
        tf.logging.fatal('--output_dir required')
        return
    output_dir = os.path.expanduser(FLAGS.output_dir)

    primer_midi = None
    if FLAGS.primer_midi:
        primer_midi = os.path.expanduser(FLAGS.primer_midi)

    if not tf.gfile.Exists(output_dir):
        tf.gfile.MakeDirs(output_dir)

    primer_sequence = None
    if FLAGS.primer_pitches:
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ
        for pitch in ast.literal_eval(FLAGS.primer_pitches):
            note = primer_sequence.notes.add()
            note.start_time = 0
            note.end_time = 60.0 / note_seq.DEFAULT_QUARTERS_PER_MINUTE
            note.pitch = pitch
            note.velocity = 100
            primer_sequence.total_time = note.end_time
    elif FLAGS.primer_melody:
        primer_melody = note_seq.Melody(ast.literal_eval(FLAGS.primer_melody))
        primer_sequence = primer_melody.to_sequence()
    elif primer_midi:
        primer_sequence = note_seq.midi_file_to_sequence_proto(primer_midi)
    else:
        tf.logging.warning(
            'No priming sequence specified. Defaulting to empty sequence.')
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ

    # Derive the total number of seconds to generate.
    seconds_per_step = 1.0 / generator.steps_per_second
    generate_end_time = FLAGS.num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at the
    # end of the priming sequence and continuing until the sequence is num_steps
    # long.
    generator_options = generator_pb2.GeneratorOptions()
    # Set the start time to begin when the last note ends.
    generate_section = generator_options.generate_sections.add(
        start_time=primer_sequence.total_time, end_time=generate_end_time)

    if generate_section.start_time >= generate_section.end_time:
        tf.logging.fatal(
            'Priming sequence is longer than the total number of steps '
            'requested: Priming sequence length: %s, Total length '
            'requested: %s', generate_section.start_time, generate_end_time)
        return

    for control_cls in note_seq.all_performance_control_signals:
        if FLAGS[control_cls.name].value is not None and (
                generator.control_signals is None
                or not any(control.name == control_cls.name
                           for control in generator.control_signals)):
            tf.logging.warning(
                'Control signal requested via flag, but generator is not set up to '
                'condition on this control signal. Request will be ignored: %s = %s',
                control_cls.name, FLAGS[control_cls.name].value)

    if (FLAGS.disable_conditioning is not None
            and not generator.optional_conditioning):
        tf.logging.warning(
            'Disable conditioning flag set, but generator is not set up for '
            'optional conditioning. Requested disable conditioning flag will be '
            'ignored: %s', FLAGS.disable_conditioning)

    if generator.control_signals:
        for control in generator.control_signals:
            if FLAGS[control.name].value is not None:
                generator_options.args[control.name].string_value = (
                    FLAGS[control.name].value)
    if FLAGS.disable_conditioning is not None:
        generator_options.args['disable_conditioning'].string_value = (
            FLAGS.disable_conditioning)

    generator_options.args['temperature'].float_value = FLAGS.temperature
    generator_options.args['beam_size'].int_value = FLAGS.beam_size
    generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
    generator_options.args[
        'steps_per_iteration'].int_value = FLAGS.steps_per_iteration

    tf.logging.debug('primer_sequence: %s', primer_sequence)
    tf.logging.debug('generator_options: %s', generator_options)

    # Make the generate request num_outputs times and save the output as midi
    # files.
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    digits = len(str(FLAGS.num_outputs))
    for i in range(FLAGS.num_outputs):
        generated_sequence = generator.generate(primer_sequence,
                                                generator_options)

        midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
        midi_path = os.path.join(output_dir, midi_filename)
        note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

    tf.logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs, output_dir)
Example #19
# The purpose of this file is to test whether the parsed data from magentaparser.py can be
# converted back into a MIDI sequence and exported as a MIDI file. This is purely to make
# sure that notes predicted by the model can be properly formatted so that they can be
# converted back into a MIDI file.

import pandas as pd
import note_seq

directory = 'parsed_songs'

df = pd.read_csv(directory + '/Time.csv')

seq = note_seq.NoteSequence()

# Rebuild the NoteSequence row by row from the parsed CSV.
for i in range(len(df)):
    seq.notes.add(pitch=df.at[i, 'pitch'],
                  velocity=df.at[i, 'velocity'],
                  start_time=df.at[i, 'start_time'],
                  end_time=df.at[i, 'end_time'])
tempo = df['tempo'].iloc[0]
seq.tempos.add(qpm=tempo)
note_seq.sequence_proto_to_midi_file(seq, 'songs_test/Tim_reconstruct.mid')
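For the opposite direction of this round trip, a minimal sketch (assuming the same column layout as Time.csv; the helper name is hypothetical) that flattens a NoteSequence back into a DataFrame:

import pandas as pd
import note_seq

def sequence_to_dataframe(seq):
    """Flatten a NoteSequence into rows matching the Time.csv columns."""
    qpm = seq.tempos[0].qpm if seq.tempos else 120.0  # assumed default tempo
    rows = [{'pitch': n.pitch, 'velocity': n.velocity,
             'start_time': n.start_time, 'end_time': n.end_time,
             'tempo': qpm}
            for n in seq.notes]
    return pd.DataFrame(rows)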
Example #20
            if final_seq:
                prev = next(note for note in final_seq if not note.finished)
                prev.finalize(cycles, CHUNK_DURATION)

            final_seq.append(new_note)

        if cycles == CYCLE_MAX - 1:
            final_seq[-1].finalize(cycles, CHUNK_DURATION)

        last_midi = midi
        cycles += 1

    except KeyboardInterrupt:
        break

mel = note_seq.protobuf.music_pb2.NoteSequence()  # Initialize NoteSequence object

for note in final_seq:  # Add all the notes
    mel.notes.add(pitch=note.midi, start_time=note.start, end_time=note.end,
                  velocity=80)

note_seq.sequence_proto_to_midi_file(mel, 'Output/test_out.mid')

# Cleanup
wave.close()
stream.stop_stream()
stream.close()
p.terminate()

print("MIDI Sequence: ", seq)
Example #21
def run_with_flags(generator):
  """Generates melodies and saves them as MIDI files.

  Uses the options specified by the flags defined in this module.

  Args:
    generator: The MelodyRnnSequenceGenerator to use for generation.
  """
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)

  primer_midi = None
  if FLAGS.primer_midi:
    primer_midi = os.path.expanduser(FLAGS.primer_midi)

  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)

  primer_sequence = None
  qpm = FLAGS.qpm if FLAGS.qpm else note_seq.DEFAULT_QUARTERS_PER_MINUTE
  if FLAGS.primer_melody:
    primer_melody = note_seq.Melody(ast.literal_eval(FLAGS.primer_melody))
    primer_sequence = primer_melody.to_sequence(qpm=qpm)
  elif primer_midi:
    primer_sequence = note_seq.midi_file_to_sequence_proto(primer_midi)
    if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
      qpm = primer_sequence.tempos[0].qpm
  else:
    tf.logging.warning(
        'No priming sequence specified. Defaulting to a single middle C.')
    primer_melody = note_seq.Melody([60])
    primer_sequence = primer_melody.to_sequence(qpm=qpm)

  # Derive the total number of seconds to generate based on the QPM of the
  # priming sequence and the num_steps flag.
  seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
  total_seconds = FLAGS.num_steps * seconds_per_step

  # Specify start/stop time for generation based on starting generation at the
  # end of the priming sequence and continuing until the sequence is num_steps
  # long.
  generator_options = generator_pb2.GeneratorOptions()
  if primer_sequence:
    input_sequence = primer_sequence
    # Set the start time to begin on the next step after the last note ends.
    if primer_sequence.notes:
      last_end_time = max(n.end_time for n in primer_sequence.notes)
    else:
      last_end_time = 0
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    if generate_section.start_time >= generate_section.end_time:
      tf.logging.fatal(
          'Priming sequence is longer than the total number of steps '
          'requested: Priming sequence length: %s, Generation length '
          'requested: %s',
          generate_section.start_time, total_seconds)
      return
  else:
    input_sequence = music_pb2.NoteSequence()
    input_sequence.tempos.add().qpm = qpm
    generate_section = generator_options.generate_sections.add(
        start_time=0,
        end_time=total_seconds)
  generator_options.args['temperature'].float_value = FLAGS.temperature
  generator_options.args['beam_size'].int_value = FLAGS.beam_size
  generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
  generator_options.args[
      'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
  tf.logging.debug('input_sequence: %s', input_sequence)
  tf.logging.debug('generator_options: %s', generator_options)

  # Make the generate request num_outputs times and save the output as midi
  # files.
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
  digits = len(str(FLAGS.num_outputs))
  for i in range(FLAGS.num_outputs):
    generated_sequence = generator.generate(input_sequence, generator_options)

    midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
    midi_path = os.path.join(FLAGS.output_dir, midi_filename)
    note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

  tf.logging.info('Wrote %d MIDI files to %s',
                  FLAGS.num_outputs, FLAGS.output_dir)
Example #22
# Save MIDI plots and MIDI files
save_sequence(pre_seq, 'pre')
save_sequence(oct_seq, 'oct')
save_sequence(final_seq, 'post')
rest_mel = save_sequence(rest_seq, 'rest')

# Initialize Model
bundle = sequence_generator_bundle.read_bundle_file('Src/basic_rnn.mag')
generator_map = melody_rnn_sequence_generator.get_generator_map()
melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
melody_rnn.initialize()

# Model Parameters
steps = 16
tmp = 1.0

# Initialize Generator
final_seq.sort(key=operator.attrgetter('start'))
gen_options = generator_pb2.GeneratorOptions()
gen_options.args['temperature'].float_value = tmp
gen_section = gen_options.generate_sections.add(
    start_time=rest_seq[-1].end,
    end_time=(rest_seq[-1].end - rest_seq[1].start) * 2)

out = melody_rnn.generate(rest_mel, gen_options)

note_seq.sequence_proto_to_midi_file(out, 'Output/ext_out.mid')
ext = pretty_midi.PrettyMIDI('Output/ext_out.mid')
visual_midi.Plotter().save(ext, 'Output/ext_plotted.html')
Example #23
encoder_decoder = encoder_decoder
num_sequences = t.song_count
vocab_size = t.vocab_size

#%%
# Test length of input target

x = training_set.inputs[0]
y = training_set.targets[0]

x1 = x.to_sequence()
y1 = y.to_sequence()

from loaders.dataloader_midi import get_instruments_from_NoteSequence

note_seq.sequence_proto_to_midi_file(x1, 'input_mel.mid')
note_seq.sequence_proto_to_midi_file(y1, 'target_bass.mid')

#%%
# Set seed
np.random.seed(42)

path_to_midi_dir = folder_name

#instruments_to_extract = (54, 34) # voice and bass
instruments_to_extract = (0, 1)  # voice and bass

# Hyper-parameters
num_epochs = 200
training_set, validation_set, test_set, tokenizer \
    = create_dataset_from_midi(path_to_midi_dir, instruments_to_extract, print_info=True)
Example #24
twinkle_twinkle.notes.add(pitch=69, start_time=2.5, end_time=3.0, velocity=80)
twinkle_twinkle.notes.add(pitch=67, start_time=3.0, end_time=4.0, velocity=80)
twinkle_twinkle.notes.add(pitch=65, start_time=4.0, end_time=4.5, velocity=80)
twinkle_twinkle.notes.add(pitch=65, start_time=4.5, end_time=5.0, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.0, end_time=5.5, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.5, end_time=6.0, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.0, end_time=6.5, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.5, end_time=7.0, velocity=80)
twinkle_twinkle.notes.add(pitch=60, start_time=7.0, end_time=8.0, velocity=80) 
twinkle_twinkle.total_time = 8

twinkle_twinkle.tempos.add(qpm=60)

# Here's another NoteSequence!
teapot = music_pb2.NoteSequence()
teapot.notes.add(pitch=69, start_time=0, end_time=0.5, velocity=80)
teapot.notes.add(pitch=71, start_time=0.5, end_time=1, velocity=80)
teapot.notes.add(pitch=73, start_time=1, end_time=1.5, velocity=80)
teapot.notes.add(pitch=74, start_time=1.5, end_time=2, velocity=80)
teapot.notes.add(pitch=76, start_time=2, end_time=2.5, velocity=80)
teapot.notes.add(pitch=81, start_time=3, end_time=4, velocity=80)
teapot.notes.add(pitch=78, start_time=4, end_time=5, velocity=80)
teapot.notes.add(pitch=81, start_time=5, end_time=6, velocity=80)
teapot.notes.add(pitch=76, start_time=6, end_time=8, velocity=80)
teapot.total_time = 8

teapot.tempos.add(qpm=60)

note_seq.sequence_proto_to_midi_file(twinkle_twinkle, 'twinkle_twinkle.mid')
note_seq.sequence_proto_to_midi_file(teapot, 'teapot.mid')
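The two sequences above can also be chained into one file; a small sketch using note_seq.concatenate_sequences (the same helper used in the generate_midi example above); the output filename is arbitrary:

# Join the two tunes back to back; assumes the twinkle_twinkle and teapot
# sequences defined above.
combined = note_seq.concatenate_sequences([twinkle_twinkle, teapot])
note_seq.sequence_proto_to_midi_file(combined, 'combined.mid')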
Example #25
def run(config_map):
  """Load model params and run sampling or interpolation.

  Args:
    config_map: Dictionary mapping configuration name to Config object.

  Raises:
    ValueError: if required flags are missing or invalid.
  """
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')

  # Parenthesized so the comparison means "both set or both unset", matching
  # the error message (the unparenthesized chained form does not).
  if (FLAGS.run_dir is None) == (FLAGS.checkpoint_file is None):
    raise ValueError(
        'Exactly one of `--run_dir` or `--checkpoint_file` must be specified.')
  if FLAGS.output_dir is None:
    raise ValueError('`--output_dir` is required.')
  tf.gfile.MakeDirs(FLAGS.output_dir)
  if FLAGS.mode != 'sample' and FLAGS.mode != 'interpolate':
    raise ValueError('Invalid value for `--mode`: %s' % FLAGS.mode)

  if FLAGS.config not in config_map:
    raise ValueError('Invalid config name: %s' % FLAGS.config)
  config = config_map[FLAGS.config]
  config.data_converter.max_tensors_per_item = None

  if FLAGS.mode == 'interpolate':
    if FLAGS.input_midi_1 is None or FLAGS.input_midi_2 is None:
      raise ValueError(
          '`--input_midi_1` and `--input_midi_2` must be specified in '
          '`interpolate` mode.')
    input_midi_1 = os.path.expanduser(FLAGS.input_midi_1)
    input_midi_2 = os.path.expanduser(FLAGS.input_midi_2)
    if not os.path.exists(input_midi_1):
      raise ValueError('Input MIDI 1 not found: %s' % FLAGS.input_midi_1)
    if not os.path.exists(input_midi_2):
      raise ValueError('Input MIDI 2 not found: %s' % FLAGS.input_midi_2)
    input_1 = note_seq.midi_file_to_note_sequence(input_midi_1)
    input_2 = note_seq.midi_file_to_note_sequence(input_midi_2)

    def _check_extract_examples(input_ns, path, input_number):
      """Make sure each input returns exactly one example from the converter."""
      tensors = config.data_converter.to_tensors(input_ns).outputs
      if not tensors:
        print(
            'MusicVAE configs have very specific input requirements. Could not '
            'extract any valid inputs from `%s`. Try another MIDI file.' % path)
        sys.exit()
      elif len(tensors) > 1:
        basename = os.path.join(
            FLAGS.output_dir,
            '%s_input%d-extractions_%s-*-of-%03d.mid' %
            (FLAGS.config, input_number, date_and_time, len(tensors)))
        for i, ns in enumerate(config.data_converter.from_tensors(tensors)):
          note_seq.sequence_proto_to_midi_file(
              ns, basename.replace('*', '%03d' % i))
        print(
            '%d valid inputs extracted from `%s`. Outputting these potential '
            'inputs as `%s`. Call script again with one of these instead.' %
            (len(tensors), path, basename))
        sys.exit()
    logging.info(
        'Attempting to extract examples from input MIDIs using config `%s`...',
        FLAGS.config)
    _check_extract_examples(input_1, FLAGS.input_midi_1, 1)
    _check_extract_examples(input_2, FLAGS.input_midi_2, 2)

  logging.info('Loading model...')
  if FLAGS.run_dir:
    checkpoint_dir_or_path = os.path.expanduser(
        os.path.join(FLAGS.run_dir, 'train'))
  else:
    checkpoint_dir_or_path = os.path.expanduser(FLAGS.checkpoint_file)
  model = TrainedModel(
      config, batch_size=min(FLAGS.max_batch_size, FLAGS.num_outputs),
      checkpoint_dir_or_path=checkpoint_dir_or_path)

  if FLAGS.mode == 'interpolate':
    logging.info('Interpolating...')
    _, mu, _ = model.encode([input_1, input_2])
    z = np.array([
        _slerp(mu[0], mu[1], t) for t in np.linspace(0, 1, FLAGS.num_outputs)])
    results = model.decode(
        length=config.hparams.max_seq_len,
        z=z,
        temperature=FLAGS.temperature)
  elif FLAGS.mode == 'sample':
    logging.info('Sampling...')
    results = model.sample(
        n=FLAGS.num_outputs,
        length=config.hparams.max_seq_len,
        temperature=FLAGS.temperature)

  basename = os.path.join(
      FLAGS.output_dir,
      '%s_%s_%s-*-of-%03d.mid' %
      (FLAGS.config, FLAGS.mode, date_and_time, FLAGS.num_outputs))
  logging.info('Outputting %d files as `%s`...', FLAGS.num_outputs, basename)
  for i, ns in enumerate(results):
    note_seq.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))

  logging.info('Done.')
Example #26
#@title Load Model
#@markdown The `ismir2021` model transcribes piano only, with note velocities.
#@markdown The `mt3` model transcribes multiple simultaneous instruments,
#@markdown but without velocities.

MODEL = "mt3" #@param["ismir2021", "mt3"]

checkpoint_path = f'/content/checkpoints/{MODEL}/'

inference_model = InferenceModel(checkpoint_path, MODEL)

#@title Upload Audio

audio = upload_audio(sample_rate=SAMPLE_RATE)
note_seq.notebook_utils.colab_play(audio, sample_rate=SAMPLE_RATE)

#@title Transcribe Audio
#@markdown This may take a few minutes depending on the length of the WAV file
#@markdown you uploaded.

est_ns = inference_model(audio)

note_seq.play_sequence(est_ns, synth=note_seq.fluidsynth, 
                       sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
note_seq.plot_sequence(est_ns)

#@title Download MIDI Transcription

note_seq.sequence_proto_to_midi_file(est_ns, '/tmp/transcribed.mid')
files.download('/tmp/transcribed.mid')
Example #27
midis = []
for idx, f in enumerate(tqdm(files)):
    midis.append(midi_io.midi_file_to_note_sequence(dest_folder + '/' + f))
    midis[idx].filename = f

#%%
import copy

k = copy.deepcopy(midis[0])
instruments, bass_idxs, melody_idxs = get_instruments_from_NoteSequence(k)


#%%
import note_seq
from note_seq.protobuf import music_pb2

new_tune = note_seq.protobuf.music_pb2.NoteSequence()
new_tune.tempos.add(qpm=k.tempos[0].qpm)
new_tune.filename = k.filename
for note in k.notes:
    if note.instrument in bass_idxs + melody_idxs:
        new_tune.notes.add(pitch=note.pitch,
                           start_time=note.start_time,
                           end_time=note.end_time,
                           velocity=note.velocity)


note_seq.sequence_proto_to_midi_file(new_tune, new_tune.filename)
#%%

Example #28
# Decode to NoteSequence.
midi_filename = decode(
    sample_ids,
    encoder=unconditional_encoders['targets'])
unconditional_ns = note_seq.midi_file_to_note_sequence(midi_filename)

# Play and plot.
note_seq.play_sequence(
    unconditional_ns,
    synth=note_seq.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
note_seq.plot_sequence(unconditional_ns)

#@title Download Performance as MIDI
#@markdown Download generated performance as MIDI (optional).

note_seq.sequence_proto_to_midi_file(
    unconditional_ns, '/tmp/unconditional.mid')
files.download('/tmp/unconditional.mid')

#@title Choose Priming Sequence
#@markdown Here you can choose a priming sequence to be continued
#@markdown by the model.  We have provided a few, or you can
#@markdown upload your own MIDI file.
#@markdown
#@markdown Set `max_primer_seconds` below to trim the primer to a
#@markdown fixed number of seconds (this will have no effect if
#@markdown the primer is already shorter than `max_primer_seconds`).

filenames = {
    'C major arpeggio': '/content/c_major_arpeggio.mid',
    'C major scale': '/content/c_major_scale.mid',
    'Clair de Lune': '/content/clair_de_lune.mid',