Example #1
def import_songs():
    # Collect the MIDI files from each set.
    midi1_set = glob.glob("./MidiSet1/*.mid")
    midi2_set = glob.glob("./MidiSet2/*.mid")
    midi3_set = glob.glob("./MidiSet3/*.mid")

    # Convert every file to a NoteSequence proto.
    note1_set = [note_seq.midi_file_to_note_sequence(f) for f in midi1_set]
    note2_set = [note_seq.midi_file_to_note_sequence(f) for f in midi2_set]
    note3_set = [note_seq.midi_file_to_note_sequence(f) for f in midi3_set]

    # Optionally audition a sequence:
    # note_seq.play_sequence(note1_set[0], synth=note_seq.synthesize)
    return note1_set, note2_set, note3_set
Example #2
def main():
    path = "./midi_input/test1.mid"
    ns = note_seq.midi_file_to_note_sequence(path)
    music_vae_config_str = 'hierdec-mel_16bar'
    music_vae_checkpoint_dir = './../repository/musicvae_hierdec-mel_16bar'
    music_vae_model = generate_model(config_str=music_vae_config_str,
                                     checkpoint_dir=music_vae_checkpoint_dir)
    print(encode_ns(music_vae_model, ns))
Example #3
def main():
    test_target = './../midi_input/Anchor.mid'
    ns = note_seq.midi_file_to_note_sequence(test_target)
    target_instrument = skyline(ns)
    if target_instrument is None:
        print('No track selected')
    else:
        seq = get_new_ns(target_instrument, ns)
        out_path = './../midi_output/preprocess_output/out1.mid'
        # Dump the sequence before and after the MIDI round trip to compare.
        with open('./../1.txt', 'w+') as file1:
            file1.write(str(seq))
        note_seq.sequence_proto_to_midi_file(seq, out_path)
        with open('./../2.txt', 'w+') as file2:
            file2.write(str(note_seq.midi_file_to_note_sequence(out_path)))
Example #4
def convert_midi_files(args: tuple):
    # `args` is an (input_dir, files) tuple so the whole batch can be passed
    # as a single argument (e.g., through a process pool).
    input_dir, files = args
    sequences = []
    for file in files:
        print(f'Converting {file}...')
        input_path = os.path.join(input_dir, file)
        sequences.append(midi_file_to_note_sequence(input_path))
    return sequences
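The `(input_dir, files)` tuple argument suggests this converter is meant to be fanned out across worker processes. A minimal driver sketch, assuming `convert_midi_files` is defined as above and the chunking scheme is up to the caller; on platforms that spawn workers, run it under an `if __name__ == '__main__':` guard:

import multiprocessing
import os

def convert_directory_parallel(input_dir, num_workers=4):
    # One chunk of file names per worker; each chunk travels as a single
    # (input_dir, files) tuple, matching convert_midi_files' signature.
    files = [f for f in os.listdir(input_dir) if f.lower().endswith('.mid')]
    chunks = [(input_dir, files[i::num_workers]) for i in range(num_workers)]
    with multiprocessing.Pool(num_workers) as pool:
        results = pool.map(convert_midi_files, chunks)
    # Flatten the per-worker lists back into one list of NoteSequences.
    return [seq for chunk in results for seq in chunk]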
Example #5
def extract_track(input_directory, file_name, output_directory):
    path_mid = input_directory + "/" + file_name
    dump_path = output_directory + "/" + file_name + ".tmp"
    # algo according to script.py
    try:
        # preprocess(path_mid, dump_path)
        # algo according to filters
        # ns = note_seq.midi_file_to_note_sequence(dump_path)
        ns = note_seq.midi_file_to_note_sequence(path_mid)
        new_ns = pm.get_new_ns(pm.skyline(ns, mode='time_first'), ns)
        # save the output
        save_path = output_directory + "/" + file_name
        note_seq.sequence_proto_to_midi_file(new_ns, save_path)
    except Exception:
        # Fall back to a different skyline heuristic if the first pass fails.
        try:
            ns = note_seq.midi_file_to_note_sequence(path_mid)
            new_ns = pm.get_new_ns(pm.skyline(ns, mode='variance_first'), ns)
            save_path = output_directory + "/" + file_name
            note_seq.sequence_proto_to_midi_file(new_ns, save_path)
        except Exception:
            pass
    if os.path.exists(dump_path):
        os.remove(dump_path)
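A hypothetical driver for the function above, assuming flat input and output directories (the directory names are placeholders):

import os

input_dir, output_dir = './midi_input', './midi_output'
os.makedirs(output_dir, exist_ok=True)
for name in os.listdir(input_dir):
    if name.lower().endswith('.mid'):
        extract_track(input_dir, name, output_dir)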
Example #6
def detect_failure(file_path):
    """Check whether a .mid file is broken.

    Args:
        file_path: the path of the file, e.g., "D:/dataset/test.mid"

    Return:
        True if the file parses cleanly, False if it is broken.
    """
    try:
        note_seq.midi_file_to_note_sequence(file_path)
        return True
    except Exception:
        return False
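A typical use of this check is to screen a corpus before further processing. A minimal sketch, assuming a flat directory of MIDI files:

import os

def filter_broken_midis(midi_dir):
    # Keep only the files note_seq can parse without raising.
    good = []
    bad = 0
    for name in os.listdir(midi_dir):
        if not name.lower().endswith(('.mid', '.midi')):
            continue
        path = os.path.join(midi_dir, name)
        if detect_failure(path):
            good.append(path)
        else:
            bad += 1
    print('parsed %d files, skipped %d broken ones' % (len(good), bad))
    return good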
Example #7
def load_dataset(musicvae_model,
                 midi_directory="PATH",
                 pooling=True,
                 word_dict=word_dict,
                 max_num=1000):
    """ load midi dataset from a given directory

    Args:
        musicvae_model: the musicvae_model to load
        midi_directory: directory of midi files
        max_num: how many files to load
        word_dict: the dictionary of word from ppt
        pooling: whether use maximum pooling method

    Return:
        the x, y data ready for training
    """

    file_list = os.listdir(midi_directory)
    midi_wordvec_list = []
    midi_latentvec_list = []

    curr_num = 0
    for filename in file_list:
        if curr_num < max_num:
            midi_file = "%s/%s" % (midi_directory, filename)
            print(midi_file)
            if (".mid" in filename) or (".midi" in filename):
                file_valid, title = ppt.file_title(midi_file, word_dict)
                if file_valid:
                    midi_wordvec = bt.encode_nlp(title, pooling)
                    ns = note_seq.midi_file_to_note_sequence(midi_file)
                    # TODO: change this midi preprocessing method
                    # if True:
                    try:
                        new_ns = ppm.get_new_ns(ppm.skyline(ns), ns)
                        z_list, _, _ = lvg.encode_ns(musicvae_model, new_ns)
                        for z in z_list:
                            midi_latentvec_list.append(z)
                            midi_wordvec_list.append(midi_wordvec)
                        curr_num += 1
                        print("%04d/%04d: data loaded successfully at %s" %
                              (curr_num, max_num, filename))
                    except Exception:
                        print("unable to load the file at %s" % filename)
                else:
                    print("invalid midi file at %s" % filename)
    return midi_wordvec_list, midi_latentvec_list
Example #8
def parse(self):
    for fil in self.files:
        seq = note_seq.midi_file_to_note_sequence(self.path + fil)
        # One header row, then one row per note; the tempo column repeats
        # the sequence's first tempo for every note.
        messages = [['pitch', 'velocity', 'start_time', 'end_time', 'tempo']]
        for note in seq.notes:
            messages.append([
                note.pitch, note.velocity, note.start_time, note.end_time,
                seq.tempos[0].qpm
            ])
        with open('parsed_songs/' + fil[:-4] + '.csv', 'w+') as my_csv:
            csv_writer = csv.writer(my_csv, delimiter=',')
            csv_writer.writerows(messages)
Example #9
def prepare_dataset(musicvae_model, dataset_path, stored_path,
                    word_dict=word_dct, max_num=1000):
    file_list = os.listdir(dataset_path)
    curr_num = 0
    for filename in file_list:
        if curr_num < max_num:
            midi_file = "%s/%s" % (dataset_path, filename)
            midi_store_path = "%s/%s" % (stored_path,
                                         os.path.splitext(filename)[0])
            print(midi_file)
            if (".mid" in filename) or (".midi" in filename):
                file_valid, title = ppt.file_title(midi_file, word_dict)
                if file_valid:
                    midi_wordvec = bt.encode_nlp(title)
                    ns = note_seq.midi_file_to_note_sequence(midi_file)
                    try:
                        if not os.path.exists(midi_store_path):
                            os.mkdir(midi_store_path)
                        np.save("%s/name.npy" % midi_store_path, midi_wordvec)
                        new_ns = ppm.get_new_ns(ppm.skyline(ns), ns)
                        # TODO(wwh): check the effectiveness of not using filters
                        # z_list, mu_list, sigma_list = lvg.encode_ns(musicvae_model, new_ns)
                        z_list, mu_list, sigma_list = lvg.encode_ns(
                            musicvae_model, ns)
                        # Save each latent component to its own subdirectory.
                        for name, vecs in (('z', z_list), ('mu', mu_list),
                                           ('sigma', sigma_list)):
                            subdir = os.path.join(midi_store_path, name)
                            os.mkdir(subdir)
                            for i, vec in enumerate(vecs):
                                np.save(
                                    os.path.join(subdir, '%02d.npy' % i), vec)
                        curr_num += 1
                        print("%04d/%04d: data loaded successfully at %s" %
                              (curr_num, max_num, filename))
                    except Exception:
                        # Remove the partially written directory on failure.
                        shutil.rmtree(midi_store_path)
                        print("unable to load the file at %s" % filename)
                else:
                    print("invalid midi file at %s" % filename)
Example #10
def sequence_midi_files_generative(input_dir: str, output_dir: str,
                                   model_file: str):
    model = utils.load_model(model_file)
    files = [
        file for file in os.listdir(input_dir) if file.lower().endswith('.mid')
    ]
    for file in files:
        print(f'Sequencing {file}...')
        notesequence = midi_file_to_note_sequence(f'{input_dir}/{file}')
        seq_arr = utils.seq_to_arr(notesequence, 8)
        num_features = len(seq_arr[0][1:])
        results = sequence_midi_file(seq_arr, model, num_features, 10)
        integrate_output(notesequence, results)
        sequence_proto_to_midi_file(notesequence,
                                    f'{output_dir}/{file[:-4]}_gen.mid')

    print('Done')
Example #11
def convert_to_piano(path, output_path):
    """convert a midi file to piano
    Args:
        path: the path of the midi file
        output_path: the output file path that contains the whole name, e.g., D:/out/out1.mid

    Return:
        None
    """
    noteseq = ns.midi_file_to_note_sequence(path)
    # TODO(wwh): tune the attenuation factor for non-primary tracks
    for note in noteseq.notes:
        if note.program != 68:
            note.velocity = int(note.velocity / 2)
        note.program = 0
    # Dump the converted sequence for inspection.
    with open('after.txt', 'w+') as file2:
        file2.write(str(noteseq))
    ns.note_sequence_to_midi_file(noteseq, output_path)
Example #12
def sequence_midi_files_transform(input_dir: str, output_dir: str,
                                  model_file: str):
    model = utils.load_model(model_file)
    files = [
        file for file in os.listdir(input_dir) if file.lower().endswith('.mid')
    ]
    for file in files:
        print(f'Sequencing {file}...')
        notesequence = midi_file_to_note_sequence(f'{input_dir}/{file}')
        seq_arr = utils.seq_to_arr(notesequence, 8)
        num_features = len(seq_arr[0][1:])
        inputs = torch.tensor(
            np.array(seq_arr)[:, 1:]).view(1, -1, num_features)
        inputs = inputs.cuda().float()
        with torch.no_grad():
            results = model(inputs)
        integrate_output(notesequence, results.view(-1).tolist())
        sequence_proto_to_midi_file(notesequence,
                                    f'{output_dir}/{file[:-4]}_trans.mid')
Example #13
def encode(trained_model, midi_batch=None):
    """encode a midi_batch according to a trained model.

    Args:
        trained_model: the trained model, may be loaded from checkpoints or
          use the music_vae.trained_model.TrainedModel method.
        midi_batch: the input batch of midi file names.

    Return:
        a dictionary latent_vecs keyed by midi file name, e.g.:
          >>> latent_vecs['test.midi']
          (z, mu, sigma)
    """
    noteseqs = {}
    for midi_path in midi_batch:
        noteseq = note_seq.midi_file_to_note_sequence(midi_path)
        midi_file_name = os.path.basename(midi_path)
        noteseqs[midi_file_name] = noteseq
    # Note: before encoding note sequences, always check the length of the
    # tensors they convert to; if more than one tensor is extracted, the
    # sequence has been split and each segment is encoded separately.
    latent_vecs = {}
    config = configs.CONFIG_MAP['hierdec-mel_16bar']
    for midi_file_name in noteseqs:
        noteseq = noteseqs[midi_file_name]
        tensors = config.data_converter.to_tensors(noteseq)
        if not tensors.inputs:
            raise NoExtractedExamplesError(
                'No examples extracted from NoteSequence: %s' % midi_file_name)
        inputs = list(tensors.inputs)
        controls = list(tensors.controls)
        lengths = list(tensors.lengths)
        z, mu, sigma = trained_model.encode_tensors(inputs, lengths, controls)
        latent_vecs[midi_file_name] = (z, mu, sigma)
    return latent_vecs
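Following the usage shown in the docstring, a sketch of calling `encode` on a small batch; the checkpoint path is a placeholder, and the config matches the one hard-coded inside the function:

from magenta.models.music_vae import configs
from magenta.models.music_vae.trained_model import TrainedModel

config = configs.CONFIG_MAP['hierdec-mel_16bar']
model = TrainedModel(config, batch_size=4,
                     checkpoint_dir_or_path='/path/to/hierdec-mel_16bar.ckpt')

latent_vecs = encode(model, midi_batch=['a.mid', 'b.mid'])
z, mu, sigma = latent_vecs['a.mid']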
Example #14
import note_seq
import numpy as np
import operator
import sympy as sym
import sys

targ, inter = sys.argv[1], sys.argv[2]

targ_seq = note_seq.midi_file_to_note_sequence(targ)
inter_seq = note_seq.midi_file_to_note_sequence(inter)

fin_targ = [(note.pitch, note.start_time, note.end_time)
            for note in targ_seq.notes]
fin_inter = [(note.pitch, note.start_time, note.end_time)
             for note in inter_seq.notes]

def compare_sequences(target, interpreted):
    """Calculate the average difference in semitones between two note lists."""
    x, y = sym.symbols('x y')
    f, g = sym.symbols('target interpreted', cls=sym.Function)

    # `target`/`interpreted` are lists of (pitch, start_time, end_time)
    # tuples, as built above.
    trg = list(target)
    terp = list(interpreted)

    # Each note becomes a piecewise condition that holds while the note sounds.
    trg_funcs = [(note[0], sym.And(note[1] <= x, x <= note[2]))
                 for note in trg]
    # (The rest of this function is truncated in the source.)


final_mu = compare_sequences(fin_targ, fin_inter)
Example #15
def generate_midi(prime_loc, partial_loc, total_loc):

    # Create input generator (so we can adjust priming and
    # decode length on the fly).
    def input_generator():
        print('inside input_gen')
        # These values will be changed by subsequent cells.
        while True:
            yield {
                'targets': np.array([targets], dtype=np.int32),
                'decode_length': np.array(decode_length, dtype=np.int32)
            }

    # initializing targets and decoder_length
    targets = []
    decode_length = 0

    # Start the Estimator, loading from the specified checkpoint.
    input_fn = decoding.make_input_fn_from_generator(input_generator())

    unconditional_samples = estimator.predict(input_fn,
                                              checkpoint_path=ckpt_path)

    # "Burn" one.
    _ = next(unconditional_samples)

    # convert our input midi to note sequence.
    prime_ns = note_seq.midi_file_to_note_sequence(prime_loc)

    # Handle sustain pedal in the primer.
    primer_ns = note_seq.apply_sustain_control_changes(prime_ns)

    targets = unconditional_encoders['targets'].encode_note_sequence(primer_ns)

    # Remove the end token from the encoded primer.
    targets = targets[:-1]

    decode_length = max(0, np.random.randint(0, 10) + len(targets))
    if len(targets) >= 4096:
        print(
            'Primer has more events than maximum sequence length; nothing will be generated.'
        )

    print('generating the continuation of the input midi')
    # Generate sample events.
    sample_ids = next(unconditional_samples)['outputs']

    # Decode to NoteSequence.
    midi_filename = decode(sample_ids,
                           encoder=unconditional_encoders['targets'])
    ns = note_seq.midi_file_to_note_sequence(midi_filename)

    # Append continuation to primer.
    total_ns = note_seq.concatenate_sequences([primer_ns, ns])

    # saving our generated music for future reference
    note_seq.sequence_proto_to_midi_file(ns, partial_loc)
    note_seq.sequence_proto_to_midi_file(total_ns, total_loc)

    print('finished generating.... returning the final file')

    return partial_loc
Example #16
#@markdown Because we use a
#@markdown [representation](http://g.co/magenta/performance-rnn)
#@markdown where each event corresponds to a variable amount of
#@markdown time, the actual number of seconds generated may vary.

targets = []
decode_length = 1024

# Generate sample events.
sample_ids = next(unconditional_samples)['outputs']

# Decode to NoteSequence.
midi_filename = decode(
    sample_ids,
    encoder=unconditional_encoders['targets'])
unconditional_ns = note_seq.midi_file_to_note_sequence(midi_filename)

# Play and plot.
note_seq.play_sequence(
    unconditional_ns,
    synth=note_seq.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
note_seq.plot_sequence(unconditional_ns)

#@title Download Performance as MIDI
#@markdown Download generated performance as MIDI (optional).

note_seq.sequence_proto_to_midi_file(
    unconditional_ns, '/tmp/unconditional.mid')
files.download('/tmp/unconditional.mid')

Example #17
from note_seq import midi_file_to_note_sequence

file = '/Users/Leo/Documents/data/lmd_full/1/1a0b97ea56d84ab9b74bbf9e8104bd93.mid'
a = midi_file_to_note_sequence(file)
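Once loaded, the returned NoteSequence proto can be inspected directly; a small sketch using fields that appear in the other examples on this page:

print('total time (s):', a.total_time)
print('tempo (qpm):', a.tempos[0].qpm if a.tempos else 'n/a')
for note in a.notes[:5]:
    print(note.pitch, note.velocity, note.start_time, note.end_time)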
Example #18
def harmonize(file_path, output_dir, coconet_model, batch_size=1,
              file_name='', to_piano=False):
    """harmonize a midi file

    Args:
        file_path: the input path
        coconet_model: the loaded coconet model
        output_dir: the output path
        file_name: the name of the generated piece
        batch_size: how many samples to generate each time
        to_piano: whether or not to convert the output to piano

    Return:
        None
    """
    # Dump the original sequence for inspection.
    noteseq = ns.midi_file_to_note_sequence(file_path)
    with open('original.txt', 'w+') as file1:
        file1.write(str(noteseq))
    strategy = "harmonize_midi_melody"
    generator = cs.Generator(coconet_model, strategy)
    midi_outs = generator.run_generation(
        midi_in=pretty_midi.PrettyMIDI(file_path),
        gen_batch_size=batch_size)

    # Creates a folder for storing the process of the sampling.
    label = "%s_harmonized_%s" % (file_name, lib_util.timestamp())
    basepath = os.path.join(output_dir, label)
    tf.logging.info("basepath: %s", basepath)
    tf.gfile.MakeDirs(basepath)

    # Saves the results as midi or returns as midi out.
    midi_path = os.path.join(basepath, "midi")
    tf.gfile.MakeDirs(midi_path)
    tf.logging.info("Made directory %s", midi_path)
    cs.save_midis(midi_outs, midi_path, label)

    result_npy_save_path = os.path.join(basepath, "generated_result.npy")
    tf.logging.info("Writing final result to %s", result_npy_save_path)
    with tf.gfile.Open(result_npy_save_path, "wb") as p:
        np.save(p, generator.pianorolls)

    # Stores all the (intermediate) steps.
    intermediate_steps_path = os.path.join(basepath, "intermediate_steps.npz")
    with lib_util.timing("writing_out_sample_npz"):
        tf.logging.info("Writing intermediate steps to %s", intermediate_steps_path)
        generator.logger.dump(intermediate_steps_path)

    # Save the prime as midi and npy if in harmonization mode.
    # First, checks the stored npz for the first (context) and last step.
    tf.logging.info("Reading to check %s", intermediate_steps_path)
    with tf.gfile.Open(intermediate_steps_path, "rb") as p:
        foo = np.load(p)
        for key in foo.keys():
            if re.match(r"0_root/.*?_strategy/.*?_context/0_pianorolls", key):
                context_rolls = foo[key]
                context_fpath = os.path.join(basepath, "context.npy")
                tf.logging.info("Writing context to %s", context_fpath)
                with lib_util.atomic_file(context_fpath) as context_p:
                    np.save(context_p, context_rolls)
                if "harm" in strategy:
                    # Only synthesize the one prime if in Midi-melody-prime mode.
                    primes = context_rolls
                    if "Melody" in strategy:
                        primes = [context_rolls[0]]
                    prime_midi_outs = cs.get_midi_from_pianorolls(primes, generator.decoder)
                    cs.save_midis(prime_midi_outs, midi_path, label + "_prime")
                break
    tf.logging.info("Done")
    if to_piano:
        file_list = os.listdir(midi_path)
        piano_path = os.path.join(
            output_dir,
            "%s_piano_harmonized_%s" % (file_name, lib_util.timestamp()))
        tf.gfile.MakeDirs(piano_path)
        # `listdir` yields bare file names; use a distinct loop variable so
        # the `file_path` argument is not shadowed.
        for midi_file_name in file_list:
            save_path = os.path.join(piano_path, midi_file_name)
            convert_to_piano(os.path.join(midi_path, midi_file_name), save_path)
Example #19
def run(config_map):
  """Load model params, save config file and start trainer.

  Args:
    config_map: Dictionary mapping configuration name to Config object.

  Raises:
    ValueError: if required flags are missing or invalid.
  """
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')

  # Parenthesized equality enforces "exactly one specified"; the original
  # chained comparison did not catch the case where both flags are set.
  if (FLAGS.run_dir is None) == (FLAGS.checkpoint_file is None):
    raise ValueError(
        'Exactly one of `--run_dir` or `--checkpoint_file` must be specified.')
  if FLAGS.output_dir is None:
    raise ValueError('`--output_dir` is required.')
  tf.gfile.MakeDirs(FLAGS.output_dir)
  if FLAGS.mode != 'sample' and FLAGS.mode != 'interpolate':
    raise ValueError('Invalid value for `--mode`: %s' % FLAGS.mode)

  if FLAGS.config not in config_map:
    raise ValueError('Invalid config name: %s' % FLAGS.config)
  config = config_map[FLAGS.config]
  config.data_converter.max_tensors_per_item = None

  if FLAGS.mode == 'interpolate':
    if FLAGS.input_midi_1 is None or FLAGS.input_midi_2 is None:
      raise ValueError(
          '`--input_midi_1` and `--input_midi_2` must be specified in '
          '`interpolate` mode.')
    input_midi_1 = os.path.expanduser(FLAGS.input_midi_1)
    input_midi_2 = os.path.expanduser(FLAGS.input_midi_2)
    if not os.path.exists(input_midi_1):
      raise ValueError('Input MIDI 1 not found: %s' % FLAGS.input_midi_1)
    if not os.path.exists(input_midi_2):
      raise ValueError('Input MIDI 2 not found: %s' % FLAGS.input_midi_2)
    input_1 = note_seq.midi_file_to_note_sequence(input_midi_1)
    input_2 = note_seq.midi_file_to_note_sequence(input_midi_2)

    def _check_extract_examples(input_ns, path, input_number):
      """Make sure each input returns exactly one example from the converter."""
      tensors = config.data_converter.to_tensors(input_ns).outputs
      if not tensors:
        print(
            'MusicVAE configs have very specific input requirements. Could not '
            'extract any valid inputs from `%s`. Try another MIDI file.' % path)
        sys.exit()
      elif len(tensors) > 1:
        basename = os.path.join(
            FLAGS.output_dir,
            '%s_input%d-extractions_%s-*-of-%03d.mid' %
            (FLAGS.config, input_number, date_and_time, len(tensors)))
        for i, ns in enumerate(config.data_converter.from_tensors(tensors)):
          note_seq.sequence_proto_to_midi_file(
              ns, basename.replace('*', '%03d' % i))
        print(
            '%d valid inputs extracted from `%s`. Outputting these potential '
            'inputs as `%s`. Call script again with one of these instead.' %
            (len(tensors), path, basename))
        sys.exit()
    logging.info(
        'Attempting to extract examples from input MIDIs using config `%s`...',
        FLAGS.config)
    _check_extract_examples(input_1, FLAGS.input_midi_1, 1)
    _check_extract_examples(input_2, FLAGS.input_midi_2, 2)

  logging.info('Loading model...')
  if FLAGS.run_dir:
    checkpoint_dir_or_path = os.path.expanduser(
        os.path.join(FLAGS.run_dir, 'train'))
  else:
    checkpoint_dir_or_path = os.path.expanduser(FLAGS.checkpoint_file)
  model = TrainedModel(
      config, batch_size=min(FLAGS.max_batch_size, FLAGS.num_outputs),
      checkpoint_dir_or_path=checkpoint_dir_or_path)

  if FLAGS.mode == 'interpolate':
    logging.info('Interpolating...')
    _, mu, _ = model.encode([input_1, input_2])
    z = np.array([
        _slerp(mu[0], mu[1], t) for t in np.linspace(0, 1, FLAGS.num_outputs)])
    results = model.decode(
        length=config.hparams.max_seq_len,
        z=z,
        temperature=FLAGS.temperature)
  elif FLAGS.mode == 'sample':
    logging.info('Sampling...')
    results = model.sample(
        n=FLAGS.num_outputs,
        length=config.hparams.max_seq_len,
        temperature=FLAGS.temperature)

  basename = os.path.join(
      FLAGS.output_dir,
      '%s_%s_%s-*-of-%03d.mid' %
      (FLAGS.config, FLAGS.mode, date_and_time, FLAGS.num_outputs))
  logging.info('Outputting %d files as `%s`...', FLAGS.num_outputs, basename)
  for i, ns in enumerate(results):
    note_seq.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))

  logging.info('Done.')
Example #20
def generate_midi(midi_input):
    # These values are read lazily by the generator below (via closure) and
    # are updated before each sample is drawn.
    inputs = []
    decode_length = 0

    # Create input generator.
    def input_generator():
        while True:
            yield {
                'inputs': np.array([[inputs]], dtype=np.int32),
                'targets': np.zeros([1, 0], dtype=np.int32),
                'decode_length': np.array(decode_length, dtype=np.int32)
            }

    # Start the Estimator, loading from the specified checkpoint.
    input_fn = decoding.make_input_fn_from_generator(input_generator())
    melody_conditioned_samples = estimator.predict(
        input_fn, checkpoint_path=ckpt_path)

    # "Burn" one.
    _ = next(melody_conditioned_samples)

    #@title Choose Melody
    #@markdown The model accompanies the given melody; if the source MIDI
    #@markdown is polyphonic, the notes with highest pitch are used as the
    #@markdown melody. Because each event in the performance representation
    #@markdown (http://g.co/magenta/performance-rnn) corresponds to a
    #@markdown variable amount of time, the number of seconds generated
    #@markdown may vary.

    # Two no-op tokens are inserted between melody events, and the melody
    # is shifted up an octave (+12 semitones).
    event_padding = 2 * [note_seq.MELODY_NO_EVENT]
    events = [event + 12 if event != note_seq.MELODY_NO_EVENT else event
              for e in midi_input
              for event in [e] + event_padding]
    inputs = melody_conditioned_encoders['inputs'].encode(
        ' '.join(str(e) for e in events))
    melody_ns = note_seq.Melody(events).to_sequence(qpm=150)


    decode_length = 4096
    # decode_length = np.random.randint(len(inputs) * 3, len(inputs) * 5)
    sample_ids = next(melody_conditioned_samples)['outputs']

    # Decode to NoteSequence.
    midi_filename = decode(
        sample_ids,
        encoder=melody_conditioned_encoders['targets'])
    accompaniment_ns = note_seq.midi_file_to_note_sequence(midi_filename)

    # Play and plot the melody.
    note_seq.play_sequence(
        melody_ns,
        synth=note_seq.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
    note_seq.plot_sequence(melody_ns)