Example #1
 def input_fn():
     input_gen = _decode_batch_input_fn_search_based(
             problem_id, num_decode_batches, sorted_inputs, inputs_vocab, targets_vocab,
             decode_hp.batch_size, decode_hp.max_input_size, searcher, translator,
             hparams.problems[problem_id])
     gen_fn = make_input_fn_from_generator(input_gen)
     example = gen_fn()
     return _decode_input_tensor_to_features_dict(example, hparams)
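All of the examples on this page follow the same contract: the generator handed to make_input_fn_from_generator must yield dictionaries of numpy arrays, and each call to the function it returns produces one example as a dictionary of tensors, with shapes and dtypes inferred from the first yield. A minimal self-contained sketch of that contract (the feature names and token ids here are made up for illustration):

import numpy as np
from tensor2tensor.utils import decoding

def toy_input_generator():
    # Yield one features dict per example; values must be numpy arrays.
    while True:
        yield {
            'inputs': np.array([[17, 42, 8, 1]], dtype=np.int32),  # fake token ids
            'decode_length': np.array(64, dtype=np.int32),
        }

gen_fn = decoding.make_input_fn_from_generator(toy_input_generator())
features = gen_fn()  # dict of tensors mirroring the keys/dtypes yielded above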
Example #2
 def input_fn():
   """Function for inputs generator."""
   input_gen = _decode_batch_input_fn(
       problem_id, num_decode_batches, inputs, inputs_vocab,
       self.decode_hp.batch_size, self.decode_hp.max_input_size)
   gen_fn = decoding.make_input_fn_from_generator(input_gen)
   example = gen_fn()
   return decoding._decode_input_tensor_to_features_dict(example, hparams)
Example #3
 def input_fn():
   """Generator that returns just the current query."""
   gen_fn = decoding.make_input_fn_from_generator(server_input_fn())
   example = gen_fn()
   # TODO(kstevens): Make this method public
   # pylint: disable=protected-access
   return decoding._interactive_input_tensor_to_features_dict(
       example, self.hparams)
Example #4
 def input_fn():
   """Generator that returns just the current query."""
   gen_fn = decoding.make_input_fn_from_generator(server_input_fn())
   example = gen_fn()
   # TODO(kstevens): Make this method public
   # pylint: disable=protected-access
   return decoding._interactive_input_tensor_to_features_dict(
       example, self.hparams)
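server_input_fn is not shown in Examples #3 and #4. Judging from the docstring ("returns just the current query"), it is presumably a generator that yields one query dict at a time; prediction ends when it raises StopIteration. A hypothetical sketch of such a generator (the exact tensor layout expected by _interactive_input_tensor_to_features_dict varies between tensor2tensor versions, so treat the 'inputs' encoding as illustrative only):

import numpy as np

def server_input_fn():
    # Hypothetical: serve queries from an in-memory list; estimator.predict
    # stops once this generator is exhausted (StopIteration).
    pending_queries = [np.array([12, 34, 56, 1], dtype=np.int32)]  # fake ids
    for query_ids in pending_queries:
        yield {'inputs': query_ids}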
Example #5
 def input_fn():
   """Function for inputs generator."""
   input_gen = _decode_batch_input_fn(
       num_decode_batches, inputs, inputs_vocab,
       self.decode_hp.batch_size, self.decode_hp.max_input_size)
   gen_fn = decoding.make_input_fn_from_generator(input_gen)
   example = gen_fn()
   return decoding._decode_input_tensor_to_features_dict(example,
                                                         self.hparams)
Example #6
 def input_fn():
     input_gen = decoding._decode_batch_input_fn(num_decode_batches,
                                                 sorted_inputs,
                                                 inputs_vocab,
                                                 decode_hp.batch_size,
                                                 decode_hp.max_input_size)
     gen_fn = decoding.make_input_fn_from_generator(input_gen)
     example = gen_fn()
     return decoding._decode_input_tensor_to_features_dict(example, hparams)
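The _decode_batch_input_fn helper used in Examples #2, #5, and #6 is private to tensor2tensor's decoding module, and its signature has changed across versions (hence the extra problem_id argument in some snippets). What matters for make_input_fn_from_generator is only that it yields one {'inputs': ...} dict per batch of id-encoded, padded inputs, roughly like this sketch:

import numpy as np

def batch_input_generator(sorted_input_ids, batch_size):
    # Rough sketch of the yielded shape: pad each batch of id sequences
    # to the longest sequence in that batch and emit it as one dict.
    for start in range(0, len(sorted_input_ids), batch_size):
        batch = sorted_input_ids[start:start + batch_size]
        longest = max(len(ids) for ids in batch)
        padded = [ids + [0] * (longest - len(ids)) for ids in batch]
        yield {'inputs': np.array(padded, dtype=np.int32)}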
Example #7
 def input_fn():
     input_gen = _decode_batch_input_fn(problem_id, num_decode_batches,
                                        str_tokens, inputs_vocab,
                                        decode_hp.batch_size,
                                        decode_hp.max_input_size)
     gen_fn = decoding.make_input_fn_from_generator(input_gen)
     example = gen_fn()
     return self._decode_input_tensor_to_features_dict(
         example, hparams, encoding_len=encoding_len)
Example #8
 def input_fn():
   """Input function returning features which is a dictionary of
     string feature name to `Tensor` or `SparseTensor`. If it returns a
     tuple, first item is extracted as features. Prediction continues until
     `input_fn` raises an end-of-input exception (`OutOfRangeError` or
     `StopIteration`)."""
   gen_fn = decoding.make_input_fn_from_generator(
       self.__interactive_input_fn())
   example = gen_fn()
   example = decoding._interactive_input_tensor_to_features_dict(
       example, self.hparams)
   return example
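The docstring above describes the contract from the Estimator's side. On the caller's side the pattern looks like this sketch, assuming estimator and input_fn as defined in the example above (handle_output is a hypothetical downstream callback; 'outputs' is the key the other examples on this page read as well):

predictions = estimator.predict(input_fn)  # lazy generator of result dicts
try:
    while True:
        output_ids = next(predictions)['outputs']
        handle_output(output_ids)  # hypothetical handler
except StopIteration:
    pass  # the input generator was exhausted, so prediction is done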
Example #9
 def input_fn():
   """Input function returning features which is a dictionary of
     string feature name to `Tensor` or `SparseTensor`. If it returns a
     tuple, first item is extracted as features. Prediction continues until
     `input_fn` raises an end-of-input exception (`OutOfRangeError` or
     `StopIteration`)."""
   gen_fn = decoding.make_input_fn_from_generator(
       self.__interactive_input_fn())
   example = gen_fn()
   example = decoding._interactive_input_tensor_to_features_dict(
       example, self.hparams)
   return example
Example #10
 def input_fn():
     # generator
     input_gen = _decode_batch_input_fn_yr(
         problem_id,
         num_decode_batches,
         sorted_inputs,
         inputs_vocab,
         decode_hp.batch_size,
         decode_hp.max_input_size,
         eos_required=eos_required)  # yield batch
     gen_fn = decoding.make_input_fn_from_generator(input_gen)
     example = gen_fn()
     return _decode_input_tensor_to_features_dict_yr(example, hparams)
Example #11
def generate(
    estimator,
    unconditional_encoders,
    decode_length,
    targets,
    primer_note_sequence,
):
    """
    Generate unconditioned music samples from estimator
    :param estimator: Transformer estimator
    :param unconditional_encoders: A dictionary mapping each feature key to
        its encoder.
    :param decode_length: The number of decode steps, which controls the
        duration of the generated music snippet.
    :param targets: Target input for the Transformer.
    :param primer_note_sequence: NoteSequence representing the primer.
    :return: None; the generated MIDI file is written to FLAGS.output_dir.
    """

    # Output filename
    tf.gfile.MakeDirs(FLAGS.output_dir)
    date_and_time = time.strftime("%Y-%m-%d_%H%M%S")
    base_name = os.path.join(FLAGS.output_dir,
                             f"unconditioned_{date_and_time:s}.mid")

    # Generating sample
    LOGGER.info("Generating sample.")
    input_function = decoding.make_input_fn_from_generator(
        unconditional_input_generator(targets, decode_length))
    unconditional_samples = estimator.predict(input_function,
                                              checkpoint_path=FLAGS.model_path)

    # Sample events
    LOGGER.info("Generating sample events.")
    sample_ids = next(unconditional_samples)["outputs"]

    # Decode to note sequence
    LOGGER.info("Decoding sample ID")
    midi_filename = decode(sample_ids,
                           encoder=unconditional_encoders["targets"])
    unconditional_note_seqs = mm.midi_file_to_note_sequence(midi_filename)

    # Append continuation to primer
    continuation_note_sequence = mm.concatenate_sequences(
        [primer_note_sequence, unconditional_note_seqs])

    # Saving MIDI file
    mm.sequence_proto_to_midi_file(continuation_note_sequence, base_name)
Example #12
def generate(estimator, unconditional_encoders, decode_length, targets,
             primer_ns):
    """
    Generate unconditioned music samples from estimator.
    :param estimator: Transformer estimator.
    :param unconditional_encoders: A dictionary mapping each feature key to
        its encoder.
    :param decode_length: The number of decode steps, which controls the
        duration of the generated music snippet.
    :param targets: Target input for the Transformer.
    :param primer_ns: NoteSequence representing the primer.
    :return: None; one MIDI file per sample is written to FLAGS.output_dir.
    """
    tf.gfile.MakeDirs(FLAGS.output_dir)
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    base_name = os.path.join(FLAGS.output_dir, 'moodzik_*.mid')  # '*' is filled with the sample index below
    utils.LOGGER.info('Generating %d samples with format %s' %
                      (FLAGS.num_samples, base_name))
    for i in range(FLAGS.num_samples):
        utils.LOGGER.info('Generating sample %d' % i)
        # Start the Estimator, loading from the specified checkpoint.
        input_fn = decoding.make_input_fn_from_generator(
            utils.unconditional_input_generator(targets, decode_length))
        unconditional_samples = estimator.predict(
            input_fn, checkpoint_path=FLAGS.model_path)

        # Generate sample events.
        utils.LOGGER.info('Generating sample.')
        sample_ids = next(unconditional_samples)['outputs']

        # Decode to NoteSequence
        utils.LOGGER.info('Decoding sample id')
        midi_filename = utils.decode(sample_ids,
                                     encoder=unconditional_encoders['targets'])
        unconditional_ns = utils.mm.midi_file_to_note_sequence(midi_filename)

        # Append continuation to primer if any.
        continuation_ns = utils.mm.concatenate_sequences(
            [primer_ns, unconditional_ns])
        utils.mm.sequence_proto_to_midi_file(
            continuation_ns, base_name.replace('*', '%03d' % i))
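Examples #11 and #12 call unconditional_input_generator without showing its body. Judging from the inline input_generator in Example #14 below, it presumably looks like this:

import numpy as np

def unconditional_input_generator(targets, decode_length):
    # Yield the same (targets, decode_length) pair forever; predict()
    # pulls one example per generated sample.
    while True:
        yield {
            'targets': np.array([targets], dtype=np.int32),
            'decode_length': np.array(decode_length, dtype=np.int32),
        }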
Example #13
    def initialize(self, is_conditioned=False):
        self.model_name = 'transformer'
        self.hparams_set = 'transformer_tpu'
        self.conditioned = is_conditioned
        if self.conditioned:
            self.ckpt_path = 'models/checkpoints/melody_conditioned_model_16.ckpt'
            problem = MelodyToPianoPerformanceProblem()
        else:
            self.ckpt_path = 'models/checkpoints/unconditional_model_16.ckpt'
            problem = PianoPerformanceLanguageModelProblem()

        self.encoders = problem.get_feature_encoders()

        # Set up hyperparams
        hparams = trainer_lib.create_hparams(hparams_set=self.hparams_set)
        trainer_lib.add_problem_hparams(hparams, problem)
        hparams.num_hidden_layers = 16
        hparams.sampling_method = 'random'

        # Set up decoding hyperparams
        decode_hparams = decoding.decode_hparams()
        decode_hparams.alpha = 0.0
        decode_hparams.beam_size = 1
        if self.conditioned:
            self.inputs = []
        else:
            self.targets = []

        self.decode_length = 0
        run_config = trainer_lib.create_run_config(hparams)
        estimator = trainer_lib.create_estimator(
            self.model_name, hparams, run_config,
            decode_hparams=decode_hparams)
        fnc = (self.input_generation_conditional
               if self.conditioned else self.input_generator_unconditional)
        input_fn = decoding.make_input_fn_from_generator(fnc())
        self.samples = estimator.predict(
            input_fn, checkpoint_path=self.ckpt_path)
        _ = next(self.samples)
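The trailing `_ = next(self.samples)` is the same "burn one" trick used in Examples #14 to #17: pulling a first, discarded prediction forces the Estimator to build its graph and load the checkpoint up front, so later requests return quickly. A hypothetical follow-up method for this class, assuming its input generator reads self.targets and self.decode_length the way the generators in the later examples do:

    def continue_sequence(self, encoded_primer, decode_length=1024):
        # Hypothetical helper: update the state the input generator reads,
        # then pull one real prediction from the long-lived predict() stream.
        self.targets = encoded_primer
        self.decode_length = decode_length
        return next(self.samples)['outputs']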
Example #14
def music_generator(primer='erik_gnossienne',
                    primer_begin_buffer=10,
                    primer_length=90,
                    output_path='.',
                    filename='./public/output'):
    SF2_PATH = './models/Yamaha-C5-Salamander-JNv5.1.sf2'
    SAMPLE_RATE = 16000

    # Upload a MIDI file and convert to NoteSequence.
    def upload_midi():
        data = list(files.upload().values())
        if len(data) > 1:
            print('Multiple files uploaded; using only one.')
        return mm.midi_to_note_sequence(data[0])

    # Decode a list of IDs.
    def decode(ids, encoder):
        ids = list(ids)
        if text_encoder.EOS_ID in ids:
            ids = ids[:ids.index(text_encoder.EOS_ID)]
        return encoder.decode(ids)

    model_name = 'transformer'
    hparams_set = 'transformer_tpu'
    ckpt_path = './models/checkpoints/unconditional_model_16.ckpt'

    class PianoPerformanceLanguageModelProblem(score2perf.Score2PerfProblem):
        @property
        def add_eos_symbol(self):
            return True

    problem = PianoPerformanceLanguageModelProblem()
    unconditional_encoders = problem.get_feature_encoders()

    # Set up HParams.
    hparams = trainer_lib.create_hparams(hparams_set=hparams_set)
    trainer_lib.add_problem_hparams(hparams, problem)
    hparams.num_hidden_layers = 16
    hparams.sampling_method = 'random'

    # Set up decoding HParams.
    decode_hparams = decoding.decode_hparams()
    decode_hparams.alpha = 0.0
    decode_hparams.beam_size = 1

    # Create Estimator.
    run_config = trainer_lib.create_run_config(hparams)
    estimator = trainer_lib.create_estimator(model_name,
                                             hparams,
                                             run_config,
                                             decode_hparams=decode_hparams)

    # These values will be changed by subsequent cells.
    targets = []
    decode_length = 0

    # Create input generator (so we can adjust priming and
    # decode length on the fly).
    def input_generator():
        # Reads targets/decode_length from the enclosing scope; the original
        # notebook declared them `global`, which breaks inside this function.
        while True:
            yield {
                'targets': np.array([targets], dtype=np.int32),
                'decode_length': np.array(decode_length, dtype=np.int32)
            }

    # Start the Estimator, loading from the specified checkpoint.
    input_fn = decoding.make_input_fn_from_generator(input_generator())
    unconditional_samples = estimator.predict(input_fn,
                                              checkpoint_path=ckpt_path)

    # "Burn" one.
    _ = next(unconditional_samples)

    filenames = {
        'C major arpeggio': './models/primers/c_major_arpeggio.mid',
        'C major scale': './models/primers/c_major_scale.mid',
        'Clair de Lune': './models/primers/clair_de_lune.mid',
        'Classical':
        'audio_midi/Classical_Piano_piano-midi.de_MIDIRip/bach/bach_846_format0.mid',
        'erik_gymnopedie': 'audio_midi/erik_satie/gymnopedie_1_(c)oguri.mid',
        'erik_gymnopedie_2': 'audio_midi/erik_satie/gymnopedie_2_(c)oguri.mid',
        'erik_gymnopedie_3': 'audio_midi/erik_satie/gymnopedie_3_(c)oguri.mid',
        'erik_gnossienne': 'audio_midi/erik_satie/gnossienne_1_(c)oguri.mid',
        'erik_gnossienne_2': 'audio_midi/erik_satie/gnossienne_2_(c)oguri.mid',
        'erik_gnossienne_3': 'audio_midi/erik_satie/gnossienne_3_(c)oguri.mid',
        'erik_gnossienne_dery':
        'audio_midi/erik_satie/gnossienne_1_(c)dery.mid',
        'erik_gnossienne_dery_2':
        'audio_midi/erik_satie/gnossienne_2_(c)dery.mid',
        'erik_gnossienne_dery_3':
        'audio_midi/erik_satie/gnossienne_3_(c)dery.mid',
        'erik_gnossienne_dery_5':
        'audio_midi/erik_satie/gnossienne_5_(c)dery.mid',
        'erik_gnossienne_dery_6':
        'audio_midi/erik_satie/gnossienne_6_(c)dery.mid',
        '1': 'audio_midi/erik_satie/1.mid',
        '2': 'audio_midi/erik_satie/2.mid',
        '3': 'audio_midi/erik_satie/3.mid',
        '4': 'audio_midi/erik_satie/4.mid',
        '5': 'audio_midi/erik_satie/5.mid',
        '6': 'audio_midi/erik_satie/6.mid',
        '7': 'audio_midi/erik_satie/7.mid',
        '8': 'audio_midi/erik_satie/8.mid',
        '9': 'audio_midi/erik_satie/9.mid',
        '10': 'audio_midi/erik_satie/10.mid',
    }
    # primer = 'C major scale'

    #if primer == 'Upload your own!':
    #  primer_ns = upload_midi()
    #else:
    #  # Use one of the provided primers.
    #  primer_ns = mm.midi_file_to_note_sequence(filenames[primer])
    primer_ns = mm.midi_file_to_note_sequence(filenames[primer])
    # Handle sustain pedal in the primer.
    primer_ns = mm.apply_sustain_control_changes(primer_ns)

    # Trim to desired number of seconds.
    max_primer_seconds = primer_length
    if primer_ns.total_time > max_primer_seconds:
        print('Primer is longer than %d seconds, truncating.' %
              max_primer_seconds)
        primer_ns = mm.extract_subsequence(
            primer_ns, primer_begin_buffer,
            max_primer_seconds + primer_begin_buffer)

    # Remove drums from primer if present.
    if any(note.is_drum for note in primer_ns.notes):
        print('Primer contains drums; they will be removed.')
        notes = [note for note in primer_ns.notes if not note.is_drum]
        del primer_ns.notes[:]
        primer_ns.notes.extend(notes)

    # Set primer instrument and program.
    for note in primer_ns.notes:
        note.instrument = 1
        note.program = 0

    ## Play and plot the primer.
    #mm.play_sequence(
    #    primer_ns,
    #    synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
    #mm.plot_sequence(primer_ns)
    mm.sequence_proto_to_midi_file(
        primer_ns, join(output_path, 'primer_{}.mid'.format(filename)))

    targets = unconditional_encoders['targets'].encode_note_sequence(primer_ns)

    # Remove the end token from the encoded primer.
    targets = targets[:-1]

    decode_length = max(0, 10000 - len(targets))
    if len(targets) >= 4096:
        print(
            'Primer has more events than maximum sequence length; nothing will be generated.'
        )

    # Generate sample events.
    sample_ids = next(unconditional_samples)['outputs']

    # Decode to NoteSequence.
    midi_filename = decode(sample_ids,
                           encoder=unconditional_encoders['targets'])
    ns = mm.midi_file_to_note_sequence(midi_filename)
    print('Sample IDs: {}'.format(sample_ids))
    print('Sample IDs length: {}'.format(len(sample_ids)))
    print('Encoder: {}'.format(unconditional_encoders['targets']))
    print('Unconditional Samples: {}'.format(unconditional_samples))
    # print('{}'.format(ns))

    # continuation_ns = mm.concatenate_sequences([primer_ns, ns])
    continuation_ns = ns
    # mm.play_sequence(
    #     continuation_ns,
    #     synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
    # mm.plot_sequence(continuation_ns)
    # try:
    audio = mm.fluidsynth(continuation_ns,
                          sample_rate=SAMPLE_RATE,
                          sf2_path=SF2_PATH)

    normalizer = float(np.iinfo(np.int16).max)
    array_of_ints = np.array(np.asarray(audio) * normalizer, dtype=np.int16)

    wavfile.write(join(output_path, filename + '.wav'), SAMPLE_RATE,
                  array_of_ints)
    print('[+] Output stored as {}'.format(filename + '.wav'))
    mm.sequence_proto_to_midi_file(
        continuation_ns,
        join(output_path, 'continuation_{}.mid'.format(filename)))
Example #15
def generate_midi(midi_input):
    # These values are read lazily by the input generator and updated below.
    inputs = []
    decode_length = 4096

    # Create input generator.
    def input_generator():
        # Reads inputs/decode_length from the enclosing scope; the original
        # notebook declared `inputs` global, which breaks inside a function.
        while True:
            yield {
                'inputs': np.array([[inputs]], dtype=np.int32),
                'targets': np.zeros([1, 0], dtype=np.int32),
                'decode_length': np.array(decode_length, dtype=np.int32)
            }


    # Start the Estimator, loading from the specified checkpoint.
    input_fn = decoding.make_input_fn_from_generator(input_generator())
    melody_conditioned_samples = estimator.predict(
        input_fn, checkpoint_path=ckpt_path)

    # "Burn" one.
    _ = next(melody_conditioned_samples)

    #@title Choose Melody
    #@markdown Here you can choose a melody to be accompanied by the
    #@markdown model.  We have provided a few, or you can upload a
    #@markdown MIDI file; if your MIDI file is polyphonic, the notes
    #@markdown with highest pitch will be used as the melody.

    # Tokens to insert between melody events.

    # @title Generate from Scratch
    # @markdown Generate a piano performance from scratch.
    # @markdown
    # @markdown This can take a minute or so depending on the length
    # @markdown of the performance the model ends up generating.
    # @markdown Because we use a
    # @markdown [representation](http://g.co/magenta/performance-rnn)
    # @markdown where each event corresponds to a variable amount of
    # @markdown time, the actual number of seconds generated may vary.
    event_padding = 2 * [note_seq.MELODY_NO_EVENT]
    events = [event + 12 if event != note_seq.MELODY_NO_EVENT else event
              for e in midi_input
              for event in [e] + event_padding]
    inputs = melody_conditioned_encoders['inputs'].encode(
        ' '.join(str(e) for e in events))
    melody_ns = note_seq.Melody(events).to_sequence(qpm=150)


    targets = []
    decode_length = 4096
    # decode_length = np.random.randint(len(inputs)*3,len(inputs)*5)
    # print(((decode_length) - len(inputs))/len(inputs))
    sample_ids = next(melody_conditioned_samples)['outputs']

    # Decode to NoteSequence.
    midi_filename = decode(
        sample_ids,
        encoder=melody_conditioned_encoders['targets'])
    accompaniment_ns = note_seq.midi_file_to_note_sequence(midi_filename)





    # Play and plot the melody.
    note_seq.play_sequence(
        melody_ns,
        synth=note_seq.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
    note_seq.plot_sequence(melody_ns)
Example #16
def input_generator():
    global targets
    global decode_length
    while True:
        yield {
            'targets': np.array([targets], dtype=np.int32),
            'decode_length': np.array(decode_length, dtype=np.int32)
        }


# These values will be changed by subsequent cells.
targets = []
decode_length = 0

# Start the Estimator, loading from the specified checkpoint.
input_fn = decoding.make_input_fn_from_generator(input_generator())
uncondi_samples = estimator.predict(input_fn,
                                    checkpoint_path=uncondi_ckpt_path)

# "Burn" one.
_ = next(uncondi_samples)
print("ㅇㅣ건잘되지않아?")
targets = []
decode_length = 1024

# Generate sample events.
sample_ids = next(uncondi_samples)['outputs']

# Decode to NoteSequence.
midi_filename = decode(sample_ids, encoder=uncondi_encoders['targets'])
uncondi_ns = mm.midi_file_to_note_sequence(midi_filename)
Example #17
def generate_midi(prime_loc, partial_loc, total_loc):

    # Create input generator (so we can adjust priming and
    # decode length on the fly).
    def input_generator():
        print('inside input_gen')
        # These values will be changed by subsequent cells.
        while True:
            yield {
                'targets': np.array([targets], dtype=np.int32),
                'decode_length': np.array(decode_length, dtype=np.int32)
            }

    # initializing targets and decoder_length
    targets = []
    decode_length = 0

    # Start the Estimator, loading from the specified checkpoint.
    input_fn = decoding.make_input_fn_from_generator(input_generator())

    unconditional_samples = estimator.predict(input_fn,
                                              checkpoint_path=ckpt_path)

    # "Burn" one.
    _ = next(unconditional_samples)

    # convert our input midi to note sequence.
    prime_ns = note_seq.midi_file_to_note_sequence(prime_loc)

    # Handle sustain pedal in the primer.
    primer_ns = note_seq.apply_sustain_control_changes(prime_ns)

    targets = unconditional_encoders['targets'].encode_note_sequence(primer_ns)

    # Remove the end token from the encoded primer.
    targets = targets[:-1]

    decode_length = max(0, np.random.randint(0, 10) + len(targets))
    if len(targets) >= 4096:
        print(
            'Primer has more events than maximum sequence length; nothing will be generated.'
        )

    print('generating the continuation of the input midi')
    # Generate sample events.
    sample_ids = next(unconditional_samples)['outputs']

    # Decode to NoteSequence.
    midi_filename = decode(sample_ids,
                           encoder=unconditional_encoders['targets'])
    ns = note_seq.midi_file_to_note_sequence(midi_filename)

    # Append continuation to primer.
    total_ns = note_seq.concatenate_sequences([primer_ns, ns])

    # saving our generated music for future reference
    note_seq.sequence_proto_to_midi_file(ns, partial_loc)
    note_seq.sequence_proto_to_midi_file(total_ns, total_loc)

    print('finished generating.... returning the final file')

    return partial_loc
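A hypothetical call, assuming the surrounding module has already set up estimator, unconditional_encoders, ckpt_path, and decode as in the earlier examples:

midi_path = generate_midi('primer.mid',
                          'continuation_only.mid',
                          'primer_plus_continuation.mid')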