Example No. 1
import time
"""
###################################################################
#                       Configuration                             #
###################################################################
"""
# Default configuration, based on Magenta.
default_configs = {
    'drum_kit':
    events_rnn_model.EventSequenceRnnConfig(
        magenta.music.protobuf.generator_pb2.GeneratorDetails(
            id='drum_kit',
            description='Drums RNN with multiple drums and binary counters.'),
        magenta.music.LookbackEventSequenceEncoderDecoder(
            magenta.music.MultiDrumOneHotEncoding(),
            lookback_distances=[],
            binary_counter_bits=6),
        contrib_training.HParams(batch_size=64,
                                 rnn_layer_sizes=[256, 256],
                                 dropout_keep_prob=0.5,
                                 attn_length=32,
                                 clip_norm=3,
                                 learning_rate=0.001))
}

# TensorFlow Flags.
# Flags to configure evaluation and training.
Flags = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
    'run_dir', '/tmp/Deepbeat/logdir/run1',
    'Directory where the checkpoints and '
    'event summaries are stored. Separate directories will be created for '
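A minimal sketch, assuming only the `default_configs` dictionary above and the attribute names implied by the `EventSequenceRnnConfig` constructor (`details`, `encoder_decoder`, `hparams`), of how a configuration is typically selected and inspected:

# Select a configuration by key and inspect it before building a model.
config = default_configs['drum_kit']
print(config.details.id)                   # 'drum_kit'
print(config.hparams.batch_size)           # 64
print(config.encoder_decoder.num_classes)  # size of the one-hot event space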
Example No. 2
          The log likelihood of `drums` under this model.
        """
        return self._evaluate_log_likelihood([drums])[0]


# Default configurations.
default_configs = {
    'one_drum':
    events_rnn_model.EventSequenceRnnConfig(
        magenta.protobuf.generator_pb2.GeneratorDetails(
            id='one_drum', description='Drums RNN with 2-state encoding.'),
        magenta.music.OneHotEventSequenceEncoderDecoder(
            magenta.music.MultiDrumOneHotEncoding([
                [39] +  # use hand clap as default when decoding
                list(range(mm.MIN_MIDI_PITCH, 39)) +
                list(range(39, mm.MAX_MIDI_PITCH + 1))
            ])),
        magenta.common.HParams(batch_size=128,
                               rnn_layer_sizes=[128, 128],
                               dropout_keep_prob=0.5,
                               skip_first_n_losses=0,
                               clip_norm=5,
                               initial_learning_rate=0.001,
                               decay_steps=1000,
                               decay_rate=0.95)),
    'drum_kit':
    events_rnn_model.EventSequenceRnnConfig(
        magenta.protobuf.generator_pb2.GeneratorDetails(
            id='drum_kit',
            description='Drums RNN with multiple drums and binary counters.'),
        magenta.music.LookbackEventSequenceEncoderDecoder(
            magenta.music.MultiDrumOneHotEncoding(),
            lookback_distances=[],
Example No. 3
            branch_factor,
            steps_per_iteration,
            modify_events_callback=modify_events_callback)

    def polyphonic_sequence_log_likelihood(self, sequence):
        """Evaluate the log likelihood of a polyphonic sequence.

        Args:
          sequence: The PolyphonicSequence object for which to evaluate the log
              likelihood.

        Returns:
          The log likelihood of `sequence` under this model.
        """
        return self._evaluate_log_likelihood([sequence])[0]


default_configs = {
    'polyphony':
    events_rnn_model.EventSequenceRnnConfig(
        magenta.protobuf.generator_pb2.GeneratorDetails(
            id='polyphony', description='Polyphonic RNN'),
        magenta.music.OneHotEventSequenceEncoderDecoder(
            polyphony_encoder_decoder.PolyphonyOneHotEncoding()),
        tf.contrib.training.HParams(batch_size=64,
                                    rnn_layer_sizes=[256, 256, 256],
                                    dropout_keep_prob=0.5,
                                    clip_norm=5,
                                    learning_rate=0.001)),
}
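A hedged usage sketch for `polyphonic_sequence_log_likelihood` as shown above; the `PolyphonyRnnModel` wrapper class and the `magenta.models.polyphony_rnn.polyphony_model` module path are assumptions, and loading a trained checkpoint is left abstract:

from magenta.models.polyphony_rnn import polyphony_model  # assumed module path


def score_sequence(sequence):
  """Return the model's log likelihood for a PolyphonicSequence (sketch)."""
  model = polyphony_model.PolyphonyRnnModel(
      polyphony_model.default_configs['polyphony'])
  # A trained checkpoint would have to be loaded into `model` here.
  return model.polyphonic_sequence_log_likelihood(sequence)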
Example No. 4
      The log likelihood of `drums` under this model.
    """
    return self._evaluate_log_likelihood([drums])[0]


# Default configurations.
default_configs = {
    'one_drum':
        events_rnn_model.EventSequenceRnnConfig(
            magenta.music.protobuf.generator_pb2.GeneratorDetails(
                id='one_drum', description='Drums RNN with 2-state encoding.'),
            magenta.music.OneHotEventSequenceEncoderDecoder(
                magenta.music.MultiDrumOneHotEncoding(
                    [[39] +  # use hand clap as default when decoding
                     list(range(mm.MIN_MIDI_PITCH, 39)) +
                     list(range(39, mm.MAX_MIDI_PITCH + 1))])),
            contrib_training.HParams(
                batch_size=128,
                rnn_layer_sizes=[128, 128],
                dropout_keep_prob=0.5,
                clip_norm=5,
                learning_rate=0.001),
            steps_per_quarter=2),
    'drum_kit':
        events_rnn_model.EventSequenceRnnConfig(
            magenta.music.protobuf.generator_pb2.GeneratorDetails(
                id='drum_kit',
                description='Drums RNN with multiple drums and binary counters.'
            ),
            magenta.music.LookbackEventSequenceEncoderDecoder(
                magenta.music.MultiDrumOneHotEncoding(),
Example No. 5
  def setUp(self):
    self.config = events_rnn_model.EventSequenceRnnConfig(
        None, mm.PianorollEncoderDecoder(88),
        tf.contrib.training.HParams())
Example No. 6
  def setUp(self):
    super(PianorollPipelineTest, self).setUp()
    self.config = events_rnn_model.EventSequenceRnnConfig(
        None, note_seq.PianorollEncoderDecoder(88), contrib_training.HParams())
Example No. 7
  def setUp(self):
    self.config = events_rnn_model.EventSequenceRnnConfig(
        None,
        magenta.music.OneHotEventSequenceEncoderDecoder(
            polyphony_encoder_decoder.PolyphonyOneHotEncoding()),
        magenta.common.HParams())
Example No. 8
  def setUp(self):
    self.config = events_rnn_model.EventSequenceRnnConfig(
        None,
        magenta.music.OneHotEventSequenceEncoderDecoder(
            magenta.music.MultiDrumOneHotEncoding()),
        tf.contrib.training.HParams())
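A self-contained sketch of the kind of assertion such a fixture supports; the `magenta.models.shared` import path for events_rnn_model and the `encoder_decoder`/`num_classes` attributes are assumptions consistent with the other examples on this page:

import magenta
import tensorflow as tf
from magenta.models.shared import events_rnn_model


class DrumsConfigTest(tf.test.TestCase):

  def setUp(self):
    self.config = events_rnn_model.EventSequenceRnnConfig(
        None,
        magenta.music.OneHotEventSequenceEncoderDecoder(
            magenta.music.MultiDrumOneHotEncoding()),
        tf.contrib.training.HParams())

  def testEncoderDecoderIsWired(self):
    # GeneratorDetails is None on purpose; tests only need the
    # encoder/decoder and an empty HParams object.
    self.assertIsNotNone(self.config.encoder_decoder)
    self.assertGreater(self.config.encoder_decoder.num_classes, 0)


if __name__ == '__main__':
  tf.test.main()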
Example No. 9
    """
        return self._generate_events(num_steps=num_steps,
                                     primer_events=primer_sequence,
                                     temperature=None,
                                     beam_size=beam_size,
                                     branch_factor=branch_factor,
                                     steps_per_iteration=steps_per_iteration)


default_configs = {
    'rnn-nade':
    events_rnn_model.EventSequenceRnnConfig(
        magenta.protobuf.generator_pb2.GeneratorDetails(
            id='rnn-nade', description='RNN-NADE'),
        mm.PianorollEncoderDecoder(),
        tf.contrib.training.HParams(batch_size=64,
                                    rnn_layer_sizes=[128, 128, 128],
                                    nade_hidden_units=128,
                                    dropout_keep_prob=0.5,
                                    clip_norm=5,
                                    learning_rate=0.001)),
    'rnn-nade_attn':
    events_rnn_model.EventSequenceRnnConfig(
        magenta.protobuf.generator_pb2.GeneratorDetails(
            id='rnn-nade_attn', description='RNN-NADE with attention.'),
        mm.PianorollEncoderDecoder(),
        tf.contrib.training.HParams(batch_size=48,
                                    rnn_layer_sizes=[128, 128],
                                    attn_length=32,
                                    nade_hidden_units=128,
                                    dropout_keep_prob=0.5,
                                    clip_norm=5,
Example No. 10
  def setUp(self):
    self.config = events_rnn_model.EventSequenceRnnConfig(
        None, mm.PianorollEncoderDecoder(88), magenta.common.HParams())
Example No. 11
          beam_size: An integer, beam size to use when generating tracks via
              beam search.
          branch_factor: An integer, beam search branch factor to use.
          steps_per_iteration: An integer, number of steps to take per beam search
              iteration.

        Returns:
          The generated PolyphonicSequence object (which begins with the provided
          primer track).
        """
        return self._generate_events(num_steps, primer_sequence, temperature,
                                     beam_size, branch_factor,
                                     steps_per_iteration)


default_configs = {
    'polyphony':
    events_rnn_model.EventSequenceRnnConfig(
        magenta.protobuf.generator_pb2.GeneratorDetails(
            id='polyphony', description='Polyphonic RNN'),
        magenta.music.OneHotEventSequenceEncoderDecoder(
            polyphony_encoder_decoder.PolyphonyOneHotEncoding()),
        magenta.common.HParams(batch_size=64,
                               rnn_layer_sizes=[256, 256, 256],
                               dropout_keep_prob=0.5,
                               skip_first_n_losses=10,
                               clip_norm=5,
                               initial_learning_rate=0.001,
                               decay_steps=1000,
                               decay_rate=0.95)),
}
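A hedged sketch of calling the generation method whose tail appears above; the public name `generate_polyphonic_sequence` is an assumption (only the private `_generate_events` call is visible), while the parameter names come from the docstring in this example:

def continue_primer(model, primer_sequence, num_steps=128):
  """Extend a primer with beam search (a sketch around the method above)."""
  return model.generate_polyphonic_sequence(
      num_steps=num_steps,
      primer_sequence=primer_sequence,
      temperature=1.0,
      beam_size=4,        # explore four beams per step
      branch_factor=2,    # expand each beam into two candidates
      steps_per_iteration=1)

Larger beam_size and branch_factor values widen the search at the cost of generation speed.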
Example No. 12
                                     primer_events=primer_sequence,
                                     temperature=None,
                                     beam_size=beam_size,
                                     branch_factor=branch_factor,
                                     steps_per_iteration=steps_per_iteration)


default_configs = {
    'rnn-nade':
    events_rnn_model.EventSequenceRnnConfig(
        magenta.protobuf.generator_pb2.GeneratorDetails(
            id='rnn-nade', description='RNN-NADE'),
        mm.PianorollEncoderDecoder(),
        magenta.common.HParams(batch_size=64,
                               rnn_layer_sizes=[128, 128, 128],
                               nade_hidden_units=128,
                               dropout_keep_prob=0.5,
                               skip_first_n_losses=10,
                               clip_norm=5,
                               initial_learning_rate=0.001,
                               decay_steps=1000,
                               decay_rate=0.95)),
    'rnn-nade_attn':
    events_rnn_model.EventSequenceRnnConfig(
        magenta.protobuf.generator_pb2.GeneratorDetails(
            id='rnn-nade_attn', description='RNN-NADE with attention.'),
        mm.PianorollEncoderDecoder(),
        magenta.common.HParams(batch_size=48,
                               rnn_layer_sizes=[128, 128],
                               attn_length=32,
                               nade_hidden_units=128,
Example No. 13
  def setUp(self):
    super().setUp()
    self.config = events_rnn_model.EventSequenceRnnConfig(
        None,
        note_seq.OneHotEventSequenceEncoderDecoder(
            note_seq.MultiDrumOneHotEncoding()), contrib_training.HParams())
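A short sketch of the event space this fixture wires up; `encode_event`, `num_classes`, and the frozenset-of-pitches event representation are assumptions about the note_seq drum encoding interface:

import note_seq

encoding = note_seq.MultiDrumOneHotEncoding()
encoder_decoder = note_seq.OneHotEventSequenceEncoderDecoder(encoding)

# A drum event is modeled as the set of drums struck at one step.
hit = frozenset([36, 42])  # e.g. bass drum plus closed hi-hat (assumed pitches)
print(encoding.encode_event(hit))   # class index for this drum combination
print(encoder_decoder.num_classes)  # size of the one-hot event space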