    def __init__(self, checkpoint=None, bundle=None):
        details = generator_pb2.GeneratorDetails(id='test_generator',
                                                 description='Test Generator')

        super(SequenceGenerator, self).__init__(
            Model(), details, checkpoint=checkpoint, bundle=bundle)

    def testUseMatchingGeneratorId(self):
        bundle = generator_pb2.GeneratorBundle(
            generator_details=generator_pb2.GeneratorDetails(
                id='test_generator'),
            checkpoint_file=[b'foo.ckpt'],
            metagraph_file=b'foo.ckpt.meta')

        SequenceGenerator(bundle=bundle)

        bundle.generator_details.id = 'blarg'

        with self.assertRaises(sequence_generator.SequenceGeneratorError):
            SequenceGenerator(bundle=bundle)

    def testSpecifyEitherCheckPointOrBundle(self):
        bundle = generator_pb2.GeneratorBundle(
            generator_details=generator_pb2.GeneratorDetails(
                id='test_generator'),
            checkpoint_file=[b'foo.ckpt'],
            metagraph_file=b'foo.ckpt.meta')

        with self.assertRaises(sequence_generator.SequenceGeneratorError):
            SequenceGenerator(checkpoint='foo.ckpt', bundle=bundle)
        with self.assertRaises(sequence_generator.SequenceGeneratorError):
            SequenceGenerator(checkpoint=None, bundle=None)

        SequenceGenerator(checkpoint='foo.ckpt')
        SequenceGenerator(bundle=bundle)

    def testGetBundleDetails(self):
        # Test with non-bundle generator.
        seq_gen = SequenceGenerator(checkpoint='foo.ckpt')
        self.assertIsNone(seq_gen.bundle_details)

        # Test with bundle-based generator.
        bundle_details = generator_pb2.GeneratorBundle.BundleDetails(
            description='bundle of joy')
        bundle = generator_pb2.GeneratorBundle(
            generator_details=generator_pb2.GeneratorDetails(
                id='test_generator'),
            bundle_details=bundle_details,
            checkpoint_file=[b'foo.ckpt'],
            metagraph_file=b'foo.ckpt.meta')
        seq_gen = SequenceGenerator(bundle=bundle)
        self.assertEqual(bundle_details, seq_gen.bundle_details)
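

# Usage sketch (not one of the tests above): in practice a generator is
# usually built from a .mag bundle file read off disk rather than from an
# in-memory proto. The read helper is assumed to be
# magenta.music.sequence_generator_bundle.read_bundle_file; the path below is
# illustrative only.
def _load_generator_from_bundle(bundle_path='/tmp/test_generator.mag'):
    from magenta.music import sequence_generator_bundle

    bundle = sequence_generator_bundle.read_bundle_file(bundle_path)
    # Exactly one of `checkpoint` or `bundle` may be supplied, as the tests
    # above assert.
    return SequenceGenerator(bundle=bundle)
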
        if (transpose_to_key is not None
                and (transpose_to_key < 0
                     or transpose_to_key > mm.NOTES_PER_OCTAVE - 1)):
            raise ValueError('transpose_to_key must be >= 0 and <= 11. '
                             'transpose_to_key is %d.' % transpose_to_key)

        self.min_note = min_note
        self.max_note = max_note
        self.transpose_to_key = transpose_to_key


# Default configurations.
default_configs = {
    'basic_rnn':
    MelodyRnnConfig(
        generator_pb2.GeneratorDetails(
            id='basic_rnn', description='Melody RNN with one-hot encoding.'),
        magenta.music.OneHotEventSequenceEncoderDecoder(
            magenta.music.MelodyOneHotEncoding(min_note=DEFAULT_MIN_NOTE,
                                               max_note=DEFAULT_MAX_NOTE)),
        contrib_training.HParams(batch_size=128,
                                 rnn_layer_sizes=[128, 128],
                                 dropout_keep_prob=0.5,
                                 clip_norm=5,
                                 learning_rate=0.001)),
    'mono_rnn':
    MelodyRnnConfig(
        generator_pb2.GeneratorDetails(
            id='mono_rnn', description='Monophonic RNN with one-hot encoding.'),
        magenta.music.OneHotEventSequenceEncoderDecoder(
            magenta.music.MelodyOneHotEncoding(min_note=0,
                                               max_note=128)),
        # The remaining arguments were truncated in the excerpt; the
        # hyperparameters below are assumed to mirror 'basic_rnn' above, and
        # the note range mirrors the encoding.
        contrib_training.HParams(batch_size=128,
                                 rnn_layer_sizes=[128, 128],
                                 dropout_keep_prob=0.5,
                                 clip_norm=5,
                                 learning_rate=0.001),
        min_note=0,
        max_note=128,
        transpose_to_key=None),
}
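

# Usage sketch: a config is looked up by id and carries the generator
# details, the encoder/decoder, the hyperparameters, and the note-range
# settings validated in MelodyRnnConfig.__init__ above. The `details` and
# `hparams` attribute names are assumed from the EventSequenceRnnConfig base
# class; the note-range fields follow the assignments shown earlier.
def _describe_melody_config(config_id='basic_rnn'):
    config = default_configs[config_id]
    print('%s: %s' % (config.details.id, config.details.description))
    print('batch size: %d' % config.hparams.batch_size)
    print('note range: [%d, %d), transpose_to_key=%s' %
          (config.min_note, config.max_note, config.transpose_to_key))
    return config
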
      steps_per_iteration: The number of steps to take per beam search
          iteration.
    Returns:
      The generated PianorollSequence object (which begins with the provided
      primer track).
    """
    return self._generate_events(
        num_steps=num_steps, primer_events=primer_sequence, temperature=None,
        beam_size=beam_size, branch_factor=branch_factor,
        steps_per_iteration=steps_per_iteration)
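

# Usage sketch: the docstring above describes beam-search generation from a
# primer. The method name used here (generate_pianoroll_sequence) is an
# assumption, since the excerpt omits the method signature; the parameter
# names match the call above.
def _generate_from_primer(model, primer_sequence, num_steps=128):
    return model.generate_pianoroll_sequence(
        num_steps=num_steps,
        primer_sequence=primer_sequence,
        beam_size=1,        # beam_size=1 with branch_factor=1 amounts to
        branch_factor=1,    # greedy, single-path decoding.
        steps_per_iteration=1)
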


default_configs = {
    'rnn-nade': events_rnn_model.EventSequenceRnnConfig(
        generator_pb2.GeneratorDetails(
            id='rnn-nade',
            description='RNN-NADE'),
        mm.PianorollEncoderDecoder(),
        contrib_training.HParams(
            batch_size=64,
            rnn_layer_sizes=[128, 128, 128],
            nade_hidden_units=128,
            dropout_keep_prob=0.5,
            clip_norm=5,
            learning_rate=0.001)),
    'rnn-nade_attn': events_rnn_model.EventSequenceRnnConfig(
        generator_pb2.GeneratorDetails(
            id='rnn-nade_attn',
            description='RNN-NADE with attention.'),
        mm.PianorollEncoderDecoder(),
        contrib_training.HParams(
            # The hyperparameter values were truncated in the excerpt; these
            # are assumed to mirror the 'rnn-nade' config above, plus an
            # attention length (attn_length).
            batch_size=64,
            rnn_layer_sizes=[128, 128, 128],
            attn_length=32,
            nade_hidden_units=128,
            dropout_keep_prob=0.5,
            clip_norm=5,
            learning_rate=0.001)),
}
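

# Usage sketch: one of the configs above is typically passed to the model
# class defined alongside this dict. This assumes the model follows the
# EventSequenceRnnModel pattern and takes its config as the sole constructor
# argument; `model_cls` stands in for that (unnamed here) class.
def _build_rnn_nade_model(model_cls, use_attention=False):
    config = default_configs['rnn-nade_attn' if use_attention else 'rnn-nade']
    return model_cls(config)
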
        # (The start of this method was truncated in the excerpt; the leading
        # arguments here are inferred from the generation call shown earlier.)
        return self._generate_events(
            num_steps, primer_sequence, temperature, beam_size, branch_factor,
            steps_per_iteration, modify_events_callback=modify_events_callback)

    def polyphonic_sequence_log_likelihood(self, sequence):
        """Evaluate the log likelihood of a polyphonic sequence.

    Args:
      sequence: The PolyphonicSequence object for which to evaluate the log
          likelihood.

    Returns:
      The log likelihood of `sequence` under this model.
    """
        return self._evaluate_log_likelihood([sequence])[0]
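

# Usage sketch: the log-likelihood helper above scores one sequence at a
# time; a common use is ranking several candidate sequences. `model` is
# assumed to be an instance of the class defining
# polyphonic_sequence_log_likelihood, and `sequences` to be PolyphonicSequence
# objects.
def _rank_by_log_likelihood(model, sequences):
    scored = [(model.polyphonic_sequence_log_likelihood(seq), seq)
              for seq in sequences]
    # Sort on the score only; higher log likelihood means the model considers
    # the sequence more probable.
    return sorted(scored, key=lambda pair: pair[0], reverse=True)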


default_configs = {
    'polyphony':
    events_rnn_model.EventSequenceRnnConfig(
        generator_pb2.GeneratorDetails(id='polyphony',
                                       description='Polyphonic RNN'),
        magenta.music.OneHotEventSequenceEncoderDecoder(
            polyphony_encoder_decoder.PolyphonyOneHotEncoding()),
        contrib_training.HParams(batch_size=64,
                                 rnn_layer_sizes=[256, 256, 256],
                                 dropout_keep_prob=0.5,
                                 clip_norm=5,
                                 learning_rate=0.001)),
}
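

# Usage sketch: hyperparameters in a config can be overridden at run time;
# tf.contrib.training.HParams supports parsing comma-separated "name=value"
# strings via parse(). The override string below is illustrative only.
def _polyphony_config_with_overrides(overrides='batch_size=32,clip_norm=3'):
    config = default_configs['polyphony']
    # parse() updates only the named values in place and keeps the remaining
    # defaults.
    config.hparams.parse(overrides)
    return config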