Example #1
def get_config(batch_size, data_path):
    return configs.Config(
        model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                       lstm_models.CategoricalLstmDecoder()),
        hparams=merge_hparams(
            lstm_models.get_default_hparams(),
            HParams(
                batch_size=batch_size,
                max_seq_len=32,  # 2 bars w/ 16 steps per bar
                z_size=512,
                enc_rnn_size=[2048],
                dec_rnn_size=[2048, 2048, 2048],
                free_bits=0,
                max_beta=0.5,
                beta_rate=0.99999,
                sampling_schedule='inverse_sigmoid',
                sampling_rate=1000,
            )),
        note_sequence_augmenter=data.NoteSequenceAugmenter(
            transpose_range=(-5, 5)),
        data_converter=data.OneHotMelodyConverter(
            valid_programs=data.MEL_PROGRAMS,
            skip_polyphony=False,
            max_bars=100,  # Truncate long melodies before slicing.
            slice_bars=2,
            steps_per_quarter=4),
        train_examples_path=data_path,
        eval_examples_path=data_path,
    )
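A quick usage sketch of the factory above (illustrative only: the TFRecord path is a placeholder, not from the source):

config = get_config(batch_size=512,
                    data_path='/tmp/music_vae/mel_examples.tfrecord')
print(config.hparams.batch_size)  # 512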
Example #2
def _update_config(config1, config2):
    """Update config1 hparams with hparams from config2"""
    h = merge_hparams(config1.hparams, config2.hparams)
    return Config(model=config1.model,
                  hparams=h,
                  note_sequence_augmenter=config1.note_sequence_augmenter,
                  data_converter=config1.data_converter,
                  train_examples_path=config1.train_examples_path,
                  eval_examples_path=config1.eval_examples_path)
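A hedged usage sketch for the helper above, assuming magenta's stock CONFIG_MAP is in scope (both keys exist in the standard configs module; the call itself is illustrative):

merged = _update_config(CONFIG_MAP['cat-mel_2bar_big'],
                        CONFIG_MAP['cat-mel_2bar_small'])
assert merged.hparams.z_size == 256  # config2's hparams win the merge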
Example #3
    return Config(**config_dict)


CONFIG_MAP = {}

# Melody
CONFIG_MAP['cat-mel_2bar_small'] = Config(
    model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                   lstm_models.CategoricalLstmDecoder()),
    hparams=merge_hparams(
        lstm_models.get_default_hparams(),
        HParams(
            batch_size=512,
            max_seq_len=32,  # 2 bars w/ 16 steps per bar
            z_size=256,
            enc_rnn_size=[512],
            dec_rnn_size=[256, 256],
            free_bits=0,
            max_beta=0.2,
            beta_rate=0.99999,
            sampling_schedule='inverse_sigmoid',
            sampling_rate=1000,
        )),
    note_sequence_augmenter=data.NoteSequenceAugmenter(transpose_range=(-5, 5)),
    data_converter=data.OneHotMelodyConverter(
        valid_programs=data.MEL_PROGRAMS,
        skip_polyphony=False,
        max_bars=100,  # Truncate long melodies before slicing.
        slice_bars=2,
        steps_per_quarter=4),
    train_examples_path=None,
    eval_examples_path=None,
)
Example #4
    config_dict = config.values()
    config_dict.update(update_dict)
    return Config(**config_dict)


config_map = {}

# Melody
config_map['cat-mel_2bar_small'] = Config(
    model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                   lstm_models.CategoricalLstmDecoder()),
    hparams=merge_hparams(
        lstm_models.get_default_hparams(),
        HParams(
            batch_size=512,
            max_seq_len=32,  # 2 bars w/ 16 steps per bar
            z_size=256,
            enc_rnn_size=[512],
            dec_rnn_size=[256, 256],
        )),
    note_sequence_augmenter=None,
    note_sequence_converter=data.OneHotMelodyConverter(
        valid_programs=data.MEL_PROGRAMS,
        skip_polyphony=True,
        max_bars=100,  # Truncate long melodies before slicing.
        slice_bars=2,
        steps_per_quarter=4),
    train_examples_path=None,
    eval_examples_path=None,
)
Example #5
  config_dict.update(update_dict)
  return Config(**config_dict)


CONFIG_MAP = {}


# Melody
CONFIG_MAP['cat-mel_2bar_small'] = Config(
    model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                   lstm_models.CategoricalLstmDecoder()),
    hparams=merge_hparams(
        lstm_models.get_default_hparams(),
        HParams(
            batch_size=512,
            max_seq_len=32,  # 2 bars w/ 16 steps per bar
            z_size=256,
            enc_rnn_size=[512],
            dec_rnn_size=[256, 256],
        )),
    note_sequence_augmenter=data.NoteSequenceAugmenter(transpose_range=(-5, 5)),
    data_converter=data.OneHotMelodyConverter(
        valid_programs=data.MEL_PROGRAMS,
        skip_polyphony=False,
        max_bars=100,  # Truncate long melodies before slicing.
        slice_bars=2,
        steps_per_quarter=4),
    train_examples_path=None,
    eval_examples_path=None,
)
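In magenta's stock configs module the truncated helper at the top of this example is named update_config; a hedged sketch of deriving a variant entry from the config above (the new key and paths are hypothetical):

CONFIG_MAP['cat-mel_2bar_small_local'] = update_config(
    CONFIG_MAP['cat-mel_2bar_small'],
    {'train_examples_path': '/tmp/mel_train.tfrecord',
     'eval_examples_path': '/tmp/mel_eval.tfrecord'})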
Example #6
  config_dict.update(update_dict)
  return Config(**config_dict)


CONFIG_MAP = {}


# Melody
CONFIG_MAP['cat-mel_2bar_small'] = Config(
    model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                   lstm_models.CategoricalLstmDecoder()),
    hparams=merge_hparams(
        lstm_models.get_default_hparams(),
        HParams(
            batch_size=512,
            max_seq_len=32,  # 2 bars w/ 16 steps per bar
            z_size=256,
            enc_rnn_size=[512],
            dec_rnn_size=[256, 256],
        )),
    note_sequence_augmenter=None,
    note_sequence_converter=data.OneHotMelodyConverter(
        valid_programs=data.MEL_PROGRAMS,
        skip_polyphony=False,
        max_bars=100,  # Truncate long melodies before slicing.
        slice_bars=2,
        steps_per_quarter=4),
    train_examples_path=None,
    eval_examples_path=None,
)
Example #7

CONFIG_MAP = {}


# Melody
CONFIG_MAP['cat-mel_2bar_small'] = Config(
    model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                   lstm_models.CategoricalLstmDecoder()),
    hparams=merge_hparams(
        lstm_models.get_default_hparams(),
        HParams(
            batch_size=512,
            max_seq_len=32,  # 2 bars w/ 16 steps per bar
            z_size=256,
            enc_rnn_size=[512],
            dec_rnn_size=[256, 256],
            free_bits=0,
            max_beta=0.2,
            beta_rate=0.99999,
            sampling_schedule='inverse_sigmoid',
            sampling_rate=1000,
        )),
    note_sequence_augmenter=data.NoteSequenceAugmenter(transpose_range=(-5, 5)),
    data_converter=data.OneHotMelodyConverter(
        valid_programs=data.MEL_PROGRAMS,
        skip_polyphony=False,
        max_bars=100,  # Truncate long melodies before slicing.
        slice_bars=2,
        steps_per_quarter=4),
    train_examples_path=None,
    eval_examples_path=None,
)
Example #8
import tensorflow as tf

from magenta.common import merge_hparams
from magenta.models.music_vae import lstm_models
from magenta.models.music_vae.base_model import MusicVAE
from magenta.models.music_vae.configs import Config
from magenta.models.music_vae.configs import CONFIG_MAP
from magenta.models.music_vae.data import BASS_PROGRAMS
from magenta.models.music_vae.data import NoteSequenceAugmenter
from magenta.models.music_vae.data import OneHotMelodyConverter
from magenta.models.music_vae.music_vae_train import FLAGS
from magenta.models.music_vae.music_vae_train import run

CONFIG_MAP["cat-bass_2bar_small"] = Config(
  model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                 lstm_models.CategoricalLstmDecoder()),
  hparams=merge_hparams(
    lstm_models.get_default_hparams(),
    tf.contrib.training.HParams(
      batch_size=512,
      max_seq_len=32,
      z_size=256,
      enc_rnn_size=[512],
      dec_rnn_size=[256, 256],
      free_bits=0,
      max_beta=0.2,
      beta_rate=0.99999,
      sampling_schedule="inverse_sigmoid",
      sampling_rate=1000,
    )),
  note_sequence_augmenter=NoteSequenceAugmenter(transpose_range=(-5, 5)),
  data_converter=OneHotMelodyConverter(
    valid_programs=BASS_PROGRAMS,
    skip_polyphony=False,
    max_bars=100,
    slice_bars=2,
    steps_per_quarter=4),
  train_examples_path=None,
  eval_examples_path=None,
)
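A hedged launch sketch using the FLAGS and run imported above, assuming the standard music_vae_train flag names (the run directory and examples path are placeholders):

FLAGS.config = 'cat-bass_2bar_small'
FLAGS.run_dir = '/tmp/music_vae/cat-bass_2bar_small'
FLAGS.examples_path = '/tmp/bass_examples.tfrecord'
run(CONFIG_MAP)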
Example #9
	config_dict.update(update_dict)
	return Config(**config_dict)


CONFIG_MAP = dict()

CONFIG_MAP['lc-cat-mel_2bar_big'] = Config(
	model=LCMusicVAE(lstm_models.BidirectionalLstmEncoder(), lstm_models.CategoricalLstmDecoder()),
	hparams=merge_hparams(
		lstm_models.get_default_hparams(),
		HParams(
			batch_size=2,
			max_seq_len=32,  # 2 bars w/ 16 steps per bar
			z_size=512,
			encoded_z_size=8,
			enc_rnn_size=[2048],
			dec_rnn_size=[128, 128],
			free_bits=0,
			max_beta=0.5,
			beta_rate=0.99999,
			sampling_schedule='inverse_sigmoid',
			sampling_rate=1000,
		)),
	note_sequence_augmenter=data.NoteSequenceAugmenter(transpose_range=(-5, 5)),
	data_converter=data.OneHotMelodyConverter(
		valid_programs=data.MEL_PROGRAMS,
		skip_polyphony=False,
		max_bars=100,  # Truncate long melodies before slicing.
		slice_bars=2,
		steps_per_quarter=4),
	train_examples_path=None,
	eval_examples_path=None,
)
Example #10
                                          gap_bars=2)
# Flat (non-hierarchical) version of the 16-bar trio: the largest of these
# configs, and the simplest one to follow.
CONFIG_MAP['flat-trio_16bar'] = Config(
    model=MusicVAE(
        lstm_models.BidirectionalLstmEncoder(),
        lstm_models.MultiOutCategoricalLstmDecoder(output_depths=[
            90,  # melody
            90,  # bass
            512,  # drums
        ])),
    hparams=merge_hparams(
        lstm_models.get_default_hparams(),
        HParams(
            batch_size=256,
            max_seq_len=256,
            z_size=512,
            enc_rnn_size=[2048, 2048],
            dec_rnn_size=[2048, 2048, 2048],
        )),
    note_sequence_augmenter=None,
    data_converter=trio_16bar_converter,
    train_examples_path='./MidiSet1/',
    eval_examples_path=None,
)


def main():
    # test1()
    import_songs()
Example #11
    slice_bars=16,
    steps_per_quarter=4)

CONFIG_MAP['16bar_44_1'] = Config(
    model=MusicVAE(
        lstm_models.BidirectionalLstmEncoder(),
        lstm_models.HierarchicalLstmDecoder(
            lstm_models.CategoricalLstmDecoder(),
            level_lengths=[16, 16],
            disable_autoregression=True)),
    hparams=merge_hparams(
        lstm_models.get_default_hparams(),
        HParams(
            batch_size=512,
            max_seq_len=256,
            z_size=512,
            enc_rnn_size=[2048, 2048],
            dec_rnn_size=[1024, 1024],
            free_bits=256,
            max_beta=0.2,
        )),
    note_sequence_augmenter=None,
    data_converter=data.OneHotMelodyConverter(
        skip_polyphony=False,
        max_bars=100,  # Truncate long melodies before slicing.
        slice_bars=16,
        steps_per_quarter=4),
    train_examples_path=None,
    eval_examples_path=None,
)
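One structural constraint worth noting for the hierarchical config above: HierarchicalLstmDecoder expects the product of level_lengths to equal max_seq_len. A minimal sanity-check sketch (names local to this snippet):

level_lengths = [16, 16]
max_seq_len = 256
assert level_lengths[0] * level_lengths[1] == max_seq_len  # 16 bars * 16 steps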