Example #1
# Imports for a pre-TF2 Magenta release (assumed context for this snippet).
from magenta.common import merge_hparams
from magenta.models.music_vae import configs
from magenta.models.music_vae import data
from magenta.models.music_vae import lstm_models
from magenta.models.music_vae.base_model import MusicVAE
from tensorflow.contrib.training import HParams


def get_config(batch_size, data_path):
    return configs.Config(
        model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                       lstm_models.CategoricalLstmDecoder()),
        hparams=merge_hparams(
            lstm_models.get_default_hparams(),
            HParams(
                batch_size=batch_size,  # use the argument, not a hard-coded 512
                max_seq_len=32,  # 2 bars w/ 16 steps per bar
                z_size=512,
                enc_rnn_size=[2048],
                dec_rnn_size=[2048, 2048, 2048],
                free_bits=0,
                max_beta=0.5,
                beta_rate=0.99999,
                sampling_schedule='inverse_sigmoid',
                sampling_rate=1000,
            )),
        note_sequence_augmenter=data.NoteSequenceAugmenter(
            transpose_range=(-5, 5)),
        data_converter=data.OneHotMelodyConverter(
            valid_programs=data.MEL_PROGRAMS,
            skip_polyphony=False,
            max_bars=100,  # Truncate long melodies before slicing.
            slice_bars=2,
            steps_per_quarter=4),
        train_examples_path=data_path,
        eval_examples_path=data_path,
    )
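
A minimal usage sketch for the factory above; the TFRecord path is hypothetical, and the returned Config is a plain namedtuple whose fields can be inspected directly:

config = get_config(batch_size=64, data_path='/tmp/mel_examples.tfrecord')
print(config.hparams.batch_size)  # 64, taken from the argument
print(config.hparams.z_size)      # 512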
Example #2
import magenta.music as mm
from magenta.music import sequences_lib
from magenta.models.music_vae import data as music_vae_data
# Assumed origin of the convert_midi(root_dir, sub_dir, full_file_path) helper:
from magenta.scripts.convert_dir_to_note_sequences import convert_midi


def magenta_melody_extractor(midi_path, max_bars=100):
    data_converter = music_vae_data.OneHotMelodyConverter(
        valid_programs=music_vae_data.MEL_PROGRAMS,
        skip_polyphony=False,
        max_bars=max_bars,  # Truncate long melodies before slicing.
        slice_bars=16,
        steps_per_quarter=4)

    ns = convert_midi('', '', midi_path)
    all_melodies = []
    try:
        note_sequences = sequences_lib.split_note_sequence_on_time_changes(ns)
        for ns in note_sequences:
            # filter notes --------------------------------------------------------
            def _filter(note):  # avoid shadowing the built-in filter()
                if (data_converter._valid_programs is not None and note.program
                        not in data_converter._valid_programs):
                    return False
                return data_converter._min_pitch <= note.pitch <= data_converter._max_pitch

            notes = list(ns.notes)
            del ns.notes[:]
            ns.notes.extend([n for n in notes if _filter(n)])

            # quantize sequence ---------------------------------------------------
            quantized_sequence = mm.quantize_note_sequence(
                ns, data_converter._steps_per_quarter)

            # extract melodies ----------------------------------------------------
            melodies, _ = data_converter._event_extractor_fn(
                quantized_sequence)
            all_melodies.extend(melodies)
    except Exception:
        # Skip sequences that fail filtering, quantization, or extraction.
        pass
    return all_melodies
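
A hedged usage sketch for the extractor; the MIDI path is hypothetical, and each returned item is presumably a Melody event sequence produced by the converter's extractor function:

melodies = magenta_melody_extractor('/tmp/song.mid', max_bars=64)
print('extracted %d melodies' % len(melodies))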
Example #3
  def testIsTraining(self):
    converter = data.OneHotMelodyConverter(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=2)
    converter.is_training = True
    self.assertEqual(2, len(converter.to_tensors(self.sequence)[0]))

    converter.max_tensors_per_notesequence = None
    self.assertEqual(5, len(converter.to_tensors(self.sequence)[0]))
Example #4
  def testIsTraining(self):
    converter = data.OneHotMelodyConverter(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=2)
    converter.set_mode('train')
    self.assertEqual(2, len(converter.to_tensors(self.sequence).inputs))

    converter.max_tensors_per_notesequence = None
    self.assertEqual(5, len(converter.to_tensors(self.sequence).inputs))
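
The newer converter API makes the mode explicit rather than toggling a flag; besides 'train', the data converters also accept 'eval' and 'infer' (a small sketch using the converter from the test above):

converter.set_mode('eval')   # the other two accepted mode names
converter.set_mode('infer')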
Example #5
  def testMaxOutputsPerNoteSequence(self):
    converter = data.OneHotMelodyConverter(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=2)
    self.assertEqual(2, len(converter.to_tensors(self.sequence)[0]))

    converter.max_tensors_per_notesequence = 3
    self.assertEqual(3, len(converter.to_tensors(self.sequence)[0]))

    converter.max_tensors_per_notesequence = 100
    self.assertEqual(5, len(converter.to_tensors(self.sequence)[0]))
Example #6
  def testToNoteSequence(self):
    converter = data.OneHotMelodyConverter(
        steps_per_quarter=1, slice_bars=4, max_tensors_per_notesequence=1)
    _, output_tensors = converter.to_tensors(
        filter_instrument(self.sequence, 0))
    sequences = converter.to_notesequences(output_tensors)

    self.assertEqual(1, len(sequences))
    expected_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    expected_sequence.tempos.add(qpm=120)
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(32, 80, 1.0, 2.0), (33, 80, 3.0, 5.5), (34, 80, 5.5, 6.5)])
    self.assertProtoEquals(expected_sequence, sequences[0])
Example #7
  def testToNoteSequenceChordConditioned(self):
    converter = data.OneHotMelodyConverter(
        steps_per_quarter=1, slice_bars=4, max_tensors_per_notesequence=1,
        chord_encoding=mm.MajorMinorChordOneHotEncoding())
    tensors = converter.to_tensors(
        filter_instrument(self.sequence, 0))
    sequences = converter.to_notesequences(tensors.outputs, tensors.controls)

    self.assertEqual(1, len(sequences))
    expected_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    expected_sequence.tempos.add(qpm=120)
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(32, 80, 1.0, 2.0), (33, 80, 3.0, 5.5), (34, 80, 5.5, 6.5)])
    testing_lib.add_chords_to_sequence(
        expected_sequence, [('N.C.', 0), ('F', 1), ('C', 4)])
    self.assertProtoEquals(expected_sequence, sequences[0])
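
For reference on the conditioning signal, the chord encoding used above is a one-hot vocabulary over no-chord plus the 12 major and 12 minor triads; a small sketch, stated as an assumption about MajorMinorChordOneHotEncoding and the test module's mm alias:

encoding = mm.MajorMinorChordOneHotEncoding()
print(encoding.num_classes)        # 25: N.C. + 12 major + 12 minor
print(encoding.encode_event('C'))  # integer index for C major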
Example #8
CONFIG_MAP['cat-mel_2bar_small'] = Config(
    model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                   lstm_models.CategoricalLstmDecoder()),
    hparams=merge_hparams(
        lstm_models.get_default_hparams(),
        HParams(
            batch_size=512,
            max_seq_len=32,  # 2 bars w/ 16 steps per bar
            z_size=256,
            enc_rnn_size=[512],
            dec_rnn_size=[256, 256],
            free_bits=0,
            max_beta=0.2,
            beta_rate=0.99999,
            sampling_schedule='inverse_sigmoid',
            sampling_rate=1000,
        )),
    note_sequence_augmenter=data.NoteSequenceAugmenter(transpose_range=(-5,
                                                                        5)),
    data_converter=data.OneHotMelodyConverter(
        valid_programs=data.MEL_PROGRAMS,
        skip_polyphony=False,
        max_bars=100,  # Truncate long melodies before slicing.
        slice_bars=2,
        steps_per_quarter=4),
    train_examples_path=None,
    eval_examples_path=None,
)

CONFIG_MAP['cat-mel_2bar_big'] = Config(
    model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                   lstm_models.CategoricalLstmDecoder()),
    hparams=merge_hparams(
        lstm_models.get_default_hparams(),
        HParams(
            batch_size=512,
            max_seq_len=32,  # 2 bars w/ 16 steps per bar
            z_size=512,
            enc_rnn_size=[2048],
            dec_rnn_size=[2048, 2048, 2048],
            free_bits=0,
            max_beta=0.5,
            beta_rate=0.99999,
            sampling_schedule='inverse_sigmoid',
            sampling_rate=1000,
        )),
    note_sequence_augmenter=data.NoteSequenceAugmenter(transpose_range=(-5,
                                                                        5)),
    data_converter=data.OneHotMelodyConverter(
        valid_programs=data.MEL_PROGRAMS,
        skip_polyphony=False,
        max_bars=100,  # Truncate long melodies before slicing.
        slice_bars=2,
        steps_per_quarter=4),
    train_examples_path=None,
    eval_examples_path=None,
)
Example #9
config_map['cat-mel_2bar_small'] = Config(
    model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                   lstm_models.CategoricalLstmDecoder()),
    hparams=merge_hparams(
        lstm_models.get_default_hparams(),
        HParams(
            batch_size=512,
            max_seq_len=32,  # 2 bars w/ 16 steps per bar
            z_size=256,
            enc_rnn_size=[512],
            dec_rnn_size=[256, 256],
        )),
    note_sequence_augmenter=None,
    note_sequence_converter=data.OneHotMelodyConverter(
        valid_programs=data.MEL_PROGRAMS,
        skip_polyphony=True,
        max_bars=100,  # Truncate long melodies before slicing.
        slice_bars=2,
        steps_per_quarter=4),
    train_examples_path=None,
    eval_examples_path=None,
)

config_map['cat-mel_2bar_big'] = Config(
    model=MusicVAE(lstm_models.BidirectionalLstmEncoder(),
                   lstm_models.CategoricalLstmDecoder()),
    hparams=merge_hparams(
        lstm_models.get_default_hparams(),
        HParams(
            batch_size=512,
            max_seq_len=32,  # 2 bars w/ 16 steps per bar
            z_size=512,
            enc_rnn_size=[2048],
            dec_rnn_size=[2048, 2048, 2048],
        )),
    note_sequence_augmenter=None,
    note_sequence_converter=data.OneHotMelodyConverter(
        valid_programs=data.MEL_PROGRAMS,
        skip_polyphony=True,
        max_bars=100,  # Truncate long melodies before slicing.
        slice_bars=2,
        steps_per_quarter=4),
    train_examples_path=None,
    eval_examples_path=None,
)
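
In current Magenta releases the configs above are registered in a module-level map, so the usual way to pick one up is by name; a minimal lookup sketch:

from magenta.models.music_vae import configs

config = configs.CONFIG_MAP['cat-mel_2bar_big']
print(config.hparams.z_size)  # 512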