def testToNoteSequenceMultipleChunksWithChords(self):
    sequence = copy.deepcopy(self.sequence)
    # Melody track on instrument 0: (pitch, velocity, start_time, end_time).
    testing_lib.add_track_to_sequence(sequence, 0, [
        (64, 100, 0, 2),
        (60, 100, 0, 4),
        (67, 100, 2, 4),
        (62, 100, 4, 6),
        (59, 100, 4, 8),
        (67, 100, 6, 8),
    ])
    # Drum track on instrument 1.
    testing_lib.add_track_to_sequence(
        sequence, 1,
        [
            (40, 100, 0, 0.125),
            (50, 100, 0, 0.125),
            (50, 100, 2, 2.125),
            (40, 100, 4, 4.125),
            (50, 100, 4, 4.125),
            (50, 100, 6, 6.125),
        ],
        is_drum=True)
    # Chord annotations: C at time 0, G at time 4.
    testing_lib.add_chords_to_sequence(sequence, [('C', 0), ('G', 4)])
    # 4-bar hops, each split into 2-bar chunks, with chords encoded as controls.
    converter = data_hierarchical.MultiInstrumentPerformanceConverter(
        hop_size_bars=4,
        chunk_size_bars=2,
        chord_encoding=mm.MajorMinorChordOneHotEncoding())
    tensors = converter.to_tensors(sequence)
    self.assertEqual(1, len(tensors.outputs))
    # Decoding the tensors (plus chord controls) should recover the input.
    sequences = converter.to_notesequences(tensors.outputs, tensors.controls)
    self.assertEqual(1, len(sequences))
    self.assertProtoEquals(sequence, sequences[0])

def testToNoteSequenceMultipleChunks(self):
    sequence = copy.deepcopy(self.sequence)
    # Same melody and drum tracks as above, but without chord annotations.
    testing_lib.add_track_to_sequence(sequence, 0, [
        (64, 100, 0, 2),
        (60, 100, 0, 4),
        (67, 100, 2, 4),
        (62, 100, 4, 6),
        (59, 100, 4, 8),
        (67, 100, 6, 8),
    ])
    testing_lib.add_track_to_sequence(
        sequence, 1,
        [
            (40, 100, 0, 0.125),
            (50, 100, 0, 0.125),
            (50, 100, 2, 2.125),
            (40, 100, 4, 4.125),
            (50, 100, 4, 4.125),
            (50, 100, 6, 6.125),
        ],
        is_drum=True)
    converter = data_hierarchical.MultiInstrumentPerformanceConverter(
        hop_size_bars=4, chunk_size_bars=2)
    tensors = converter.to_tensors(sequence)
    self.assertEqual(1, len(tensors.outputs))
    # No chord encoding, so no controls are passed when decoding.
    sequences = converter.to_notesequences(tensors.outputs)
    self.assertEqual(1, len(sequences))
    self.assertProtoEquals(sequence, sequences[0])
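
For reference, the round trip that both tests exercise can be reproduced outside the test harness. The sketch below hand-builds the same four-bar melody and feeds it through the converter; the import paths (magenta.protobuf.music_pb2, magenta.models.music_vae.data_hierarchical), the ticks_per_quarter value, and the NoteSequence construction are illustrative assumptions, not part of the test file above.

from magenta.models.music_vae import data_hierarchical
from magenta.protobuf import music_pb2  # assumed path; newer releases ship this as note_seq.protobuf

# Hand-built 4-bar NoteSequence at 120 qpm in 4/4 (1 bar = 2 seconds).
sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
sequence.tempos.add(qpm=120)
sequence.time_signatures.add(time=0, numerator=4, denominator=4)
for pitch, start, end in [(64, 0, 2), (60, 0, 4), (67, 2, 4),
                          (62, 4, 6), (59, 4, 8), (67, 6, 8)]:
    sequence.notes.add(pitch=pitch, velocity=100, start_time=start,
                       end_time=end, instrument=0, program=0)
sequence.total_time = 8

converter = data_hierarchical.MultiInstrumentPerformanceConverter(
    hop_size_bars=4, chunk_size_bars=2)
tensors = converter.to_tensors(sequence)
decoded = converter.to_notesequences(tensors.outputs)
print(len(decoded))  # expected: 1, mirroring the assertions in the tests above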
Example #3
multiperf_hparams_big = merge_hparams(
    lstm_models.get_default_hparams(),
    HParams(batch_size=256,
            max_seq_len=512,
            z_size=512,
            enc_rnn_size=[2048],
            dec_rnn_size=[1024, 1024, 1024]))

CONFIG_MAP['hier-multiperf_vel_1bar_med'] = Config(
    model=MusicVAE(multiperf_encoder, multiperf_decoder),
    hparams=multiperf_hparams_med,
    note_sequence_augmenter=data.NoteSequenceAugmenter(
        transpose_range=(-3, 3)),
    data_converter=data_hierarchical.MultiInstrumentPerformanceConverter(
        num_velocity_bins=8,
        hop_size_bars=1,
        max_num_instruments=8,
        max_events_per_instrument=64,
    ),
    train_examples_path=None,
    eval_examples_path=None,
)

CONFIG_MAP['hier-multiperf_vel_1bar_big'] = Config(
    model=MusicVAE(multiperf_encoder, multiperf_decoder),
    hparams=multiperf_hparams_big,
    note_sequence_augmenter=data.NoteSequenceAugmenter(
        transpose_range=(-3, 3)),
    data_converter=data_hierarchical.MultiInstrumentPerformanceConverter(
        num_velocity_bins=8,
        hop_size_bars=1,
        max_num_instruments=8,