Example #1
    def __init__(self):
        """
        Loads and initializes the vae
        """
        print("Initializing Music VAE...")
        self.music_vae = TrainedModel(
            configs.CONFIG_MAP['cat-mel_2bar_big'],
            batch_size=4,
            checkpoint_dir_or_path='/content/mel_2bar_big.ckpt')
        print('🎉 Done!')
def music_vae_sample(model_id, model_config, num):
    music_vae = TrainedModel(configs.CONFIG_MAP[model_config],
                             batch_size=4,
                             checkpoint_dir_or_path=model_id + '.tar')

    generated_sequences = music_vae.sample(n=num, length=80, temperature=1.0)

    cnt = 1
    for ns in generated_sequences:
        note_sequence_to_midi_file(
            ns, 'vae_sample_' + model_id + '_%d.mid' % (cnt))
        cnt += 1
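
# Example usage (hypothetical: assumes a 'cat-mel_2bar_big.tar' checkpoint
# bundle in the working directory):
# music_vae_sample('cat-mel_2bar_big', 'cat-mel_2bar_big', num=3)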
Example #3
def landing():
    form = LandingForm()
    if form.validate_on_submit():
        num_bars = form.num_bars.data
        temperature = form.temperature.data
        config = configs.CONFIG_MAP['hier-multiperf_vel_1bar_med']
        weight_name = 'model_fb256.ckpt'
        model = TrainedModel(config,
                             batch_size=BATCH_SIZE,
                             checkpoint_dir_or_path=os.path.join(
                                 app.root_path, 'content', weight_name))
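        # Clear the converter's per-input tensor cap (an internal Magenta
        # attribute) so longer multi-track inputs are not truncated; noted
        # here as an assumption about intent.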
        model._config.data_converter._max_tensors_per_input = None

        z1 = np.random.normal(size=[Z_SIZE])
        z2 = np.random.normal(size=[Z_SIZE])
        z = np.array([slerp(z1, z2, t) for t in np.linspace(0, 1, num_bars)])

        seqs = model.decode(length=TOTAL_STEPS, z=z, temperature=temperature)

        trim_sequences(seqs)
        fix_instruments_for_concatenation(seqs)
        interp_ns = concatenate_sequences(seqs)
        f_ext = '.mid'
        random_hex = secrets.token_hex(8)
        music_fn = random_hex + f_ext
        music_mp3 = random_hex + ".mp3"
        #Save music to disk
        mm.sequence_proto_to_midi_file(interp_ns, music_fn)

        ### Move audio to a specified path
        source_path = "/home/pratheesh/Flask_Blog/" + music_mp3
        destination_path = os.path.join(app.root_path, 'static/music',
                                        music_mp3)

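        # Render the MIDI to WAV with FluidSynth, then encode the WAV to MP3
        # with LAME (both tools are assumed to be installed on the host).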
        cmd_to_wav = "fluidsynth -F " + random_hex + ".wav /usr/share/sounds/sf2/FluidR3_GM.sf2 " + music_fn
        print(cmd_to_wav)
        os.system(cmd_to_wav)
        cmd_to_mp3 = "lame --preset standard " + random_hex + ".wav " + random_hex + ".mp3"
        print(cmd_to_mp3)
        os.system(cmd_to_mp3)
        #shutil.move(source_path, destination_path)
        #os.replace(source_path, destination_path)
        print("moving file")
        os.rename(source_path, destination_path)
        os.remove(music_fn)
        os.remove(random_hex + ".wav")

        music_file = url_for('static', filename='music/' + music_mp3)

        return render_template('music_output.html', music_file=music_file)

    return render_template('landing.html', form=form)
def music_vae_interpolate(sequence1, sequence2, model_id, model_config, num):
    music_vae = TrainedModel(configs.CONFIG_MAP[model_config],
                             batch_size=4,
                             checkpoint_dir_or_path=model_id + '.tar')

    note_sequences = music_vae.interpolate(sequence1,
                                           sequence2,
                                           num_steps=num,
                                           length=32)

    # Concatenate them into one long sequence, with the start and
    # end sequences at each end.
    return mm.sequences_lib.concatenate_sequences(note_sequences)
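
# Example usage (hypothetical inputs seq_a/seq_b and checkpoint bundle):
# interp_ns = music_vae_interpolate(seq_a, seq_b,
#                                   'cat-mel_2bar_big', 'cat-mel_2bar_big',
#                                   num=5)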
Example #5
def decode_vectors(model_file, vectors, concatenate=False):
    temperature = 1.0  #@param {type:"slider", min:0.1, max:1.5}
    config = configs.CONFIG_MAP['flat-mel_16bar']
    model = TrainedModel(config,
                         batch_size=512,
                         checkpoint_dir_or_path=model_file)
    resulting_midis = model.decode(vectors, length=256)
    if concatenate:
        concatenated_midis = concatenate_sequences(resulting_midis)
        download(concatenated_midis, "concatenated_midi.mid")
        print("created 1 midi.")
    else:
        for i, p in enumerate(resulting_midis):
            download(p, "newly_created_" + str(i) + ".mid")
            print("created " + str(len(resulting_midis)) + " midis")
Example #6
def main():
    # load trained model
    music_vae = TrainedModel(
        configs.CONFIG_MAP['cat-mel_2bar_big'],
        batch_size=4,
        checkpoint_dir_or_path='checkpoints/mel_2bar_big.ckpt')

    # generate some sequences
    generated_sequences = music_vae.sample(n=10, length=80, temperature=1.0)

    # save sequences to files
    for n, sequence in enumerate(generated_sequences):
        music.sequence_proto_to_midi_file(
            sequence, os.path.join('output',
                                   str(n) + '.mid'))
Example #7
def load_model_sequence(model_path, config, note_sequence_file):
    my_model = TrainedModel(config, batch_size=512, checkpoint_dir_or_path=model_path)
    print("loaded the model")

    note_sequences = mm.note_sequence_io.note_sequence_record_iterator(note_sequence_file)
    print("loaded the note_sequence")
    return my_model, note_sequences
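
# Example usage (hypothetical paths; any matching config/checkpoint pair works):
# model, sequences = load_model_sequence('checkpoints/mel_16bar_flat.ckpt',
#                                        configs.CONFIG_MAP['flat-mel_16bar'],
#                                        'data/notesequences.tfrecord')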
Example #8
    def initialize(self):
        if self.is_conditioned:
            config_string = 'hier-multiperf_vel_1bar_med_chords'
            ckpt_string = 'model_chords_fb64.ckpt'

        else:
            config_string = 'hier-multiperf_vel_1bar_med'
            ckpt_string = 'model_fb256.ckpt'

        config = configs.CONFIG_MAP[config_string]
        self.model = TrainedModel(
                        config, batch_size=BATCH_SIZE,
                        checkpoint_dir_or_path=f'models/{ckpt_string}')

        if not self.is_conditioned:
            self.model._config.data_converter._max_tensors_per_input = None
Example #9
    def load_model(self,
                   model='hierdec-trio_16bar',
                   path='./content/checkpoints/trio_16bar_hierdec.ckpt'):
        print('Loading models...')
        self.configs[model] = configs.CONFIG_MAP[model]
        self.trio_models[model] = TrainedModel(self.configs[model],
                                               batch_size=4,
                                               checkpoint_dir_or_path=path)
Example #10
def init_music_vae(model_name):
    print("initializing generator...")
    music_vae = TrainedModel(configs.CONFIG_MAP[model_name],
                             batch_size=4,
                             checkpoint_dir_or_path='./content/' + model_name +
                             '.ckpt')
    print('🎉 Done initializing!')
    return music_vae
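
# Example usage (assumes ./content/cat-mel_2bar_big.ckpt exists):
# music_vae = init_music_vae('cat-mel_2bar_big')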
Example #11
def main(input_file, output_file,
         chunk_size, buffer_size,
         batch_size, checkpoint,
         log_period, log_file):
    args = locals()
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    log = logging.getLogger(__name__)
    handler = logging.FileHandler(log_file)
    handler.setFormatter(formatter)
    handler.setLevel(logging.DEBUG)
    log.addHandler(handler)
    log.setLevel(logging.DEBUG)

    log.info('Generating melody dataset with args:\n' + pformat(args))
    total_start_time = time()
    ns_gen = mm.note_sequence_io.note_sequence_record_iterator(input_file)
    ns_iter = iter(ns_gen)
    config = configs.CONFIG_MAP[MODEL_NAME]
    trio_converter = config.data_converter

    log.debug('Creating HDF5 store...')
    start_time = time()
    with h5py.File(output_file, 'w') as data_file:
        dataset_size = buffer_size
        ds_trio = data_file.create_dataset(
            'trio',
            (dataset_size, TIMESTEPS, DIM_TRIO),
            maxshape=(None, TIMESTEPS, DIM_TRIO),
            dtype=bool
        )
        ds_melody = data_file.create_dataset(
            'melody',
            (dataset_size, TIMESTEPS, DIM_MELODY),
            maxshape=(None, TIMESTEPS, DIM_MELODY),
            dtype=bool
        )
        ds_bass = data_file.create_dataset(
            'bass',
            (dataset_size, TIMESTEPS, DIM_BASS),
            maxshape=(None, TIMESTEPS, DIM_BASS),
            dtype=bool
        )
        ds_drums = data_file.create_dataset(
            'drums',
            (dataset_size, TIMESTEPS, DIM_DRUMS),
            maxshape=(None, TIMESTEPS, DIM_DRUMS),
            dtype=bool
        )
        ds_code = data_file.create_dataset(
            'code',
            (dataset_size, config.hparams.z_size),
            maxshape=(None, config.hparams.z_size),
            dtype=np.float32
        )
        log.debug('Done creating HDF5 store (time: {0:.1f}s)'
                  .format(time() - start_time))

        log.debug('Loading model...')
        start_time = time()
        model = TrainedModel(config, batch_size=batch_size,
                             checkpoint_dir_or_path=checkpoint)
        log.debug('Done loading model (time: {0:.1f}s)'
                  .format(time() - start_time))

        log.info('Beginning dataset creation...')
        i_chunk = 0
        i_example = 0
        try:
            while True:
                i_chunk += 1
                log.disabled = i_chunk % log_period != 0 or not log_period
                chunk_time = time()

                log.debug('Processing a chunk of NoteSequences...')
                start_time = time()

                note_sequences = list(it.islice(ns_iter, chunk_size))
                if not note_sequences:
                    break

                trio_tensors = map(
                    lambda seq: trio_converter.to_tensors(seq).outputs,
                    note_sequences
                )
                trio_tensors = it.chain.from_iterable(trio_tensors)
                trio_tensors = list(
                    filter(lambda t: t.shape == (TIMESTEPS, DIM_TRIO),
                           trio_tensors)
                )

                # Ensure an example doesn't overflow the allocated space
                trio_tensors = trio_tensors[:buffer_size]
                n_tensors = len(trio_tensors)
                i_last = n_tensors + i_example

                melody_tensors = list(map(lambda t: t[:, :DIM_MELODY],
                                          trio_tensors))
                bass_tensors = list(map(
                    lambda t: t[:, DIM_MELODY:DIM_MELODY + DIM_BASS],
                    trio_tensors
                ))
                drums_tensors = list(map(lambda t: t[:, -DIM_DRUMS:],
                                         trio_tensors))
                log.debug('Done processing NoteSequences (time: {0:.1f}s)'
                          .format(time() - start_time))

                log.debug('Running encoder...')
                start_time = time()
                _, codes, _ = model.encode_tensors(deepcopy(trio_tensors),
                                                   [TIMESTEPS] * n_tensors)
                log.debug('Done running encoder (time: {0:.1f}s)'
                          .format(time() - start_time))

                if i_last >= dataset_size:
                    dataset_size += buffer_size
                    log.info('Resizing datasets to size: {}'.format(dataset_size))
                    ds_trio.resize((dataset_size, TIMESTEPS, DIM_TRIO))
                    ds_melody.resize((dataset_size, TIMESTEPS, DIM_MELODY))
                    ds_bass.resize((dataset_size, TIMESTEPS, DIM_BASS))
                    ds_drums.resize((dataset_size, TIMESTEPS, DIM_DRUMS))
                    ds_code.resize((dataset_size, config.hparams.z_size))

                log.debug('Writing examples to HDF5...')
                start_time = time()
                ds_trio[i_example:i_last, :, :] = np.array(trio_tensors)
                ds_melody[i_example:i_last, :, :] = np.array(melody_tensors)
                ds_bass[i_example:i_last, :, :] = np.array(bass_tensors)
                ds_drums[i_example:i_last, :, :] = np.array(drums_tensors)
                ds_code[i_example:i_last, :] = np.array(codes)
                log.debug('Done writing examples to HDF5 (time: {0:.1f}s)'
                          .format(time() - start_time))

                i_example += n_tensors

                log.info(('Chunk {0} wrote {1} examples ' +
                         '(total: {2}; time: {3:.1f}s)')
                         .format(i_chunk, n_tensors, i_example,
                                 time() - chunk_time))
        except StopIteration:
            pass

        log.debug('Finished writing data')
        log.debug('Resizing datasets...')
        dataset_size = i_example
        ds_trio.resize((dataset_size, TIMESTEPS, DIM_TRIO))
        ds_melody.resize((dataset_size, TIMESTEPS, DIM_MELODY))
        ds_bass.resize((dataset_size, TIMESTEPS, DIM_BASS))
        ds_drums.resize((dataset_size, TIMESTEPS, DIM_DRUMS))
        ds_code.resize((dataset_size, config.hparams.z_size))
        log.debug('Done resizing datasets...')

    total_time = time() - total_start_time
    log.info('Finished creating HDF5 dataset')
    log.info('Total examples: {}'.format(i_example))
    log.info('Total chunks: {}'.format(i_chunk))
    log.info('Total time: {0:.1f}s'.format(total_time))
    log.info('Done!')
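
# Hypothetical invocation (argument parsing is not part of this snippet; all
# paths are assumptions):
# main(input_file='data/notesequences.tfrecord', output_file='trios.h5',
#      chunk_size=64, buffer_size=4096, batch_size=64,
#      checkpoint='checkpoints/trio_16bar_hierdec.ckpt',
#      log_period=10, log_file='dataset_gen.log')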
Example #12
    return interp_seq if num_steps > 3 else note_sequences[num_steps // 2]


def download(note_sequence, filename):
    mm.sequence_proto_to_midi_file(note_sequence, filename)
    # files.download(filename)


print('Done setting up environment')

#@title Load the pre-trained models.
print('Loading pre-trained models...')
trio_models = {}
hierdec_trio_16bar_config = configs.CONFIG_MAP['hierdec-trio_16bar']
trio_models['hierdec_trio_16bar'] = TrainedModel(
    hierdec_trio_16bar_config,
    batch_size=4,
    checkpoint_dir_or_path='./content/checkpoints/trio_16bar_hierdec.ckpt')

# flat_trio_16bar_config = configs.CONFIG_MAP['flat-trio_16bar']
# trio_models['baseline_flat_trio_16bar'] = TrainedModel(flat_trio_16bar_config, batch_size=4, checkpoint_dir_or_path='./checkpoints/trio_16bar_flat.ckpt')
print('Done loading models')

#@title Generate 4 samples from the selected model prior.
print('Generating samples...')
trio_sample_model = "hierdec_trio_16bar"  #@param ["hierdec_trio_16bar", "baseline_flat_trio_16bar"]
temperature = 0.5  #@param {type:"slider", min:0.1, max:1.5, step:0.1}

trio_16_samples = trio_models[trio_sample_model].sample(
    n=4, length=256, temperature=temperature)
for ns in trio_16_samples:
    play(ns)
Example #13
import magenta.music as mm
from magenta.models.music_vae import configs
from magenta.models.music_vae.trained_model import TrainedModel
from magenta.protobuf import music_pb2

# Initialize the model.
# print "Initializing Music VAE..."
music_vae = TrainedModel(configs.CONFIG_MAP['cat-mel_2bar_big'],
                         batch_size=4,
                         checkpoint_dir_or_path='./content/mel_2bar_big.ckpt')

# How many sequences, including the start and end ones, to generate.
num_steps = 4

twinkle_twinkle = music_pb2.NoteSequence()
twinkle_twinkle.notes.add(pitch=60, start_time=0.0, end_time=0.5, velocity=80)
twinkle_twinkle.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=80)
twinkle_twinkle.notes.add(pitch=67, start_time=1.0, end_time=1.5, velocity=80)
twinkle_twinkle.notes.add(pitch=67, start_time=1.5, end_time=2.0, velocity=80)
twinkle_twinkle.notes.add(pitch=69, start_time=2.0, end_time=2.5, velocity=80)
twinkle_twinkle.notes.add(pitch=69, start_time=2.5, end_time=3.0, velocity=80)
twinkle_twinkle.notes.add(pitch=67, start_time=3.0, end_time=4.0, velocity=80)
twinkle_twinkle.notes.add(pitch=65, start_time=4.0, end_time=4.5, velocity=80)
twinkle_twinkle.notes.add(pitch=65, start_time=4.5, end_time=5.0, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.0, end_time=5.5, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.5, end_time=6.0, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.0, end_time=6.5, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.5, end_time=7.0, velocity=80)
twinkle_twinkle.notes.add(pitch=60, start_time=7.0, end_time=8.0, velocity=80)
twinkle_twinkle.total_time = 8
twinkle_twinkle.tempos.add(qpm=60)
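
# A hedged continuation (not part of the original snippet): sample a second
# 2-bar melody from the prior and interpolate from twinkle_twinkle to it.
sampled_end = music_vae.sample(n=1, length=32, temperature=1.0)[0]
interp_seqs = music_vae.interpolate(twinkle_twinkle, sampled_end,
                                    num_steps=num_steps, length=32)
interp_ns = mm.sequences_lib.concatenate_sequences(interp_seqs)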
Example #14
class MusicVae:
    """
    Author: Tanish and Akshit
    Last Modified: 02/02/21
    Version: 1.2
    Class to wrap the music vae trained from magenta
    """
    def __init__(self):
        """
        Loads and initializes the vae
        """
        print("Initializing Music VAE...")
        self.music_vae = TrainedModel(
            configs.CONFIG_MAP['cat-mel_2bar_big'],
            batch_size=4,
            checkpoint_dir_or_path='/content/mel_2bar_big.ckpt')
        print('🎉 Done!')

    def generate(self, n=2, length=80, temperature=1.0):
        """
        Generates a random music sequence

        Args:
            n: number of samples to generate 
                type: int
            length: length of each sample
                type: int
            temperature: empirical magnitude of randomness in generated sequences
                type: float
        Returns:
            List[NoteSequence] of generated music
        """
        generated_sequences = self.music_vae.sample(n=n, length=length,
                                                    temperature=temperature)
        for ns in generated_sequences:
            note_seq.play_sequence(ns, synth=note_seq.fluidsynth)
        return generated_sequences

    def interpolate(self, sequence_one, sequence_two, num_steps=8):
        """
        Interpolates between two music sequences

        Args:
            sequence_one: first sequence
                type: NoteSequence object
            sequence_two: second sequence
                type: NoteSequence object
            num_steps: number of sequences to interpolate through
                type: int
        Returns:
            NoteSequence object of interpolated music
        """
        # This gives us a list of sequences.
        note_sequences = self.music_vae.interpolate(sequence_one,
                                                    sequence_two,
                                                    num_steps=num_steps,
                                                    length=32)

        # Concatenate them into one long sequence, with the start and
        # end sequences at each end.
        interp_seq = note_seq.sequences_lib.concatenate_sequences(
            note_sequences)

        note_seq.play_sequence(interp_seq, synth=note_seq.fluidsynth)
        return interp_seq
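
# Example usage of the wrapper (hypothetical; assumes the checkpoint path used
# in __init__ is available):
# vae = MusicVae()
# samples = vae.generate(n=2)
# combined = vae.interpolate(samples[0], samples[1], num_steps=8)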
Example #15
class MusicVAE(MambaMagentaModel):
    """
    ## Music Variational Autoencoder
    Paper at: https://arxiv.org/abs/1803.05428

    Now this is a unique model. And definitely the fan favorite.

    """
    def __init__(self, genre, args=None, is_conditioned=True, info=None,
                 is_empty_model=False):
        super(MusicVAE, self).__init__(genre, args, info, is_empty_model=is_empty_model)
        self.title = "music_vae"
        self.get_model()
        self.is_conditioned = is_conditioned
        self.initialize()

    def slerp(self, p0, p1, t):
        """
        Spherical linear interpolation in the latent space, will help decode
        and generate the models later on.
        """
        omega = np.arccos(np.dot(np.squeeze(p0/np.linalg.norm(p0)), np.squeeze(p1/np.linalg.norm(p1))))
        so = np.sin(omega)
        return np.sin((1.0 - t)*omega) / so * p0 + np.sin(t * omega)/so * p1

    def chord_encoding(self, chord):
        index = mm.TriadChordOneHotEncoding().encode_event(chord)
        c = np.zeros([TOTAL_STEPS, CHORD_DEPTH])
        c[0, 0] = 1.0
        c[1:, index] = 1.0
        return c

    def fix_instruments_for_concatenation(self, note_sequences):
        instruments = {}
        for i in range(len(note_sequences)):
            for note in note_sequences[i].notes:
                if not note.is_drum:
                    if note.program not in instruments:
                        if len(instruments) >= 8:
                            instruments[note.program] = len(instruments) + 2
                        else:
                            instruments[note.program] = len(instruments) + 1
                    note.instrument = instruments[note.program]
                else:
                    note.instrument = 9

    def get_model(self, model_string="music_vae"):

        # models folder already exists with this repository.
        os.chdir("models")
        dir_name = os.getcwd()
        files = os.listdir(dir_name)
        expected_files = ['model_fb256.ckpt.index',
                          'model_fb256.ckpt.meta',
                          'model_chords_fb64.ckpt.meta',
                          'model_chords_fb64.ckpt.index',
                          'model_fb256.ckpt.data-00000-of-00001',
                          'model_chords_fb64.ckpt.data-00000-of-00001']

        # if the length of this is 6, no need to redownload checkpoints
        set_len = len(set(files).intersection(set(expected_files)))

        if set_len != 6:
            print("Getting checkpoints. Please wait..")
            os.system(f"gsutil -q -m cp gs://download.magenta.tensorflow.org/models/music_vae/multitrack/* {dir_name}")
            print("Successfully retrieved all checkpoints")
            self.model_name = f"{model_string}"
        else:
            print("Checkpoints already exist in model folder!")
            self.model_name = f"{model_string}"
        os.chdir("..")

    def initialize(self):
        if self.is_conditioned:
            config_string = 'hier-multiperf_vel_1bar_med_chords'
            ckpt_string = 'model_chords_fb64.ckpt'

        else:
            config_string = 'hier-multiperf_vel_1bar_med'
            ckpt_string = 'model_fb256.ckpt'

        config = configs.CONFIG_MAP[config_string]
        self.model = TrainedModel(
                        config, batch_size=BATCH_SIZE,
                        checkpoint_dir_or_path=f'models/{ckpt_string}')

        if not self.is_conditioned:
            self.model._config.data_converter._max_tensors_per_input = None

    def generate(self, empty=False,
                 num_bars=64, temperature=0.5, backup_seq=None,
                 chord_arr=None):
        # Interpolation, Repeating Chord Progression
        if chord_arr is None:
            if backup_seq is not None:
                self.sequence = copy.deepcopy(backup_seq)

            if hasattr(self, 'temperature'):
                temperature = self.temperature
            copy_sequence = copy.deepcopy(self.sequence)

            quantized_sequence = mm.quantize_note_sequence(copy_sequence, 8)
            # infer chords for sequence is a bit more natural
            mm.infer_chords_for_sequence(quantized_sequence)

            chords = []
            for annotation in quantized_sequence.text_annotations:
                if annotation.annotation_type == CHORD_SYMBOL:
                    chord_name = annotation.text
                    chords.append(chord_name)
        else:
            # follow a user defined chord progression
            chords = chord_arr
        mod_val = len(chords)
        z1 = np.random.normal(size=[Z_SIZE])
        z2 = np.random.normal(size=[Z_SIZE])
        z = np.array([self.slerp(z1, z2, t)
                    for t in np.linspace(0, 1, num_bars)])

        seqs = [
            self.model.decode(length=TOTAL_STEPS, z=z[i:i+1, :], temperature=temperature,
                        c_input=self.chord_encoding(chords[i % mod_val]))[0]
            for i in range(num_bars)
        ]

        self.fix_instruments_for_concatenation(seqs)
        prog_ns = concatenate_sequences(seqs)
        request_dict = self.put_request_dict
        generated_sequence_2_mp3(prog_ns, f"{self.unique_id}",
                                 request_dict=request_dict)

    def trim_sequence(self, seq, num_seconds=12.0):
        seq = mm.extract_subsequence(seq, 0.0, num_seconds)
        seq.total_time = num_seconds
        return seq
Example #16
        if note.program not in instruments:
          if len(instruments) >= 8:
            instruments[note.program] = len(instruments) + 2
          else:
            instruments[note.program] = len(instruments) + 1
        note.instrument = instruments[note.program]
      else:
        note.instrument = 9

"""# Chord-Conditioned Model"""

#@title Load Checkpoint

config = configs.CONFIG_MAP['hier-multiperf_vel_1bar_med_chords']
model = TrainedModel(
    config, batch_size=BATCH_SIZE,
    checkpoint_dir_or_path='/content/model_chords_fb64.ckpt')

#@title Same Chord, Random Styles

chord = 'C' #@param {type:"string"}
temperature = 0.2 #@param {type:"slider", min:0.01, max:1.5, step:0.01}
seqs = model.sample(n=BATCH_SIZE, length=TOTAL_STEPS, temperature=temperature,
                    c_input=chord_encoding(chord))

trim_sequences(seqs)
play(seqs)

#@title Same Style, Chord Progression

chord_1 = 'C' #@param {type:"string"}
Example #17
# Magenta specific stuff
from magenta.models.music_vae import configs
from magenta.models.music_vae.trained_model import TrainedModel
from magenta import music as mm
from magenta.music import midi_synth
from magenta.music import midi_io

# Load some configs to be used later
dc_tap = configs.CONFIG_MAP['groovae_2bar_tap_fixed_velocity'].data_converter

# load model
GROOVAE_2BAR_TAP_FIXED_VELOCITY = "groovae_2bar_tap_fixed_velocity.tar"
config_2bar_tap = configs.CONFIG_MAP['groovae_2bar_tap_fixed_velocity']
groovae_2bar_tap = TrainedModel(config_2bar_tap,
                                1,
                                checkpoint_dir_or_path="models/" +
                                GROOVAE_2BAR_TAP_FIXED_VELOCITY)


# Calculate quantization steps but do not remove microtiming
def quantize(s, steps_per_quarter=4):
    return mm.sequences_lib.quantize_note_sequence(s, steps_per_quarter)


def is_4_4(s):
    ts = s.time_signatures[0]
    return (ts.numerator == 4 and ts.denominator == 4)


# Some midi files come by default from different instrument channels
# Quick and dirty way to set midi files to be recognized as drums
"""# 2-Bar Drums Model

Below are 4 pre-trained models to experiment with. The first 3 map the 61 MIDI drum "pitches" to a reduced set of 9 classes (bass, snare, closed hi-hat, open hi-hat, low tom, mid tom, high tom, crash cymbal, ride cymbal) for a simplified but less expressive output space. The last model uses a [NADE](http://homepages.inf.ed.ac.uk/imurray2/pub/11nade/) to represent all possible MIDI drum "pitches".

* **drums_2bar_oh_lokl**: This *low* KL model was trained for more *realistic* sampling. The output is a one-hot encoding of 2^9 combinations of hits. It has a single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM decoder with 256 nodes in each layer, and a Z with 256 dimensions. During training it was given 0 free bits, and had a fixed beta value of 0.8. After 300k steps, the final accuracy is 0.73 and KL divergence is 11 bits.
* **drums_2bar_oh_hikl**: This *high* KL model was trained for *better reconstruction and interpolation*. The output is a one-hot encoding of 2^9 combinations of hits. It has a single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM decoder with 256 nodes in each layer, and a Z with 256 dimensions. During training it was given 96 free bits and had a fixed beta value of 0.2. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 300k steps, the final accuracy is 0.97 and KL divergence is 107 bits.
* **drums_2bar_nade_reduced**: This model outputs a multi-label "pianoroll" with 9 classes. It has a single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM-NADE decoder with 512 nodes in each layer and a 9-dimensional NADE with 128 hidden units, and a Z with 256 dimensions. During training it was given 96 free bits and had a fixed beta value of 0.2. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 300k steps, the final accuracy is 0.98 and KL divergence is 110 bits.
* **drums_2bar_nade_full**: The output is a multi-label "pianoroll" with 61 classes. It has a single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM-NADE decoder with 512 nodes in each layer and a 61-dimensional NADE with 128 hidden units, and a Z with 256 dimensions. During training it was given 0 free bits and had a fixed beta value of 0.2. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 300k steps, the final accuracy is 0.90 and KL divergence is 116 bits.
"""

#@title Load Pretrained Models

drums_models = {}
# One-hot encoded.
drums_config = configs.CONFIG_MAP['cat-drums_2bar_small']
drums_models['drums_2bar_oh_lokl'] = TrainedModel(drums_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_small.lokl.ckpt')
drums_models['drums_2bar_oh_hikl'] = TrainedModel(drums_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_small.hikl.ckpt')

# Multi-label NADE.
drums_nade_reduced_config = configs.CONFIG_MAP['nade-drums_2bar_reduced']
drums_models['drums_2bar_nade_reduced'] = TrainedModel(drums_nade_reduced_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_nade.reduced.ckpt')
drums_nade_full_config = configs.CONFIG_MAP['nade-drums_2bar_full']
drums_models['drums_2bar_nade_full'] = TrainedModel(drums_nade_full_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_nade.full.ckpt')

"""## Generate Samples"""

#@title Generate 4 samples from the prior of one of the models listed above.
drums_sample_model = "drums_2bar_oh_lokl" #@param ["drums_2bar_oh_lokl", "drums_2bar_oh_hikl", "drums_2bar_nade_reduced", "drums_2bar_nade_full"]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
drums_samples = drums_models[drums_sample_model].sample(n=4, length=32, temperature=temperature)
for ns in drums_samples:
Example #19
def gen_chords():
    form = ChordForm()
    print("Chords CHOICES from form:" + str(form.chord_1.choices))
    form.chord_2.choices = [(chord_id, chord_name)
                            for chord_id, chord_name in form.chord_1.choices]
    form.chord_3.choices = [(chord_id, chord_name)
                            for chord_id, chord_name in form.chord_1.choices]
    form.chord_4.choices = [(chord_id, chord_name)
                            for chord_id, chord_name in form.chord_1.choices]
    if form.validate_on_submit():
        chord_1 = form.chord_1.data
        chord_2 = form.chord_2.data
        chord_3 = form.chord_3.data
        chord_4 = form.chord_4.data
        chords = [chord_1, chord_2, chord_3, chord_4]

        num_bars = form.num_bars.data
        temperature = form.temperature.data

        config = configs.CONFIG_MAP['hier-multiperf_vel_1bar_med_chords']
        weight_name = 'model_chords_fb64.ckpt'
        model = TrainedModel(config,
                             batch_size=BATCH_SIZE,
                             checkpoint_dir_or_path=os.path.join(
                                 app.root_path, 'content', weight_name))

        z1 = np.random.normal(size=[Z_SIZE])
        z2 = np.random.normal(size=[Z_SIZE])
        z = np.array([slerp(z1, z2, t) for t in np.linspace(0, 1, num_bars)])

        seqs = [
            model.decode(length=TOTAL_STEPS,
                         z=z[i:i + 1, :],
                         temperature=temperature,
                         c_input=chord_encoding(chords[i % 4]))[0]
            for i in range(num_bars)
        ]

        trim_sequences(seqs)
        fix_instruments_for_concatenation(seqs)
        prog_interp_ns = concatenate_sequences(seqs)

        f_ext = '.mid'
        random_hex = secrets.token_hex(8)
        music_fn = random_hex + f_ext
        music_mp3 = random_hex + ".mp3"

        #Save music to disk
        mm.sequence_proto_to_midi_file(prog_interp_ns, music_fn)

        ### Move audio to a specified path
        source_path = "/home/pratheesh/Flask_Blog/" + music_mp3
        destination_path = os.path.join(app.root_path, 'static/music',
                                        music_mp3)

        cmd_to_wav = "fluidsynth -F " + random_hex + ".wav /usr/share/sounds/sf2/FluidR3_GM.sf2 " + music_fn
        print(cmd_to_wav)
        os.system(cmd_to_wav)
        cmd_to_mp3 = "lame --preset standard " + random_hex + ".wav " + random_hex + ".mp3"
        print(cmd_to_mp3)
        os.system(cmd_to_mp3)
        #shutil.move(source_path, destination_path)
        #os.replace(source_path, destination_path)
        print("moving file")
        os.rename(source_path, destination_path)
        os.remove(music_fn)
        os.remove(random_hex + ".wav")

        music_file = url_for('static', filename='music/' + music_mp3)

        return render_template('music_output.html', music_file=music_file)

    return render_template('gen_chords.html', form=form)
Example #20
teapot.notes.add(pitch=74, start_time=1.5, end_time=2, velocity=80)
teapot.notes.add(pitch=76, start_time=2, end_time=2.5, velocity=80)
teapot.notes.add(pitch=81, start_time=3, end_time=4, velocity=80)
teapot.notes.add(pitch=78, start_time=4, end_time=5, velocity=80)
teapot.notes.add(pitch=81, start_time=5, end_time=6, velocity=80)
teapot.notes.add(pitch=76, start_time=6, end_time=8, velocity=80)
teapot.total_time = 8

teapot.tempos.add(qpm=60)

#mm.plot_sequence(teapot)
#mm.play_sequence(teapot,synth=mm.synthesize)
# Initialize the model.
print("Initializing Music VAE...")
music_vae = TrainedModel(configs.CONFIG_MAP['cat-mel_2bar_big'],
                         batch_size=4,
                         checkpoint_dir_or_path='mel_2bar_big.ckpt')

print('🎉 Done!')

generated_sequences = music_vae.sample(n=2, length=80, temperature=1.0)

for ns in generated_sequences:
    # print(ns)
    #mm.plot_sequence(ns)
    #mm.play_sequence(ns, synth=mm.fluidsynth)

    # We're going to interpolate between the Twinkle Twinkle Little Star
    # NoteSequence we defined in the first section, and one of the generated
    # sequences from the previous VAE example
Example #21
tf.enable_eager_execution()

# Load the full GMD with MIDI only (no audio) as a tf.data.Dataset
dataset = tfds.load(name="groove/4bar-midionly",
                    split=tfds.Split.TRAIN,
                    try_gcs=True)

# features = dataset.take(20)
# for f in features:
# 	print(f['style'])

mel_16bar_models = {}
groovae_4bar_config = configs.CONFIG_MAP['groovae_4bar']
mel_16bar_models['groovae_4bar'] = TrainedModel(
    groovae_4bar_config,
    batch_size=1,
    checkpoint_dir_or_path='/Users/rowancheung/Downloads/groovae_4bar.tar')


def play(note_sequence):
    mm.play_sequence(note_sequence, synth=mm.fluidsynth)


def convert_midi(midi):
    return mm.midi_io.midi_to_note_sequence(midi)


def gen_genre_vec(genre):
    vecs = None
    count = 0
    for f in dataset: