Example #1
def landing():
    form = LandingForm()
    if form.validate_on_submit():
        num_bars = form.num_bars.data
        temperature = form.temperature.data
        config = configs.CONFIG_MAP['hier-multiperf_vel_1bar_med']
        weight_name = 'model_fb256.ckpt'
        model = TrainedModel(config,
                             batch_size=BATCH_SIZE,
                             checkpoint_dir_or_path=os.path.join(
                                 app.root_path, 'content', weight_name))
        model._config.data_converter._max_tensors_per_input = None

        z1 = np.random.normal(size=[Z_SIZE])
        z2 = np.random.normal(size=[Z_SIZE])
        z = np.array([slerp(z1, z2, t) for t in np.linspace(0, 1, num_bars)])

        seqs = model.decode(length=TOTAL_STEPS, z=z, temperature=temperature)

        trim_sequences(seqs)
        fix_instruments_for_concatenation(seqs)
        interp_ns = concatenate_sequences(seqs)
        f_ext = '.mid'
        random_hex = secrets.token_hex(8)
        music_fn = random_hex + f_ext
        music_mp3 = random_hex + ".mp3"
        # Save the generated sequence to disk as a MIDI file
        mm.sequence_proto_to_midi_file(interp_ns, music_fn)

        # Paths for moving the finished MP3 into Flask's static folder; lame
        # writes the MP3 to the working directory, assumed here to be
        # /home/pratheesh/Flask_Blog/.
        source_path = "/home/pratheesh/Flask_Blog/" + music_mp3
        destination_path = os.path.join(app.root_path, 'static/music',
                                        music_mp3)

        cmd_to_wav = "fluidsynth -F " + random_hex + ".wav /usr/share/sounds/sf2/FluidR3_GM.sf2 " + music_fn
        print(cmd_to_wav)
        os.system(cmd_to_wav)
        cmd_to_mp3 = "lame --preset standard " + random_hex + ".wav " + random_hex + ".mp3"
        print(cmd_to_mp3)
        os.system(cmd_to_mp3)
        # Move the MP3 into the static folder and clean up the intermediates.
        # Note: os.rename requires source and destination to be on the same
        # filesystem; shutil.move would be the more robust choice.
        print("moving file")
        os.rename(source_path, destination_path)
        os.remove(music_fn)
        os.remove(random_hex + ".wav")

        music_file = url_for('static', filename='music/' + music_mp3)

        return render_template('music_output.html', music_file=music_file)

    return render_template('landing.html', form=form)
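Example #1 leans on helpers and constants from Magenta's multitrack MusicVAE Colab that aren't shown (`slerp`, `trim_sequences`, `fix_instruments_for_concatenation`, `BATCH_SIZE`, `Z_SIZE`, `TOTAL_STEPS`). A minimal sketch of the trimming helper and the constants, following that Colab's conventions; the exact values are assumptions and may differ in your setup:

# Sketch of the helpers the example assumes; the values follow the multitrack
# MusicVAE Colab's conventions and are assumptions here.
BATCH_SIZE = 4
Z_SIZE = 512        # latent dimensionality of the multitrack checkpoints
TOTAL_STEPS = 512   # decoder steps per generated bar
BAR_SECONDS = 2.0   # length each generated bar is trimmed to

def trim_sequences(seqs, num_seconds=BAR_SECONDS):
    # Trim every generated NoteSequence to a fixed-length bar, in place.
    for i in range(len(seqs)):
        seqs[i] = mm.extract_subsequence(seqs[i], 0.0, num_seconds)
        seqs[i].total_time = num_seconds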
Example #2
def decode_vectors(model_file, vectors, concatenate=False):
    temperature = 1.0  #@param {type:"slider", min:0.1, max:1.5}
    config = configs.CONFIG_MAP['flat-mel_16bar']
    model = TrainedModel(config,
                         batch_size=512,
                         checkpoint_dir_or_path=model_file)
    # Pass the temperature through; in the original it was defined but unused.
    resulting_midis = model.decode(vectors, length=256, temperature=temperature)
    if concatenate:
        concatenated_midis = concatenate_sequences(resulting_midis)
        download(concatenated_midis, "concatenated_midi.mid")
        print("created 1 midi.")
    else:
        for i, p in enumerate(resulting_midis):
            download(p, "newly_created_" + str(i) + ".mid")
        # Report once, after all files have been written.
        print("created " + str(len(resulting_midis)) + " midis.")
Example #3
trim_sequences(seqs)
play(seqs)

#@title Same Style, Chord Progression

chord_1 = 'C' #@param {type:"string"}
chord_2 = 'Caug' #@param {type:"string"}
chord_3 = 'Am' #@param {type:"string"}
chord_4 = 'E' #@param {type:"string"}
chords = [chord_1, chord_2, chord_3, chord_4]

temperature = 0.2 #@param {type:"slider", min:0.01, max:1.5, step:0.01}
z = np.random.normal(size=[1, Z_SIZE])
seqs = [
    model.decode(length=TOTAL_STEPS, z=z, temperature=temperature,
                 c_input=chord_encoding(c))[0]
    for c in chords
]

trim_sequences(seqs)
fix_instruments_for_concatenation(seqs)
prog_ns = concatenate_sequences(seqs)

play(prog_ns)
mm.plot_sequence(prog_ns)

#@title (Optional) Save Arrangement to MIDI
download(prog_ns, '_'.join(chords) + '.mid')

#@title Style Interpolation, Repeating Chord Progression
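The snippet stops at the cell title, so the body of that cell isn't shown. Based on the surrounding Colab pattern (and the loop in Example #4 below), it typically interpolates between two latent codes while cycling a chord progression; a sketch, where the chord list and slider values are assumptions and `model` is the chord-conditioned checkpoint:

chords = ['C', 'Bb', 'F', 'C', 'F', 'Bb', 'C', 'F']  # assumed progression
num_bars = 32      #@param {type:"slider", min:4, max:64, step:4}
temperature = 0.2  #@param {type:"slider", min:0.01, max:1.5, step:0.01}

# One latent code per bar, moving smoothly from z1 to z2.
z1 = np.random.normal(size=[Z_SIZE])
z2 = np.random.normal(size=[Z_SIZE])
z = np.array([slerp(z1, z2, t) for t in np.linspace(0, 1, num_bars)])

seqs = [
    model.decode(length=TOTAL_STEPS, z=z[i:i + 1, :], temperature=temperature,
                 c_input=chord_encoding(chords[i % len(chords)]))[0]
    for i in range(num_bars)
]

trim_sequences(seqs)
fix_instruments_for_concatenation(seqs)
prog_interp_ns = concatenate_sequences(seqs)
play(prog_interp_ns)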
Example #4
def gen_chords():
    form = ChordForm()
    print("Chords CHOICES from form:" + str(form.chord_1.choices))
    form.chord_2.choices = [(chord_id, chord_name)
                            for chord_id, chord_name in form.chord_1.choices]
    form.chord_3.choices = [(chord_id, chord_name)
                            for chord_id, chord_name in form.chord_1.choices]
    form.chord_4.choices = [(chord_id, chord_name)
                            for chord_id, chord_name in form.chord_1.choices]
    if form.validate_on_submit():
        chord_1 = form.chord_1.data
        chord_2 = form.chord_2.data
        chord_3 = form.chord_3.data
        chord_4 = form.chord_4.data
        chords = [chord_1, chord_2, chord_3, chord_4]

        num_bars = form.num_bars.data
        temperature = form.temperature.data

        config = configs.CONFIG_MAP['hier-multiperf_vel_1bar_med_chords']
        weight_name = 'model_chords_fb64.ckpt'
        model = TrainedModel(config,
                             batch_size=BATCH_SIZE,
                             checkpoint_dir_or_path=os.path.join(
                                 app.root_path, 'content', weight_name))

        z1 = np.random.normal(size=[Z_SIZE])
        z2 = np.random.normal(size=[Z_SIZE])
        z = np.array([slerp(z1, z2, t) for t in np.linspace(0, 1, num_bars)])

        seqs = [
            model.decode(length=TOTAL_STEPS,
                         z=z[i:i + 1, :],
                         temperature=temperature,
                         c_input=chord_encoding(chords[i % 4]))[0]
            for i in range(num_bars)
        ]

        trim_sequences(seqs)
        fix_instruments_for_concatenation(seqs)
        prog_interp_ns = concatenate_sequences(seqs)

        f_ext = '.mid'
        random_hex = secrets.token_hex(8)
        music_fn = random_hex + f_ext
        music_mp3 = random_hex + ".mp3"

        # Save the generated sequence to disk as a MIDI file
        mm.sequence_proto_to_midi_file(prog_interp_ns, music_fn)

        # Paths for moving the finished MP3 into Flask's static folder; lame
        # writes the MP3 to the working directory, assumed here to be
        # /home/pratheesh/Flask_Blog/.
        source_path = "/home/pratheesh/Flask_Blog/" + music_mp3
        destination_path = os.path.join(app.root_path, 'static/music',
                                        music_mp3)

        cmd_to_wav = "fluidsynth -F " + random_hex + ".wav /usr/share/sounds/sf2/FluidR3_GM.sf2 " + music_fn
        print(cmd_to_wav)
        os.system(cmd_to_wav)
        cmd_to_mp3 = "lame --preset standard " + random_hex + ".wav " + random_hex + ".mp3"
        print(cmd_to_mp3)
        os.system(cmd_to_mp3)
        # Move the MP3 into the static folder and clean up the intermediates.
        # Note: os.rename requires source and destination to be on the same
        # filesystem; shutil.move would be the more robust choice.
        print("moving file")
        os.rename(source_path, destination_path)
        os.remove(music_fn)
        os.remove(random_hex + ".wav")

        music_file = url_for('static', filename='music/' + music_mp3)

        return render_template('music_output.html', music_file=music_file)

    return render_template('gen_chords.html', form=form)
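Examples #1 and #4 shell out via `os.system` with string concatenation, which silently ignores failures and breaks on unusual filenames. A sketch of the same fluidsynth-then-lame pipeline using `subprocess.run` (the soundfont path and lame preset are taken unchanged from the examples; the helper name is hypothetical):

import os
import subprocess

def midi_to_mp3(random_hex, music_fn,
                soundfont="/usr/share/sounds/sf2/FluidR3_GM.sf2"):
    wav_fn = random_hex + ".wav"
    mp3_fn = random_hex + ".mp3"
    # Render the MIDI to WAV, then encode the WAV to MP3. check=True raises
    # CalledProcessError instead of continuing past a failed render.
    subprocess.run(["fluidsynth", "-F", wav_fn, soundfont, music_fn], check=True)
    subprocess.run(["lame", "--preset", "standard", wav_fn, mp3_fn], check=True)
    os.remove(wav_fn)
    return mp3_fn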
Example #5
class MusicVAE(MambaMagentaModel):
    """
    ## Music Variational Autoencoder
    Paper at: https://arxiv.org/abs/1803.05428

    This is a unique model, and definitely the fan favorite.

    """
    def __init__(self, genre, args=None, is_conditioned=True, info=None,
                 is_empty_model=False):
        super(MusicVAE, self).__init__(genre, args, info, is_empty_model=is_empty_model)
        self.title = "music_vae"
        self.get_model()
        self.is_conditioned = is_conditioned
        self.initialize()

    def slerp(self, p0, p1, t):
        """
        Spherical linear interpolation in the latent space; used later when
        decoding and generating sequences.
        """
        omega = np.arccos(np.dot(np.squeeze(p0 / np.linalg.norm(p0)),
                                 np.squeeze(p1 / np.linalg.norm(p1))))
        so = np.sin(omega)
        return np.sin((1.0 - t) * omega) / so * p0 + np.sin(t * omega) / so * p1

    def chord_encoding(self, chord):
        # One-hot chord conditioning over TOTAL_STEPS decoder steps; the first
        # step is set to index 0, i.e. no chord.
        index = mm.TriadChordOneHotEncoding().encode_event(chord)
        c = np.zeros([TOTAL_STEPS, CHORD_DEPTH])
        c[0, 0] = 1.0
        c[1:, index] = 1.0
        return c

    def fix_instruments_for_concatenation(self, note_sequences):
        # Map each MIDI program to a stable instrument track across all the
        # sequences, skipping track 9, which is reserved for drums.
        instruments = {}
        for i in range(len(note_sequences)):
            for note in note_sequences[i].notes:
                if not note.is_drum:
                    if note.program not in instruments:
                        if len(instruments) >= 8:
                            instruments[note.program] = len(instruments) + 2
                        else:
                            instruments[note.program] = len(instruments) + 1
                    note.instrument = instruments[note.program]
                else:
                    note.instrument = 9

    def get_model(self, model_string="music_vae"):

        # models folder already exists with this repository.
        os.chdir("models")
        dir_name = os.getcwd()
        files = os.listdir(dir_name)
        expected_files = ['model_fb256.ckpt.index',
                          'model_fb256.ckpt.meta',
                          'model_chords_fb64.ckpt.meta',
                          'model_chords_fb64.ckpt.index',
                          'model_fb256.ckpt.data-00000-of-00001',
                          'model_chords_fb64.ckpt.data-00000-of-00001']

        # if the length of this is 6, no need to redownload checkpoints
        set_len = len(set(files).intersection(set(expected_files)))

        if set_len != 6:
            print("Getting checkpoints. Please wait..")
            os.system(f"gsutil -q -m cp gs://download.magenta.tensorflow.org/models/music_vae/multitrack/* {dir_name}")
            print("Successfully retrieved all checkpoints")
        else:
            print("Checkpoints already exist in model folder!")
        self.model_name = model_string
        os.chdir("..")

    def initialize(self):
        if self.is_conditioned:
            config_string = 'hier-multiperf_vel_1bar_med_chords'
            ckpt_string = 'model_chords_fb64.ckpt'
        else:
            config_string = 'hier-multiperf_vel_1bar_med'
            ckpt_string = 'model_fb256.ckpt'

        config = configs.CONFIG_MAP[config_string]
        self.model = TrainedModel(
                        config, batch_size=BATCH_SIZE,
                        checkpoint_dir_or_path=f'models/{ckpt_string}')

        if not self.is_conditioned:
            self.model._config.data_converter._max_tensors_per_input = None

    def generate(self, empty=False,
                 num_bars=64, temperature=0.5, backup_seq=None,
                 chord_arr=None):
        # Interpolation, Repeating Chord Progression
        if chord_arr is None:
            if backup_seq is not None:
                self.sequence = copy.deepcopy(backup_seq)

            if hasattr(self, 'temperature'):
                temperature = self.temperature
            copy_sequence = copy.deepcopy(self.sequence)

            quantized_sequence = mm.quantize_note_sequence(copy_sequence, 8)
            # Inferring chords from the sequence itself yields a more natural
            # progression than imposing an arbitrary one.
            mm.infer_chords_for_sequence(quantized_sequence)

            chords = []
            for annotation in quantized_sequence.text_annotations:
                if annotation.annotation_type == CHORD_SYMBOL:
                    chord_name = annotation.text
                    chords.append(chord_name)
        else:
            # follow a user defined chord progression
            chords = chord_arr
        mod_val = len(chords)
        z1 = np.random.normal(size=[Z_SIZE])
        z2 = np.random.normal(size=[Z_SIZE])
        z = np.array([self.slerp(z1, z2, t)
                      for t in np.linspace(0, 1, num_bars)])

        seqs = [
            self.model.decode(length=TOTAL_STEPS, z=z[i:i + 1, :],
                              temperature=temperature,
                              c_input=self.chord_encoding(chords[i % mod_val]))[0]
            for i in range(num_bars)
        ]

        self.fix_instruments_for_concatenation(seqs)
        prog_ns = concatenate_sequences(seqs)
        request_dict = self.put_request_dict
        generated_sequence_2_mp3(prog_ns, f"{self.unique_id}",
                                 request_dict=request_dict)

    def trim_sequence(self, seq, num_seconds=12.0):
        # extract_subsequence returns a new NoteSequence rather than trimming
        # in place, so the result must be returned to have any effect.
        seq = mm.extract_subsequence(seq, 0.0, num_seconds)
        seq.total_time = num_seconds
        return seq
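A usage sketch for the class above; it assumes the surrounding project's environment (`generated_sequence_2_mp3`, `put_request_dict`, checkpoints under models/), and the genre and chord progression here are hypothetical:

# Chord-conditioned generation over a repeating four-chord progression.
vae = MusicVAE("jazz", is_conditioned=True)
vae.generate(num_bars=16, temperature=0.5,
             chord_arr=['C', 'G', 'Am', 'F'])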