Example 1
    def predict(self, input, count, temp):
        songs_in_db_cnt = len(get_songs_by_author(self.db_name))
        to_generate = count

        for j in range(songs_in_db_cnt, songs_in_db_cnt + to_generate):

            # Sample a latent vector and decode it with the trained decoder.
            noise = np.random.normal(size=self.latent_dim)
            noise = np.expand_dims(noise, 0)  # add a batch dimension
            pred = self.model.predict(noise)

            predicted = []
            # pred is a batch of per-timestep probability distributions;
            # sample one vocabulary index from each timestep.
            for batch in pred:
                for step_probs in batch:
                    index = sample(step_probs, temp)
                    if self.mapper_list is not None:
                        # The index of mapper_list is the new value and the
                        # element is the old value; this is used when outliers
                        # have been filtered out of the vocabulary.
                        index = self.mapper_list[index]
                    pred_note = get_key_from_value(index, self.mapper)
                    predicted.append(pred_note)

            midi_path = f'MusicVAE_{self.instrument_name}_{j}.mid'
            create_midi_with_embedded_durations(
                predicted,
                target_instrument=self.target_instrument,
                filename=midi_path)

            change_midi_instrument(midi_path, self.target_instrument)
            midi_to_wav(
                midi_path,
                f'static/songs/MusicVAE_{self.instrument_name}_{j}.wav')

            self.save_song_to_db(f'MusicVAE_{self.instrument_name}_{j}.wav')
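
The sample and get_key_from_value helpers used above are defined elsewhere in the project. A minimal sketch of what they could look like, assuming standard temperature sampling over a probability vector and a {symbol: index} mapper dict (both assumptions, not the project's actual implementation):

import numpy as np

def sample(preds, temperature=1.0):
    # Assumed helper: re-weight a probability distribution by temperature,
    # then draw a single index from it. Lower temperature -> safer choices.
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds + 1e-8) / temperature  # epsilon avoids log(0)
    exp_preds = np.exp(preds)
    probs = exp_preds / np.sum(exp_preds)       # renormalize to a distribution
    return int(np.argmax(np.random.multinomial(1, probs, 1)))

def get_key_from_value(value, mapper):
    # Assumed helper: invert a {symbol: index} mapping.
    return next(key for key, val in mapper.items() if val == value)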
Example 2
    def predict(self, input, count, temp, length=500):
        songs_in_db_cnt = len(get_songs_by_author(self.db_name))
        to_generate = count

        for j in range(songs_in_db_cnt, songs_in_db_cnt + to_generate):

            generated_output = generate_notes(self.model,
                                              input,
                                              self.mapper,
                                              self.mapper_list,
                                              temp=temp,
                                              length=length,
                                              normalize=False)

            midi_path = f'Transformer_{self.instrument_name}_{j}.mid'
            create_midi_with_embedded_durations(
                generated_output,
                target_instrument=self.target_instrument,
                filename=midi_path)

            change_midi_instrument(midi_path, self.target_instrument)
            midi_to_wav(
                midi_path,
                f'static/songs/Transformer_{self.instrument_name}_{j}.wav')

            self.save_song_to_db(f'Transformer_{self.instrument_name}_{j}.wav')
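
generate_notes is likewise defined elsewhere. A hypothetical sketch of an autoregressive variant, reusing the sample and get_key_from_value helpers sketched above (the window handling, normalization scheme, and model output shape are all assumptions):

import numpy as np

def generate_notes(model, seed, mapper, mapper_list, temp=1.0, length=500, normalize=False):
    # Assumed loop: predict the next-token distribution from the current
    # window, sample with temperature, and map indices back to symbols.
    window = list(seed)
    output = []
    for _ in range(length):
        x = np.expand_dims(np.asarray(window), 0)  # add a batch dimension
        if normalize:
            x = x / float(len(mapper))             # assumed normalization
        preds = model.predict(x)[0][-1]            # last timestep's distribution
        index = sample(preds, temp)
        window = window[1:] + [index]              # slide the context window
        if mapper_list is not None:
            index = mapper_list[index]             # undo outlier filtering
        output.append(get_key_from_value(index, mapper))
    return output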
Example 3
    def generate_music(self,
                       encoded_chord_string,
                       encoded_duration_string,
                       count,
                       length=200):
        songs_in_db_cnt = len(get_songs_by_author(self.db_name))
        to_generate = count

        for j in range(songs_in_db_cnt, songs_in_db_cnt + to_generate):
            chord_states = None
            duration_states = None
            # This is an ugly hack; don't try anything like this at home.
            # self.chordsmc.simulate() raises an exception roughly 90% of the
            # time because of floating-point rounding errors in the (very large)
            # transition matrix. The workaround is this retry loop, which spins
            # the CPU until a simulation succeeds.
            while chord_states is None:
                try:
                    ids, chord_states = self.chordsmc.simulate(
                        length,
                        tf=np.asarray(
                            self.chordsmc.observed_matrix).astype('float64'),
                        start=random.choice(encoded_chord_string))
                except Exception:
                    pass  # rounding-error failure, try again

            while duration_states is None:
                try:
                    durids, duration_states = self.durationmc.simulate(
                        length,
                        tf=np.asarray(
                            self.durationmc.observed_matrix).astype('float64'),
                        start=random.choice(encoded_duration_string))
                except Exception:
                    pass  # rounding-error failure, try again

            music = []
            musicdurations = []

            # Map the simulated state ids back to note and duration symbols.
            for i in chord_states:
                note = get_key_from_value(int(i), self.mapper)
                music.append(note)

            for i in duration_states:
                duration = get_key_from_value(int(i), self.duration_mapper)
                musicdurations.append(duration)

            midi_path = f'Markov_{self.instrument_name}_{j}.mid'
            create_midi_with_durations(music, musicdurations,
                                       self.target_instrument, midi_path)
            change_midi_instrument(midi_path, self.target_instrument)
            midi_to_wav(midi_path,
                        f'static/songs/Markov_{self.instrument_name}_{j}.wav')

            self.save_song_to_db(f'Markov_{self.instrument_name}_{j}.wav')
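
If the simulate() failures really do come from transition-matrix rows that no longer sum to exactly 1.0, an alternative to the retry loop is to renormalize the rows once up front (a sketch under that assumption):

import numpy as np

def renormalize_rows(matrix):
    # Rescale each row of a stochastic matrix so it sums to exactly 1.0,
    # compensating for accumulated floating-point error.
    m = np.asarray(matrix, dtype='float64')
    return m / m.sum(axis=1, keepdims=True)

The result could then be passed as the tf= argument, e.g. tf=renormalize_rows(self.chordsmc.observed_matrix).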
Example 4
    def predict(self, input, count):
        for idx, note_sequence in enumerate(input):
            midi_path = f'GPT-2_{self.instrument_name}_{idx}.mid'
            try:
                # Exceptions can occur because the GPT-2 model makes mistakes
                # while generating text, producing invalid MIDI notes.
                create_midi_with_embedded_durations(note_sequence, filename=midi_path)

                change_midi_instrument(midi_path, self.target_instrument)
                midi_to_wav(midi_path, f'static/songs/GPT-2_{self.instrument_name}_{idx}.wav')

                self.save_song_to_db(f'GPT-2_{self.instrument_name}_{idx}.wav')

            except Exception:
                # If an exception occurs, just skip that sample.
                pass
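
Silently dropping failed samples makes problems invisible. A small variation that logs what was skipped (the exception types raised by the MIDI helpers are not documented here, so this still catches Exception broadly):

import logging

logger = logging.getLogger(__name__)

def try_generate(step, sample_id):
    # Hypothetical wrapper: run one generation step and report failures
    # instead of discarding them silently.
    try:
        step()
        return True
    except Exception as exc:
        logger.warning('Skipping sample %s: %s', sample_id, exc)
        return False

Each iteration of the loop above could then be wrapped as try_generate(lambda: create_midi_with_embedded_durations(note_sequence, filename=midi_path), idx).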