Example #1
    def generate_music(self,
                       encoded_chord_string,
                       encoded_duration_string,
                       count,
                       length=200):
        # Continue numbering after the songs already in the DB so generated
        # filenames do not collide.
        songs_in_db_cnt = len(get_songs_by_author(self.db_name))
        to_generate = count

        for j in range(songs_in_db_cnt, songs_in_db_cnt + to_generate):
            chord_states = None
            duration_states = None
            # This is an ugly hack; don't try anything like this at home.
            # chordsmc.simulate() raises an exception roughly 90% of the time:
            # the transition matrix is so large that floating-point rounding
            # errors break the simulation. The workaround is this retry loop,
            # which spins the CPU until an error-free simulation comes back.
            while chord_states is None:
                try:
                    ids, chord_states = self.chordsmc.simulate(
                        length,
                        tf=np.asarray(
                            self.chordsmc.observed_matrix).astype('float64'),
                        start=random.choice(encoded_chord_string))
                except Exception:
                    pass

            # The same workaround, for the duration chain.
            while duration_states is None:
                try:
                    durids, duration_states = self.durationmc.simulate(
                        length,
                        tf=np.asarray(
                            self.durationmc.observed_matrix).astype('float64'),
                        start=random.choice(encoded_duration_string))
                except Exception:
                    pass

            music = []
            musicdurations = []

            # Map the numeric states back to note and duration symbols.
            for i in chord_states:
                note = get_key_from_value(int(i), self.mapper)
                music.append(note)

            for i in duration_states:
                duration = get_key_from_value(int(i), self.duration_mapper)
                musicdurations.append(duration)

            midi_path = f'Markov_{self.instrument_name}_{j}.mid'
            create_midi_with_durations(music, musicdurations,
                                       self.target_instrument, midi_path)
            change_midi_instrument(midi_path, self.target_instrument)
            midi_to_wav(midi_path,
                        f'static/songs/Markov_{self.instrument_name}_{j}.wav')

            self.save_song_to_db(f'Markov_{self.instrument_name}_{j}.wav')
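
Rather than spinning indefinitely, the retry loops above can be given a budget. A minimal sketch, reusing the simulate() interface from this listing; simulate_with_retries, max_retries, and the RuntimeError are assumptions, not part of the original code:

import random

import numpy as np


def simulate_with_retries(mc, length, encoded_string, max_retries=1000):
    # Retry mc.simulate() until it succeeds or the retry budget is exhausted,
    # instead of looping forever on a persistent failure.
    tf = np.asarray(mc.observed_matrix).astype('float64')
    for _ in range(max_retries):
        try:
            return mc.simulate(length, tf=tf,
                               start=random.choice(encoded_string))
        except Exception:
            continue
    raise RuntimeError('simulate() failed %d times in a row' % max_retries)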

Example #2

    def predict(self, input, count, temp):
        # Note: `input` shadows the Python built-in and is never used below;
        # generation is driven entirely by the random latent vector.
        songs_in_db_cnt = len(get_songs_by_author(self.db_name))
        to_generate = count

        for j in range(songs_in_db_cnt, songs_in_db_cnt + to_generate):

            # Decode a random latent-space vector into a sequence of symbols.
            noise = np.random.normal(size=self.latent_dim)
            noise = np.expand_dims(noise, 0)
            pred = self.model.predict(noise)

            predicted = []
            # Iterate over the batch dimension, then over each timestep's
            # probability vector, sampling one symbol per step.
            for i in pred:
                for k in i:
                    index = sample(k, temp)
                    if self.mapper_list is not None:
                        # The index of mapper_list is the new (filtered) value;
                        # the element is the original value. Used when outliers
                        # were filtered out of the vocabulary.
                        index = self.mapper_list[index]
                    pred_note = get_key_from_value(index, self.mapper)
                    predicted.append(pred_note)

            midi_path = f'MusicVAE_{self.instrument_name}_{j}.mid'
            create_midi_with_embedded_durations(
                predicted,
                target_instrument=self.target_instrument,
                filename=midi_path)

            change_midi_instrument(midi_path, self.target_instrument)
            midi_to_wav(
                midi_path,
                f'static/songs/MusicVAE_{self.instrument_name}_{j}.wav')

            self.save_song_to_db(f'MusicVAE_{self.instrument_name}_{j}.wav')
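
Every example here draws indices through a sample(prediction, temperature) helper that is not included in this listing. A common temperature-sampling implementation looks like the sketch below; this is an assumption about the author's version, not a reproduction of it:

import numpy as np


def sample(preds, temperature=1.0):
    # Flatten the model output to a 1-D probability vector, reweight it by
    # the temperature, renormalize, and draw a single index.
    preds = np.asarray(preds, dtype='float64').flatten()
    preds = np.log(preds + 1e-9) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    return int(np.argmax(np.random.multinomial(1, preds, 1)))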

Example #3

def decode_chords_using_mapper(numbers, mapper):
    outputnotes = []
    for number in numbers:
        outputnotes.append(
            chord_from_string(
                get_notes_from_chord(get_key_from_value(number, mapper))))

    return outputnotes
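
decode_chords_using_mapper (and every other example) depends on get_key_from_value, which is also not shown. A plausible implementation, assuming mapper is a symbol-to-integer dict (an assumption), is a reverse lookup:

def get_key_from_value(value, mapper):
    # mapper maps symbol -> integer id; return the symbol for a given id.
    for key, mapped_value in mapper.items():
        if mapped_value == value:
            return key
    return None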

Example #4

def generate_notes(model,
                   network_input,
                   mapper,
                   mapperlist=None,
                   temp=1.0,
                   length=500,
                   normalize=True,
                   random_start=True):

    # Seed generation from a random window of the training input, or use the
    # provided pattern as-is.
    if random_start:
        start = np.random.randint(0, len(network_input) - 1)
        pattern = network_input[start]
    else:
        pattern = network_input

    prediction_output = []

    for note_index in range(length):
        prediction_input = np.reshape(pattern, (1, len(pattern), 1))

        prediction = model.predict(prediction_input, verbose=0)
        index = sample(prediction, temp)

        if mapperlist is not None:
            # The index of mapperlist is the new (filtered) value; the element
            # is the original value. Used when outliers were filtered out.
            index = mapperlist[index]

        result = get_key_from_value(index, mapper)
        prediction_output.append(result)

        # Keep the rolling window normalized the same way the training data
        # was, then slide it forward by one step.
        if normalize:
            index = index / float(len(mapper))
        pattern = np.append(pattern, index)
        pattern = pattern[1:]

    return prediction_output
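
For reference, a typical call might look like the following; the argument values are illustrative only, and network_input is assumed to hold the normalized training windows:

generated = generate_notes(model,
                           network_input,
                           mapper,
                           mapperlist=None,
                           temp=0.8,
                           length=300)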

Example #5

def generate_multi_instrument_notes(model,
                                    starting_slice,
                                    starting_duration,
                                    starting_bass_slice,
                                    starting_drum_slice,
                                    mapper,
                                    duration_mapper,
                                    bass_mapper,
                                    drum_mapper,
                                    mapperlist=None,
                                    duration_mapper_list=None,
                                    temp=1.0,
                                    duration_temp=0.8,
                                    bass_temp=0.8,
                                    drum_temp=0.8,
                                    length=500):

    pattern = starting_slice
    duration_pattern = starting_duration
    bass_pattern = starting_bass_slice
    drum_pattern = starting_drum_slice
    prediction_output_notes = []
    prediction_output_durations = []
    prediction_output = []
    prediction_output_bass = []
    prediction_output_bass_ret = []
    prediction_output_drum = []
    prediction_output_drum_ret = []

    for note_index in range(length):
        # Reshape each rolling window to (batch, timesteps, features).
        prediction_input = np.reshape(pattern, (1, len(pattern), 1))
        prediction_duration_input = np.reshape(duration_pattern,
                                               (1, len(duration_pattern), 1))
        prediction_bass_input = np.reshape(bass_pattern,
                                           (1, len(bass_pattern), 1))
        prediction_drum_input = np.reshape(drum_pattern,
                                           (1, len(drum_pattern), 1))

        note_prediction, duration_prediction, bass_prediction, drum_prediction = model.predict(
            {
                "notes_in": prediction_input,
                "durations_in": prediction_duration_input,
                "bass_in": prediction_bass_input,
                "drum_in": prediction_drum_input
            })

        # Sample an index for each instrument with its own temperature.
        index = sample(note_prediction, temp)
        duration_index = sample(duration_prediction, duration_temp)
        bass_index = sample(bass_prediction, bass_temp)
        drum_index = sample(drum_prediction, drum_temp)

        if mapperlist is not None:
            # The index of mapperlist is the new (filtered) value; the element
            # is the original value. Used when outliers were filtered out.
            index = mapperlist[index]

        if duration_mapper_list is not None:
            duration_index = duration_mapper_list[duration_index]

        result = get_key_from_value(index, mapper)
        prediction_output_notes.append(result)

        duration_result = get_key_from_value(duration_index, duration_mapper)
        prediction_output_durations.append(duration_result)

        bass_result = get_key_from_value(bass_index, bass_mapper)
        prediction_output_bass.append(bass_result)

        drum_result = get_key_from_value(drum_index, drum_mapper)
        prediction_output_drum.append(drum_result)

        # Append the normalized sampled indices to each rolling window.
        pattern = np.append(pattern, index / float(len(mapper)))
        duration_pattern = np.append(
            duration_pattern, duration_index / float(len(duration_mapper)))
        bass_pattern = np.append(bass_pattern,
                                 bass_index / float(len(bass_mapper)))
        drum_pattern = np.append(drum_pattern,
                                 drum_index / float(len(drum_mapper)))

        # Slide every window forward by one step.
        pattern = pattern[1:]
        duration_pattern = duration_pattern[1:]
        bass_pattern = bass_pattern[1:]
        drum_pattern = drum_pattern[1:]

    # Pair each melody/bass/drum symbol with its duration, encoded as
    # 'symbol;duration' strings.
    for i in range(length):
        prediction_output.append(
            str(prediction_output_notes[i]) + ';' +
            str(prediction_output_durations[i]))
        prediction_output_bass_ret.append(
            str(prediction_output_bass[i]) + ';' +
            str(prediction_output_durations[i]))
        prediction_output_drum_ret.append(
            str(prediction_output_drum[i]) + ';' +
            str(prediction_output_durations[i]))

    return prediction_output, prediction_output_bass_ret, prediction_output_drum_ret
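
Each element of the three returned lists encodes a 'symbol;duration' pair. A hypothetical helper (not part of the listing) for splitting a track back into parallel symbol and duration lists:

def split_embedded_durations(track):
    # Every element has the form 'symbol;duration'.
    pairs = [element.split(';') for element in track]
    symbols = [p[0] for p in pairs]
    durations = [p[1] for p in pairs]
    return symbols, durations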