Example no. 1
(0 votes)
    def getSongOptionsInKey(self, key, scale):
        '''
        Return an array of unplayed songs that are as much in key as possible
        with the given key and scale.

        Songs in closely related keys are selected, together with songs one
        semitone below a closely related key (those can be pitch-shifted up by
        one semitone to match). If fewer than NUM_SONGS_IN_KEY_MINIMUM such
        songs remain, the entire unplayed pool is returned instead.

        :param key: target musical key (as used by the songcollection module)
        :param scale: target scale of that key
        :returns: numpy array of candidate song objects
        '''
        songs_in_key = []
        songs_unplayed = self.songsUnplayed
        keys_added = set()  # (key, scale) pairs already processed

        def addSongsInKey(key, scale):
            # Collect every unplayed song whose title is registered under
            # this key/scale, at most once per (key, scale) pair.
            if (key, scale) not in keys_added:
                titles = self.song_collection.get_titles_in_key(key, scale)
                songs_in_key.extend(
                    s for s in songs_unplayed if s.title in titles)
                keys_added.add((key, scale))

        closely_related_keys = songcollection.get_closely_related_keys(
            key, scale)
        for key_, scale_ in closely_related_keys:
            addSongsInKey(key_, scale_)
            # Also add the key one semitone lower: such songs can be pitch
            # shifted up by one semitone to fit the current key.
            key_to_add, scale_to_add = songcollection.get_key_transposed(
                key_, scale_, -1)
            addSongsInKey(key_to_add, scale_to_add)

        # Fallback: if there are not enough songs in compatible keys, use the
        # whole unplayed pool. (Was `== 0` before, which contradicted the
        # documented NUM_SONGS_IN_KEY_MINIMUM guarantee.)
        if len(songs_in_key) < NUM_SONGS_IN_KEY_MINIMUM:
            logger.debug('Not enough songs in pool, adding all songs!')
            songs_in_key = self.songsUnplayed
        return np.array(songs_in_key)
Example no. 2
(0 votes)
    def getBestNextSongAndCrossfade(self, master_song, master_cue,
                                    master_fade_in_len, fade_out_len,
                                    fade_type):
        '''
        Choose the song that overlaps best with the given master song and
        determine the crossfade parameters of the transition.

        Candidates (unplayed songs in a compatible key, filtered by theme
        distance) are scored by onset-detection-function (ODF) similarity over
        the overlap region; candidates with a predicted vocal clash are only
        used as a last resort. Also updates the pitch-shift offset, the
        played/unplayed song pools and the previous theme descriptor.

        :param master_song: currently playing song
        :param master_cue: cue point (in downbeats) in the master song
        :param master_fade_in_len: length of the master's fade-in section
        :param fade_out_len: length of the fade-out section
        :param fade_type: transition type (rolling, double drop, chill, ...)
        :returns: (best_song, best_slave_cue, best_master_cue,
                   best_fade_in_len, semitone_offset)
        :raises ValueError: if there are no candidate songs at all
        '''
        transition_length = master_fade_in_len + fade_out_len

        # 1. Select songs that are similar in key and that build up towards the goal song
        key, scale = songcollection.get_key_transposed(master_song.key,
                                                       master_song.scale,
                                                       self.semitone_offset)
        song_options = self.getSongOptionsInKey(key, scale)
        closely_related_keys = songcollection.get_closely_related_keys(
            key, scale)

        # 2. Filter the songs in key based on their distance to the centroid
        song_options = self.filterSongOptionsByThemeDistance(
            song_options, master_song)

        # 3. Evaluate the remaining candidates by vocal activity and ODF overlap
        master_song.open()
        best_score = np.inf
        best_score_clash = np.inf
        best_song = None
        best_song_clash = None  # best candidate WITH a vocal clash (fallback)
        odf_segment_len = 4  # ODF comparison segment length, in downbeats
        logger.debug('Selected songs, evaluated by ODF similarity: ')
        for next_song in song_options:
            next_song.open()

            # Determine the cue point and fade-in length for the candidate
            queue_slave, fade_in_len = getSlaveQueue(
                next_song,
                fade_type,
                min_playable_length=transition_length + 16)
            # The slave fade-in cannot exceed the master's; shorten it and
            # shift the master cue / transition length to compensate.
            fade_in_len = min(fade_in_len, master_fade_in_len)
            fade_in_len_correction = master_fade_in_len - fade_in_len
            master_cue_corr = master_cue + fade_in_len_correction
            transition_len_corr = transition_length - fade_in_len_correction
            queue_slave = queue_slave - fade_in_len

            # Construct the cross-fade for this transition. (The original
            # if queue_slave >= 16 / else branches were byte-identical, so
            # the branch has been collapsed.)
            cf = songtransitions.CrossFade(0, [queue_slave],
                                           transition_len_corr,
                                           fade_in_len, fade_type)

            # Iterate over the different options for the slave cue point
            for queue_slave_cur in cf.queue_2_options:

                # Split the overlapping portions of the onset curves in
                # segments of odf_segment_len downbeats and calculate their
                # similarities; the average indicates the crossfade quality.
                odf_scores = []
                for odf_start_dbeat in range(0, transition_len_corr,
                                             odf_segment_len):
                    odf_master = master_song.getOnsetCurveFragment(
                        master_cue_corr + odf_start_dbeat,
                        min(
                            master_cue_corr + odf_start_dbeat +
                            odf_segment_len,
                            master_cue_corr + transition_len_corr))
                    odf_slave = next_song.getOnsetCurveFragment(
                        queue_slave_cur + odf_start_dbeat,
                        min(
                            queue_slave_cur + odf_start_dbeat +
                            odf_segment_len,
                            queue_slave_cur + transition_len_corr))
                    odf_scores.append(
                        calculateOnsetSimilarity(odf_master, odf_slave) /
                        odf_segment_len)

                # Predict whether the vocals of both songs would clash in the
                # overlap region.
                singing_master = np.array(
                    master_song.singing_voice[master_cue_corr:master_cue_corr +
                                              transition_len_corr] > 0)
                # NOTE(review): this indexes with queue_slave rather than
                # queue_slave_cur, unlike the ODF comparison above — kept
                # as-is, but confirm this asymmetry is intended.
                singing_slave = np.array(
                    next_song.singing_voice[queue_slave:queue_slave +
                                            transition_len_corr] > 0)
                singing_clash = is_vocal_clash_pred(singing_master,
                                                    singing_slave)

                score = np.average(odf_scores)

                if score < best_score and not singing_clash:
                    best_song = next_song
                    best_score = score
                    best_fade_in_len = fade_in_len
                    best_slave_cue = queue_slave_cur
                    best_master_cue = master_cue_corr
                elif best_score == np.inf and score < best_score_clash and singing_clash:
                    # Only track the clashing fallback while no clash-free
                    # candidate has been found yet.
                    best_song_clash = next_song
                    best_score_clash = score
                    best_fade_in_len_clash = fade_in_len
                    best_slave_cue_clash = queue_slave_cur
                    best_master_cue_clash = master_cue_corr

                # Logging
                type_fade_dbg_str = '>> {:20s} [{}:{:3d}]: ODF {:.2f} {}'.format(
                    next_song.title[:20], fade_type, queue_slave_cur, score,
                    '' if not singing_clash else '>>CLASH<<')
                logger.debug(type_fade_dbg_str)

        if best_song is None:
            if best_song_clash is None:
                # Previously this path raised a confusing NameError.
                raise ValueError('No candidate songs available for a transition')
            # No best song without vocal clash was found: use the clashing
            # version instead as a last resort.
            best_song = best_song_clash
            best_score = best_score_clash
            best_fade_in_len = best_fade_in_len_clash
            best_slave_cue = best_slave_cue_clash
            best_master_cue = best_master_cue_clash

        # Determine the pitch shifting factor for the next song.
        if (best_song.key, best_song.scale) not in closely_related_keys:
            # This song has been shifted one semitone up or down: this has to
            # be compensated by means of pitch shifting.
            shifted_key_up, shifted_scale_up = songcollection.get_key_transposed(
                best_song.key, best_song.scale, 1)
            if (shifted_key_up, shifted_scale_up) in closely_related_keys:
                self.semitone_offset = 1
            else:
                self.semitone_offset = -1
            logger.debug('Pitch shifting! {} {} by {} semitones'.format(
                best_song.key, best_song.scale, self.semitone_offset))
        else:
            self.semitone_offset = 0

        self.prev_song_theme_descriptor = master_song.song_theme_descriptor
        self.songsPlayed.append(best_song)
        self.songsUnplayed.remove(best_song)
        # If there are too few songs remaining, then restart the pool.
        if len(self.songsUnplayed) <= NUM_SONGS_IN_KEY_MINIMUM:
            logger.debug('Replenishing song pool')
            self.songsUnplayed.extend(self.songsPlayed)
            self.songsPlayed = []

        return best_song, best_slave_cue, best_master_cue, best_fade_in_len, self.semitone_offset