Example #1
    def combine_same_pitch_for_length(self):
        """
        Given a list of note segments such as
        [{rhythm: 1/32, pitches: C}, {rhythm: 1/32, pitches: C}],
        combines them into a single {rhythm: 1/16, pitches: C}, since the
        detected note spans more than one segment.
        """
        last_encountered = ""
        sum_rhythm = 0

        # Walk every analysed song and merge its consecutive same-pitch segments
        for note in self.notes:
            current_song = self.notes[note]
            for n in current_song["notes"]:
                # Most likely pitch name and the note-length of this segment
                curr_note, curr_rhythm = n["pitches"][0][0], n["rhythm"]
                # Check if the segment is a series of rests or not
                # If so, treat it like a note change for mapping
                if n["amplitude"] < current_song["loudness"] * self.REST_THRESH:
                    curr_note = "REST"
                if last_encountered != "" \
                        and curr_note != last_encountered:
                    # A note change occurred, record the note and summed rhythm
                    self.note_list.append((last_encountered, sum_rhythm))
                    sum_rhythm = curr_rhythm
                else:
                    if not sum_rhythm:
                        sum_rhythm = curr_rhythm
                    else:
                        sum_rhythm = utils.round_note_length_base2(
                            sum_rhythm + curr_rhythm)
                last_encountered = curr_note

        # Flush the final pending note after all segments have been consumed
        self.note_list.append((last_encountered, sum_rhythm))
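
To see the merging rule on its own, here is a minimal, self-contained sketch of the same idea: consecutive segments that share a pitch are collapsed into one note whose length is the power-of-two-rounded sum of the segment lengths. The plain (pitch, rhythm) tuples and the round_note_length_base2 stand-in below are simplified assumptions for illustration; the method above works on segment dicts and also treats quiet segments as rests.

from fractions import Fraction

def round_note_length_base2(length):
    # Simplified stand-in for utils.round_note_length_base2: snap a length
    # to the nearest power-of-two note value (1, 1/2, ..., 1/64).
    candidates = [Fraction(1, 2 ** k) for k in range(7)]
    return min(candidates, key=lambda c: abs(c - length))

def combine_same_pitch(segments):
    # Collapse runs of identical pitches into single (pitch, length) notes.
    combined = []
    last_pitch, summed = None, Fraction(0)
    for pitch, rhythm in segments:
        if last_pitch is not None and pitch != last_pitch:
            combined.append((last_pitch, summed))  # pitch changed: emit the note
            summed = rhythm
        else:
            summed = round_note_length_base2(summed + rhythm)
        last_pitch = pitch
    if last_pitch is not None:
        combined.append((last_pitch, summed))      # flush the final pending note
    return combined

# Two 1/32 C segments merge into one 1/16 C note; the D segment stays separate.
segments = [("C", Fraction(1, 32)), ("C", Fraction(1, 32)), ("D", Fraction(1, 16))]
print(combine_same_pitch(segments))
# [('C', Fraction(1, 16)), ('D', Fraction(1, 16))]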
Example #2
    def find_pitch_sequences(self):
        """
        Populates the learner with information about the individual segments
        of the song, including the most likely identified pitch and how long
        the segment lasts in terms of note length.
        """
        for song in self.songs:
            bpm = song["bpm"]["value"]
            data = song["data"]
            time = song["time"]["value"]
            temp_song_segs = []
            # Record the song's key so every pitch can later be
            # transposed (shifted modulo 12) into the key of C.
            key = data.analysis.key["value"]

            for seg in data.analysis.beats:
                # Snap the beat's duration to the nearest power-of-two note length
                rhythm = utils.round_note_length_base2(
                    float(seg.duration) / bpm * SECONDS_PER_MIN)

                # Rank pitch classes from strongest to weakest so that
                # pitches[0] ends up being the segment's most likely pitch
                likely_pitches = sorted(enumerate(seg.mean_pitches()),
                                        key=lambda x: x[1], reverse=True)
                pitches = []
                for a in likely_pitches:
                    # Rotate the pitch-class index so the song's key maps to C
                    transposed = (a[0] - key) % 12
                    pitches.append((self.PITCH_DICTIONARY[transposed], a[1]))

                temp_song_segs.append({"count": seg.absolute_context()[0],
                                       "rhythm": rhythm, "pitches": pitches,
                                       "amplitude": seg.mean_loudness()})

            self.notes[song["file"]] = {
                "notes": temp_song_segs,
                "loudness": data.analysis.loudness
            }
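
The per-segment work above boils down to ranking the twelve pitch-class strengths and rotating each index so the song's key lands on C. Below is a hedged, self-contained sketch of just that step; the chromatic PITCH_DICTIONARY here is an assumed ordering starting at C and may not match the class's actual dictionary, and the beat-length rounding is left out.

# Assumed chromatic ordering with C at index 0; an illustrative stand-in
# for the class's PITCH_DICTIONARY.
PITCH_DICTIONARY = ["C", "C#", "D", "D#", "E", "F",
                    "F#", "G", "G#", "A", "A#", "B"]

def rank_and_transpose(chroma, key):
    # chroma: twelve per-pitch-class strengths, as from seg.mean_pitches();
    # key: the song's key index (0-11). Returns (pitch name, strength) pairs
    # sorted most likely first, with every index rotated so the key maps to C.
    ranked = sorted(enumerate(chroma), key=lambda x: x[1], reverse=True)
    return [(PITCH_DICTIONARY[(idx - key) % 12], strength)
            for idx, strength in ranked]

# A segment whose strongest pitch class is E (index 4) in a song in D (index 2)
# comes out as D once the whole song is shifted into C.
chroma = [0.1] * 12
chroma[4] = 0.9
print(rank_and_transpose(chroma, key=2)[0])  # ('D', 0.9)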