Example #1
def main(input_filename, songname, format, counter):
    """
    Calculate the fingerprint hashes of the referenced audio file and save
    to disk as a pickle file
    """

    # open the file & convert to wav
    song_data = AudioSegment.from_file(input_filename, format=format)
    song_data = song_data.set_channels(1)  # convert to mono
    wav_tmp = song_data.export(format="wav")  # write to a tmp file buffer
    wav_tmp.seek(0)
    rate, wav_data = wavfile.read(wav_tmp)

    # extract peaks and compute constellation hashes & offsets
    peaks = resound.get_peaks(np.array(wav_data))
    fingerprints = list(resound.hashes(peaks))  # hash, offset pairs

    if not fingerprints:
        raise RuntimeError("No fingerprints detected in source file - check your parameters passed to Resound.")

    # Combine duplicate keys
    for fp, abs_offset in fingerprints:
        counter[fp].append((abs_offset, songname))

    print "    Identified {} keypoints in '{}'.".format(len(counter), songname)

    return counter
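A minimal usage sketch for the function above, assuming AudioSegment, wavfile, np, and resound are imported as in the snippet; the file names, song names, and the pickling step are illustrative assumptions based on the docstring, not taken from the original project.

import pickle
from collections import defaultdict

# Hypothetical driver: accumulate fingerprints from several files into one
# defaultdict(list) counter, then pickle the combined table to disk.
counter = defaultdict(list)  # fingerprint hash -> [(offset, songname), ...]
for path, name in [("song1.mp3", "Song One"), ("song2.mp3", "Song Two")]:
    counter = main(path, name, format="mp3", counter=counter)

with open("fingerprints.pickle", "wb") as out:
    pickle.dump(dict(counter), out)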
Example #2
def main(input_filename, format):
    """
    Calculate the fingerprint hashes of the referenced audio file and save
    to disk as a pickle file
    """

    # open the file & convert to wav
    song_data = AudioSegment.from_file(input_filename, format=format)
    song_data = song_data.set_channels(1)  # convert to mono
    wav_tmp = song_data.export(format="wav")  # write to a tmp file buffer
    wav_tmp.seek(0)
    rate, wav_data = wavfile.read(wav_tmp)

    rows_per_second = (1 + (rate - WIDTH)) // FRAME_STRIDE

    # Calculate a coarser window for matching
    window_size = (rows_per_second // TIME_STRIDE, (WIDTH // 2) // FREQ_STRIDE)
    peaks = resound.get_peaks(np.array(wav_data), window_size=window_size)

    # half width (nyquist freq) & half size (window is +/- around the middle)
    f_width = WIDTH // (2 * FREQ_STRIDE) * 2
    t_gap = 1 * rows_per_second
    t_width = 2 * rows_per_second
    fingerprints = resound.hashes(peaks,
                                  f_width=f_width,
                                  t_gap=t_gap,
                                  t_width=t_width)  # hash, offset pairs

    return fingerprints
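This snippet depends on module-level constants (WIDTH, FRAME_STRIDE, TIME_STRIDE, FREQ_STRIDE) that are not shown above. The values below are placeholders chosen only to make the arithmetic concrete; they are assumptions, not the project's actual settings.

# Placeholder constants assumed by the snippet above (illustrative values only).
WIDTH = 4096         # spectrogram window length in samples; WIDTH // 2 frequency bins
FRAME_STRIDE = 2048  # hop between successive spectrogram frames, in samples
TIME_STRIDE = 32     # coarsening factor along the time axis
FREQ_STRIDE = 64     # coarsening factor along the frequency axis

# For a 44100 Hz file these give (1 + (44100 - 4096)) // 2048 == 19 spectrogram
# rows per second, which is what the rows_per_second line above computes.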
Example #3
    def test_fingerprints(self):
        """
        fingerprint keys should be of type <long> (native python type) for
        compatibility with app engine datastore
        """
        fpg = resound.hashes(self.data, 44100)
        self.assertIsInstance(next(fpg)[0], long)
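For context, a hedged sketch of the fixture this test presumably relies on: self.data loaded from a WAV file in setUp. The method body and filename are hypothetical; only the 44100 Hz rate appears in the test itself.

    # Hypothetical fixture for the test above (filename is illustrative only).
    def setUp(self):
        rate, self.data = wavfile.read('test_audio.wav')  # expected rate: 44100 Hz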
Example #4
def main(input_filename, format):
    """
    Calculate the fingerprint hashes of the referenced audio file and save
    to disk as a pickle file
    """

    # open the file & convert to wav
    song_data = AudioSegment.from_file(input_filename, format=format)
    song_data = song_data.set_channels(1)  # convert to mono
    wav_tmp = song_data.export(format="wav")  # write to a tmp file buffer
    wav_tmp.seek(0)
    rate, wav_data = wavfile.read(wav_tmp)

    rows_per_second = (1 + (rate - WIDTH)) // FRAME_STRIDE

    # Calculate a coarser window for matching
    window_size = (rows_per_second // TIME_STRIDE, (WIDTH // 2) // FREQ_STRIDE)
    peaks = resound.get_peaks(np.array(wav_data), window_size=window_size)

    # half width (nyquist freq) & half size (window is +/- around the middle)
    f_width = WIDTH // (2 * FREQ_STRIDE) * 2
    t_gap = 1 * rows_per_second
    t_width = 2 * rows_per_second
    fingerprints = resound.hashes(peaks, f_width=f_width, t_gap=t_gap, t_width=t_width)  # hash, offset pairs

    return fingerprints
Example #5
    def post(self):
        """
        Find the best matching song id in response to POST requests containing
        a file-like object with valid WAV encoding in the request body by
        correlating hashes and relative offsets from the WAV data with
        previously-computed hash records.
        """
        request_file = self.request.body_file.file
        rate, src_audio = wavfile.read(request_file)
        votes = defaultdict(lambda: 0)

        hashes = list(resound.hashes(src_audio, rate))
        keys = [Key(Hashes, h_id) for h_id, _ in hashes]

        futures = ndb.get_multi_async(keys)
        for (h_id, offset), future in zip(hashes, futures):
            entity = future.get_result()  # wait for response from each key

            if not entity:
                continue

            for song_id, abs_offset in entity.song_list:
                delta = abs_offset - offset
                votes[(song_id, delta)] += 1

        # Find the best match
        max_votes, best_id = 0, None
        p_votes, prev = 0, None
        s_votes, prev_2 = 0, None
        for (song_id, _), vote_count in votes.iteritems():
            if max_votes < vote_count:
                max_votes, p_votes, s_votes = vote_count, max_votes, p_votes
                best_id, prev, prev_2 = song_id, best_id, prev
            elif p_votes < vote_count:
                p_votes, s_votes = vote_count, p_votes
                prev, prev_2 = song_id, prev
            elif s_votes < vote_count:
                s_votes = vote_count
                prev_2 = song_id

        msg = "Best ids:\n1. {} - {}\n2. {} - {}\n3. {} - {}"
        logging.debug(
            msg.format(best_id, max_votes, prev, p_votes, prev_2, s_votes))

        if max_votes > MIN_MATCH_THRESHOLD:
            key = Key(Songs, best_id)
            song = key.get()
            self.response.write(
                json.dumps({
                    'artist': song.artist,
                    'title': song.title,
                    'year': song.year
                }))
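The handler above relies on two App Engine ndb model kinds, Hashes and Songs, that are not part of the snippet. A minimal sketch of what they might look like, inferred from the attributes the handler reads (entity.song_list, song.artist, song.title, song.year); the property types are assumptions.

from google.appengine.ext import ndb


class Hashes(ndb.Model):
    # one entity per fingerprint hash; the key id is the hash value
    song_list = ndb.PickleProperty()  # assumed: [(song_id, abs_offset), ...]


class Songs(ndb.Model):
    artist = ndb.StringProperty()
    title = ndb.StringProperty()
    year = ndb.IntegerProperty()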
Example #6
    def post(self):
        """
        Find the best matching song id in response to POST requests containing
        a file-like object with valid WAV encoding in the request body by
        correlating hashes and relative offsets from the WAV data with
        previously-computed hash records.
        """
        request_file = self.request.body_file.file
        rate, src_audio = wavfile.read(request_file)
        votes = defaultdict(lambda: 0)

        hashes = list(resound.hashes(src_audio, rate))
        keys = [Key(Hashes, h_id) for h_id, _ in hashes]

        futures = ndb.get_multi_async(keys)
        for (h_id, offset), future in zip(hashes, futures):
            entity = future.get_result()  # wait for response from each key

            if not entity:
                continue

            for song_id, abs_offset in entity.song_list:
                delta = abs_offset - offset
                votes[(song_id, delta)] += 1

        # Find the best match
        max_votes, best_id = 0, None
        p_votes, prev = 0, None
        s_votes, prev_2 = 0, None
        for (song_id, _), vote_count in votes.iteritems():
            if max_votes < vote_count:
                max_votes, p_votes, s_votes = vote_count, max_votes, p_votes
                best_id, prev, prev_2 = song_id, best_id, prev
            elif p_votes < vote_count:
                p_votes, s_votes = vote_count, p_votes
                prev, prev_2 = song_id, prev
            elif s_votes < vote_count:
                s_votes = vote_count
                prev_2 = song_id

        msg = "Best ids:\n1. {} - {}\n2. {} - {}\n3. {} - {}"
        logging.debug(msg.format(best_id, max_votes,
                                 prev, p_votes,
                                 prev_2, s_votes))

        if max_votes > MIN_MATCH_THRESHOLD:
            key = Key(Songs, best_id)
            song = key.get()
            self.response.write(json.dumps({'artist': song.artist,
                                            'title': song.title,
                                            'year': song.year}))
Example #7
def getNewSongFingerprint():
    #fingerprints the unknown song to be identified
    sample_rate, data = wavfile.read('unknown.wav')
    return resound.hashes(data, freq=sample_rate)
Example #8
def fingerprintDirectory():
    #creates fingerprints for all .wav files in directory
    for f in onlyfiles:
        sample_rate, data = wavfile.read(f)
        hashes = list(resound.hashes(data, freq=sample_rate))
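The snippet references onlyfiles without defining it; presumably it is the list of .wav files in the target directory. A hedged sketch of how it could be built (the directory path is a placeholder):

from os import listdir
from os.path import isfile, join

directory = '.'  # placeholder: directory containing the .wav files to fingerprint
onlyfiles = [join(directory, f) for f in listdir(directory)
             if isfile(join(directory, f)) and f.endswith('.wav')]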