Example #1
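# Seeks a BASS channel to a byte offset; prints the BASS error description
# when the call fails.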
def method_set_position(self, value):
    if not pybass.BASS_ChannelSetPosition(self.bass_handle, value,
                                          pybass.BASS_POS_BYTE):
        print('BASS_ChannelSetPosition error %s' %
              pybass.get_error_description(pybass.BASS_ErrorGetCode()))
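For context, a minimal usage sketch. The player object, its bass_handle
attribute, and the 30-second target are hypothetical; BASS_ChannelSeconds2Bytes
is the pybass call for converting a time to a byte offset (it also appears in
Example #2 below).

# Hypothetical driver: seek to 30 seconds by converting to a byte offset first.
byte_pos = pybass.BASS_ChannelSeconds2Bytes(player.bass_handle, 30.0)
player.method_set_position(byte_pos)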
Example #2
# Imports needed to run this example; project-level helpers (makemdx,
# init_bass_decode, get_channel, MFEATS, get_tempo, fir_filter, open_shelves
# and the key-analysis utilities) come from the surrounding code.
import ctypes
import os
import time

import mutagen.id3
import mutagen.mp3
import numpy

import pybass


def mfeats_single(path, queue=None):
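    """Analyze one audio file and build an MFEATS record.

    Computes a compact waveform, tempo, musical key, highlight period and
    autogain factor; puts the record on `queue` when one is given, otherwise
    returns it.
    """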
    # uses mono data

    # tic = time.time()
    find_key = True
    find_gain = False
    key, bit, error = ('', 16, 0)
    path = os.path.abspath(path)
    mdx = makemdx(path)
    # key_analysis_fs = 6000
    # waveform_length = 2000  # earlier run: 2500
    # waveform_oversampling = 20
    waveform_length = 10000
    waveform_oversampling = 20
    best_highlight_duration_beat = 32 * 2
    # best_highlight_duration_beat = 16 * 2
    version = MFEATS_VERSION

    init_bass_decode()
    channel = get_channel(path)
    hstream = pybass.BASS_StreamCreateFile(
        False, path, 0, 0, pybass.BASS_STREAM_DECODE
        | pybass.BASS_STREAM_PRESCAN | pybass.BASS_UNICODE)

    # check the handle before querying the stream; BASS_StreamCreateFile
    # returns 0 on failure
    if hstream == 0:
        error = 1
        mfeats_data = MFEATS(mdx=mdx,
                             path=path,
                             date=time.time(),
                             version=version,
                             error=error)
        if queue is not None:
            queue.put(mfeats_data)
            return
        else:
            return mfeats_data

    fs = ctypes.c_float()
    pybass.BASS_ChannelGetAttribute(hstream, pybass.BASS_ATTRIB_FREQ,
                                    ctypes.byref(fs))
    fs = int(fs.value)  # sample rate in Hz
    hlength = pybass.BASS_ChannelGetLength(hstream, pybass.BASS_POS_BYTE)
    duration = pybass.BASS_ChannelBytes2Seconds(hstream, hlength)
    total_frame_length = hlength // 2  # number of 16-bit samples (all channels)

    # derive a frame size that yields about waveform_length waveform points,
    # rounded up to a multiple of 8 samples
    if total_frame_length // 2 < waveform_length:
        waveform_length = total_frame_length // 2
    frame_length = int(1.0 * total_frame_length / waveform_length) * 2
    if frame_length % 8 != 0:
        frame_length += 8 - frame_length % 8
    gap = total_frame_length / frame_length - waveform_length
    waveform = numpy.zeros(int(round(waveform_length + gap)))
    highlight_raw_points = numpy.zeros(int(round(waveform_length + gap)))
    frame_raw = numpy.empty(frame_length, dtype=ctypes.c_short)
    jump = 1.0 * frame_length / waveform_oversampling

    # tempo_frame accumulates a 200 Hz amplitude envelope for tempo analysis
    analyze_frame, tempo_frame, tempo_fs = ([], [], 200)

    # walk the stream frame by frame, building the waveform envelope and
    # collecting raw samples for the long-term analyses
    for cnt, frame_position in enumerate(
            numpy.arange(0, total_frame_length - frame_length, frame_length)):
        # read frame_length 16-bit samples (frame_length * 2 bytes)
        pybass.BASS_ChannelGetData(
            hstream, frame_raw.ctypes.data_as(ctypes.POINTER(ctypes.c_short)),
            int(frame_length * 2))

        # keep one channel (stride by channel count) as a private copy
        mono_frame = frame_raw[::channel].copy()
        analyze_frame += [mono_frame]
        if jump < waveform_oversampling:
            waveform[cnt] = numpy.max(numpy.abs(mono_frame))
        else:
            points = [
                numpy.max(numpy.abs(mono_frame[int(i):int(i + jump)]))
                for i in numpy.arange(0, frame_length / channel - jump, jump)
            ]
            waveform[cnt] = numpy.mean(points)
        highlight_raw_points[cnt] = numpy.mean(numpy.abs(mono_frame))

        # collect frames for long term analysis

        alength = len(analyze_frame) * len(analyze_frame[-1])
        if (alength >= fs * 30
                or alength * channel >= total_frame_length - frame_length):
            analyze_frame = numpy.concatenate(analyze_frame, axis=0)

            # resample the accumulated samples down to tempo_fs and keep the
            # magnitude envelope for tempo detection
            num = int(len(analyze_frame) / (1.0 * fs / tempo_fs))
            tempo_frame += [
                numpy.abs(numpy.interp(
                    numpy.linspace(0, len(analyze_frame) - 1, num),
                    numpy.arange(len(analyze_frame)),
                    analyze_frame))
            ]

            # key_frame_length = int(fs*0.25); key_frame_jump = 0.8
            # for i in range(0, len(analyze_frame)-key_frame_length,\
            #   int(key_frame_length*key_frame_jump)):
            #   spectrum = numpy.fft.fft(\
            #       analyze_frame[i:i+key_frame_length],\
            #       int(fs*key_analysis_resolution))
            #   spectrum = numpy.abs(spectrum[1:int(len(spectrum)/2)])
            #   notes = spectrum_to_note_by_max(spectrum, note_freq_div)
            #   chromagram += [note_to_chroma_by_max(notes)]
            analyze_frame = []

    # waveform

    # scale 16-bit peak values down to signed 8-bit for compact storage
    waveform = numpy.int8(waveform / (2**8))

    # tempo analysis with tempo_frame

    # if duration > 60:
    #   tempo_frame = numpy.concatenate(tempo_frame, axis=0)
    #   tempo = get_tempo(tempo_frame, tempo_fs)
    # else: tempo = 0.0
    tempo_frame = numpy.concatenate(tempo_frame, axis=0)
    tempo = get_tempo(tempo_frame, tempo_fs)

    # normalize slow tempos into a common range, then derive the highlight
    # window length (best_highlight_duration_beat beats long)
    xtempo = tempo
    if xtempo <= 95:
        xtempo = xtempo * 2.0
    if xtempo == 0:
        until_duration = duration
    else:
        until_duration = 60.0 / xtempo * best_highlight_duration_beat

    # highlight analysis with highlight_raw_points

    jump = 1
    highlight_length = until_duration * len(highlight_raw_points) / duration
    duratsum = numpy.zeros(len(highlight_raw_points))


    # fade the head and tail of the loudness curve with half-Hamming windows
    # so the highlight search favors the middle of longer tracks
    limit_factor = 1 - (duration - 60.0) / duration
    if limit_factor > 1.00:
        limit_factor = 0.00
    if limit_factor < 0.25:
        limit_factor = 0.25
    htlength = int(len(highlight_raw_points) * limit_factor)
    window = numpy.hamming(htlength * 2)
    highlight_raw_points[:htlength] *= window[:htlength]
    highlight_raw_points[-htlength:] *= window[htlength:]

    # slide a highlight-sized window over the loudness curve and keep the
    # mean level at each offset; the loudest window wins
    for cnt in numpy.arange(0,
                            len(highlight_raw_points) - highlight_length,
                            jump):
        thdata = numpy.mean(
            highlight_raw_points[int(cnt):int(cnt + highlight_length)])
        duratsum[int(cnt)] = thdata
    pntadd = numpy.argmax(duratsum)
    offset_time = 1.0 * jump * pntadd / (len(highlight_raw_points) / duration)
    highlight = (offset_time, until_duration)  # (start sec, length sec)

    # autogain analysis in highlight period

    autogain = 0.4
    if find_gain:
        if duration > 60:
            autogain_analysis_length = highlight[1]
            if duration - highlight[0] < autogain_analysis_length:
                autogain_analysis_length = duration - highlight[0]
            frame_length = int(fs * channel * autogain_analysis_length)
            byte_position = pybass.BASS_ChannelSeconds2Bytes(
                hstream, highlight[0])
            pybass.BASS_ChannelSetPosition(hstream, byte_position, False)
            frame_raw = numpy.empty(frame_length, dtype=ctypes.c_short)

            pybass.BASS_ChannelGetData(
                hstream,
                frame_raw.ctypes.data_as(ctypes.POINTER(ctypes.c_short)),
                int(frame_length * 2))

            # scale 16-bit samples to [-1, 1) and shape the spectrum with a
            # chain of FIR band filters before measuring RMS
            mono_frame = frame_raw[::channel] / 32768.0
            mono_frame = fir_filter(mono_frame,
                                    lowcut=500,
                                    highcut=fs / 2,
                                    fs=fs,
                                    order=15)
            mono_frame = fir_filter(mono_frame,
                                    lowcut=1000,
                                    highcut=fs / 2,
                                    fs=fs,
                                    order=7)
            mono_frame = fir_filter(mono_frame,
                                    lowcut=1000,
                                    highcut=fs / 2,
                                    fs=fs,
                                    order=5)
            mono_frame = fir_filter(mono_frame,
                                    lowcut=5000,
                                    highcut=fs / 2,
                                    fs=fs,
                                    order=5)
            if fs / 2 > 21000:
                mono_frame = fir_filter(mono_frame,
                                        lowcut=0,
                                        highcut=20000,
                                        fs=fs,
                                        order=45)
                mono_frame += fir_filter(
                    mono_frame, lowcut=15000, highcut=fs / 2, fs=fs,
                    order=5) * 0.5
            rms = numpy.mean(mono_frame**2)**0.5 * 3

            # spectrum = numpy.fft.fft(mono_frame, fs)
            # spectrum = numpy.abs(spectrum[1:int(len(spectrum)/2)])
            # pylab.plot(spectrum); pylab.show()

            autogain = 0.14 / rms  # gain factor needed to reach the RMS target

    # key analysis in highlight period

    key = ''
    if find_key:
        chromagram, resolution = ([], 1.0)
        note_freq_div = get_note_freq_div(resolution)
        # note_window = get_note_window(fs, resolution, note_freq_div)
        if xtempo == 0:
            frame_length = int(fs * channel * 0.5)
        else:
            frame_length = int(fs * channel * (60.0 / xtempo))

        # note: highlight[0] is in seconds (used with Seconds2Bytes below),
        # while until_position and total_frame_length count 16-bit samples
        offset_position, until_position = (highlight[0],
                                           fs * channel * until_duration * 2)

        if frame_length > total_frame_length:
            frame_length = total_frame_length - 1

        if offset_position + until_position > total_frame_length:
            until_position = total_frame_length - offset_position

        frame_raw = numpy.empty(frame_length, dtype=ctypes.c_short)
        byte_position = pybass.BASS_ChannelSeconds2Bytes(
            hstream, offset_position)
        pybass.BASS_ChannelSetPosition(hstream, byte_position, False)

        for cnt, frame_position in enumerate(
                numpy.arange(0, total_frame_length - frame_length,
                             frame_length)):

            pybass.BASS_ChannelGetData(
                hstream,
                frame_raw.ctypes.data_as(ctypes.POINTER(ctypes.c_short)),
                int(frame_length * 2))

            mono_frame = frame_raw[::channel] / 32768.0
            spectrum = numpy.fft.fft(mono_frame, int(fs * resolution))
            spectrum = numpy.abs(spectrum[1:int(len(spectrum) / 2)])

            # fold the spectrum into per-note energies, then a chroma vector
            # (the enclosing `if find_key:` already guards this block)
            notes = spectrum_to_note_by_max(spectrum, note_freq_div)
            chromagram += [note_to_chroma_by_max(notes)]

            if (cnt + 1) * frame_length >= until_position:
                break

        scored_keys, key_scores, key_counts = ([], [0] * 24, [0] * 24)
        for chroma in chromagram:
            lag, score = get_chord_binaries_correlation_lag_score(chroma)
            scored_keys += [lag]
            key_counts[lag] += 1
            key_scores[lag] += score
        key_scores = numpy.array(key_scores)
        max_key_scores = max(key_scores)
        if max_key_scores == 0.0:
            key = ''
        else:
            key_scores = key_scores / max_key_scores * 100
            scored_key_idx = []
            # loop kept from a multi-candidate version; it currently keeps
            # only the single best-scoring key
            for i in range(1):
                value, pnt = find_max(key_scores)
                if value < 50:
                    break
                scored_key_idx += [pnt[0]]
                key_scores[pnt[0]] = 0
            string_keys = []
            # drop a candidate when its 12-offset counterpart is also present,
            # then render the survivors as key names
            for i in range(len(scored_key_idx) - 1, -1, -1):
                if (scored_key_idx[i] - 12 in scored_key_idx
                        or scored_key_idx[i] + 12 in scored_key_idx):
                    scored_key_idx.pop(i)
                    continue
                string_keys += [chord_idx_to_char(scored_key_idx[i])]
            key = ' or '.join(string_keys)

    # chromagram = numpy.array(chromagram).T
    # chromagram = numpy.flipud(chromagram)
    # pylab.imshow(chromagram, interpolation='nearest')
    # pylab.show() # pylab.grid(True)

    # md5 = file2md5(upath)

    tempo_type, key_type, save_tag = open_shelves(
        ('tempo_restict_type', 'key_format_type', 'auto_save_tag'),
        PREFERENCE_DB)

    if tempo_type is None or tempo_type == 0:
        pass
    elif tempo_type == 1 and tempo <= 95:
        tempo = tempo * 2.0
    elif tempo_type == 2 and tempo >= 120:
        tempo = tempo * 0.5
    if key_type is None:
        key_type = 1
    key = convert_chord_type(key, key_type)
    if save_tag is None:
        save_tag = False
    if save_tag:
        # write the detected key and tempo into the file's ID3 tags
        mutagen_mp3 = mutagen.mp3.MP3(path)
        mutagen_mp3['TKEY'] = mutagen.id3.TKEY(encoding=3, text=[key])
        mutagen_mp3['TBPM'] = mutagen.id3.TBPM(encoding=3,
                                               text=[str(tempo)])
        mutagen_mp3.save()

    mfeats_data = MFEATS(mdx=mdx,
                         path=path,
                         key=key,
                         tempo=tempo,
                         duration=duration,
                         highlight=highlight,
                         waveform=waveform,
                         date=time.time(),
                         version=version,
                         channel=channel,
                         bit=bit,
                         error=0,
                         autogain=autogain)

    # pybass.BASS_Free()
    # free the decode stream unconditionally; the handle is known to be valid
    pybass.BASS_StreamFree(hstream)

    # print 'mfeats_single_finished: elapsed_time: %03.06fsec: %03.06fmsec/onesec'\
    #   % (time.time()-tic, (time.time()-tic)/duration*1000)
    # return mfeats_data

    if queue is not None:
        queue.put(mfeats_data)
        return
    else:
        return mfeats_data
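A minimal driver sketch for the function above, assuming this module is
importable as written and that an MFEATS record exposes its constructor
keywords (tempo, key, highlight) as attributes; the file path is hypothetical.

# Hypothetical driver: run the analysis in a worker process and collect the
# result from the queue, matching the queue= branch of mfeats_single above.
import multiprocessing

if __name__ == '__main__':
    q = multiprocessing.Queue()
    worker = multiprocessing.Process(target=mfeats_single,
                                     args=('/path/to/track.mp3', q))
    worker.start()
    mfeats_data = q.get()   # blocks until the worker puts its MFEATS record
    worker.join()
    print(mfeats_data.tempo, mfeats_data.key, mfeats_data.highlight)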