Example #1
	def method_free_handle(self):
		if self.bass_handle:
			channel_info = self.method_get_channel_info()
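			# MOD formats report ctype values at or above BASS_CTYPE_MUSIC_MOD,
			# which sit above the plain stream ctypes, so they must be tested first.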
			if channel_info.ctype >= pybass.BASS_CTYPE_MUSIC_MOD:
				if not pybass.BASS_MusicFree(self.bass_handle):
					print('BASS_MusicFree error', pybass.get_error_description(pybass.BASS_ErrorGetCode()))
				else:
					self.bass_handle = 0
			elif channel_info.ctype >= pybass.BASS_CTYPE_STREAM:
				if not pybass.BASS_StreamFree(self.bass_handle):
					print('BASS_StreamFree error', pybass.get_error_description(pybass.BASS_ErrorGetCode()))
				else:
					self.bass_handle = 0
Example #2
#const char*  _stdcall TAGS_Read( DWORD dwHandle, const char* fmt );
TAGS_Read = func_type(ctypes.c_char_p, ctypes.c_ulong,
                      ctypes.c_char_p)(('TAGS_Read', tags_module))

# retrieves the current version
#DWORD _stdcall TAGS_GetVersion();
TAGS_GetVersion = func_type(ctypes.c_ulong)(('TAGS_GetVersion', tags_module))

if __name__ == "__main__":
    print('TAGS implemented Version', TAGS_VERSION)
    print('TAGS real Version', TAGS_GetVersion())
    import pybass
    if not pybass.BASS_Init(-1, 44100, 0, 0, 0):
        print('BASS_Init error',
              pybass.get_error_description(pybass.BASS_ErrorGetCode()))
    else:
        # byte string: no BASS_UNICODE flag is set, so the ANSI file API is used
        handle = pybass.BASS_StreamCreateFile(False, b'test.ogg', 0, 0, 0)
        if handle == 0:
            print('BASS_StreamCreateFile error',
                  pybass.get_error_description(pybass.BASS_ErrorGetCode()))
        else:
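            # Tags library format string: %TRCK/%ARTI/%TITL/%ALBM/%YEAR/%GNRE/%CMNT
            # expand to tag fields; %IFV1/%IFV2 are conditionals and
            # %ITRM/%ICAP/%IUPC trim and change case.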
            fmt = '%IFV1(%ITRM(%TRCK),%ITRM(%TRCK). )%IFV2(%ITRM(%ARTI),%ICAP(%ITRM(%ARTI)),no artist) - %IFV2(%ITRM(%TITL),%ICAP(%ITRM(%TITL)),no title)%IFV1(%ITRM(%ALBM), - %IUPC(%ITRM(%ALBM)))%IFV1(%YEAR, %(%YEAR%))%IFV1(%ITRM(%GNRE), {%ITRM(%GNRE)})%IFV1(%ITRM(%CMNT), [%ITRM(%CMNT)])'
            tags = TAGS_Read(handle, fmt.encode('ascii'))  # c_char_p expects bytes
            print(tags)
            if not pybass.BASS_StreamFree(handle):
            print('BASS_StreamFree error',
                  pybass.get_error_description(pybass.BASS_ErrorGetCode()))
        if not pybass.BASS_Free():
        print('BASS_Free error',
              pybass.get_error_description(pybass.BASS_ErrorGetCode()))
Example #3
def freehandle(handle):
    """
Free a handle
    """
    return dll.BASS_StreamFree(handle)
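
A minimal usage sketch, assuming `dll` is the ctypes-loaded BASS module and the handle came from BASS_StreamCreateFile:

handle = dll.BASS_StreamCreateFile(False, b'test.ogg', 0, 0, 0)
# ... play or decode the stream ...
freehandle(handle)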
Example #4
    def Quit(self):
        self.loop = False
        if pybass.BASS_ChannelIsActive(self.hStream) == 1:  # BASS_ACTIVE_PLAYING
            pybass.BASS_StreamFree(self.hStream)
Example #5
    def InitAudio(self):
        ######
        # import ctypes
        # from packages.pybassex import pybassex
        # ex = pybassex()
        # path = 'C:\\Users\\tkmix\\Desktop\\WORK\\macrobox-player\\source\\packages\\bass_vst.dll'
        # bass_module = ctypes.WinDLL(path)
        # func_type = ctypes.WINFUNCTYPE
        # QWORD = ctypes.c_int64
        # HSTREAM = ctypes.c_ulong
        # BASS_VST_ChannelSetDSP = func_type(
        #     ctypes.c_ulong, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_int64, ctypes.c_ulong)(('BASS_VST_ChannelSetDSP', bass_module))
        # BASS_VST_GetParam = func_type(
        #     ctypes.c_bool, HSTREAM, ctypes.c_int64)(('BASS_VST_GetParam', bass_module))
        # BASS_VST_SetParam = func_type(
        #     ctypes.c_bool, HSTREAM, ctypes.c_int64, ctypes.c_float)(('BASS_VST_SetParam', bass_module))

        # BASS_VST_EmbedEditor = func_type(
        #     ctypes.c_bool, HSTREAM, ctypes.c_int64)(('BASS_VST_EmbedEditor', bass_module))
        # BASS_VST_SetScope = func_type(
        #     ctypes.c_bool, HSTREAM, ctypes.c_int64)(('BASS_VST_SetScope', bass_module))
        # BASS_VST_GetInfo = func_type(
        #     HSTREAM, ctypes.c_ulong)(('BASS_VST_GetInfo', bass_module))
        ######

        self.parent.parent.ListBox.List.pending.SkipStopIcon = True
        is_position_set = self.path == self.parent.cue.path
        self.path = self.parent.cue.path
        if pybass.BASS_ChannelIsActive(self.hStream) == 1:
            pybass.BASS_StreamFree(self.hStream)
        if sys.platform.startswith('win'):
            flags = pybass.BASS_STREAM_PRESCAN | pybass.BASS_UNICODE
        else:  # macOS and other non-Windows: no BASS_UNICODE, pass encoded bytes
            flags = pybass.BASS_STREAM_PRESCAN
            self.path = self.path.encode(sys.getfilesystemencoding())

        self.hStream = pybass.BASS_StreamCreateFile(False, self.path, 0, 0,
                                                    flags)

        ######
        # print(dir(pybass))
        # from pybass import pybass_vst
        # vst_plugin_name = 'LoudMax64.dll'  # full (non-Lite) build
        vst_plugin_name = 'LoudMaxLite64.dll'
        # vst_plugin_path = os.path.join(os.path.dirname(__file__), 'packages', vst_plugin_name)
        vst_plugin_path = os.path.join('assets', 'dlls', vst_plugin_name)
        if hasattr(sys, '_MEIPASS'):
            vst_plugin_path = os.path.join(sys._MEIPASS, vst_plugin_path)
        else:
            vst_plugin_path = os.path.join(
                os.path.dirname(os.path.dirname(__file__)), vst_plugin_path)
        # BASS_VST_KEEP_CHANS = 0x00000001
        flags = pybass.BASS_UNICODE | pybass.BASS_VST_KEEP_CHANS
        self.vstHandle = pybass.BASS_VST_ChannelSetDSP(self.hStream,
                                                       vst_plugin_path, flags,
                                                       0)
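        # Parameter indices are plugin-specific; the mapping assumed here is
        # for the LoudMax build loaded above (e.g. 0 = threshold, 1 = output).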
        pybass.BASS_VST_SetParam(self.vstHandle, 0, 0.0)
        pybass.BASS_VST_SetParam(self.vstHandle, 1, 1.0)
        pybass.BASS_VST_SetParam(self.vstHandle, 2, 0.0)
        pybass.BASS_VST_SetParam(self.vstHandle, 3, 0.0)
        # print(os.path.join(os.path.dirname(__file__), 'packages', 'LoudMax64.dll'))
        # self.parent.Show()
        # x = BASS_VST_SetScope(self.vstHandle, 123)
        # dialog = wx.TextEntryDialog(self.parent.parent.parent, 'Enter Your Name', 'Text Entry Dialog')
        # BASS_VST_EmbedEditor(self.vstHandle, dialog.GetHandle())
        # dialog.ShowModal()
        # if dialog.ShowModal() == wx.ID_OK:
        #     self.text.SetValue('Name entered:' + dialog.GetValue())
        # dialog.Destroy()

        # BASS_VST_EmbedEditor(self.vstHandle, self.parent.GetHandle())
        # print()

        # param = BASS_VST_GetParam(self.vstHandle, 0)
        # info = None
        # BASS_VST_SetParam(self.vstHandle, 1, 1.0)

        # print(param)
        # param = BASS_VST_GetParam(self.vstHandle, 1)
        # print(param)
        ######

        self.parent.cue.hStream = self.hStream
        audio.set_volume(self.hStream, 0.0)
        if self.resume is not None:
            resume = self.resume
            if self.resume < 0:
                duration = audio.get_duration(self.hStream)
                resume = duration + self.resume
            audio.set_position(self.hStream, resume)
        pybass.BASS_ChannelPlay(self.hStream, False)

        self.fadein.cnt = self.fadein.time
        if is_position_set is False and self.parent.IsLoopOn():
            self.fadein.cnt = self.fadein.time
        else:
            self.parent.SetVolume()
        self.resume = None
        self.pending = False
        # self.parent.FocusPlayingItem()
        self.parent.parent.ListTab.reInitBuffer = True
        self.parent.parent.ListBox.List.reInitBuffer = True
Example #6
def mfeats_single(path, queue=None):
    # uses mono data

    # tic = time.time()
    find_key = True
    find_gain = False
    key, bit, error = ('', 16, 0)
    path = os.path.abspath(path)
    mdx = makemdx(path)
    # key_analysis_fs = 6000
    # waveform_length = 2000
    # waveform_oversampling = 20
    # key_analysis_fs = 6000
    # waveform_length = 2500
    # waveform_oversampling = 20
    waveform_length = 10000
    waveform_oversampling = 20
    best_highlight_duration_beat = 32 * 2
    # best_highlight_duration_beat = 16 * 2
    version = MFEATS_VERSION

    init_bass_decode()
    channel = get_channel(path)
    hstream = pybass.BASS_StreamCreateFile(
        False, path, 0, 0, pybass.BASS_STREAM_DECODE
        | pybass.BASS_STREAM_PRESCAN | pybass.BASS_UNICODE)
    # bail out before touching the handle if the stream could not be opened
    if hstream == 0:
        mfeats_data = MFEATS(mdx=mdx,
                             path=path,
                             date=time.time(),
                             version=version,
                             error=1)
        if queue is not None:
            queue.put(mfeats_data)
            return
        return mfeats_data

    fs = ctypes.c_float()
    pybass.BASS_ChannelGetAttribute(hstream, pybass.BASS_ATTRIB_FREQ,
                                    ctypes.byref(fs))
    fs = int(fs.value)
    hlength = pybass.BASS_ChannelGetLength(hstream, pybass.BASS_POS_BYTE)
    duration = pybass.BASS_ChannelBytes2Seconds(hstream, hlength)
    total_frame_length = hlength // 2  # 16-bit samples: two bytes per sample

    if total_frame_length / 2 < waveform_length:
        waveform_length = total_frame_length // 2
    frame_length = int(1.0 * total_frame_length / waveform_length) * 2
    if int(frame_length) % 8 != 0:
        frame_length += (8 - int(frame_length) % 8)
    gap = total_frame_length / frame_length - waveform_length
    waveform = numpy.zeros(int(round(waveform_length + gap)))
    highlight_raw_points = numpy.zeros(int(round(waveform_length + gap)))
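    # reusable int16 buffer that BASS_ChannelGetData overwrites on each pass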
    frame_raw = numpy.arange(frame_length, dtype=ctypes.c_short)
    jump = 1.0 * frame_length / waveform_oversampling

    analyze_frame, tempo_frame, tempo_fs = ([], [], 200)

    for cnt, frame_position in enumerate(
            numpy.arange(0, total_frame_length - frame_length, frame_length)):
        pybass.BASS_ChannelGetData(
            hstream, frame_raw.ctypes.data_as(ctypes.POINTER(ctypes.c_short)),
            int(frame_length * 2))

        mono_frame = frame_raw[::channel].copy()  # keep one channel as the mono signal
        analyze_frame += [mono_frame]
        if jump < waveform_oversampling:
            waveform[cnt] = numpy.max(numpy.abs(mono_frame))
        else:
            points = [
                numpy.max(numpy.abs(mono_frame[int(i):int(i + jump)]))
                for i in numpy.arange(0, frame_length / channel - jump, jump)
            ]
            waveform[cnt] = numpy.mean(points)
        highlight_raw_points[cnt] = numpy.mean(numpy.abs(mono_frame))

        # collect frames for long term analysis

        alength = len(analyze_frame) * len(analyze_frame[-1])
        if alength >= fs * 30 or alength * channel >= total_frame_length - frame_length:
            analyze_frame = numpy.concatenate(analyze_frame, axis=0)

            num = int(len(analyze_frame) / (1.0 * fs / tempo_fs))
            tempo_frame += [
                numpy.abs(
                    numpy.interp(
                        numpy.linspace(0,
                                       len(analyze_frame) - 1, num),
                        numpy.arange(len(analyze_frame)), analyze_frame))
            ]

            # key_frame_length = int(fs*0.25); key_frame_jump = 0.8
            # for i in range(0, len(analyze_frame)-key_frame_length,\
            #   int(key_frame_length*key_frame_jump)):
            #   spectrum = numpy.fft.fft(\
            #       analyze_frame[i:i+key_frame_length],\
            #       int(fs*key_analysis_resolution))
            #   spectrum = numpy.abs(spectrum[1:int(len(spectrum)/2)])
            #   notes = spectrum_to_note_by_max(spectrum, note_freq_div)
            #   chromagram += [note_to_chroma_by_max(notes)]
            analyze_frame = []

    # waveform

    waveform = numpy.int8(waveform / (2**8))  # rescale 16-bit peaks to signed 8-bit

    # tempo analysis with tempo_frame

    # if duration > 60:
    #   tempo_frame = numpy.concatenate(tempo_frame, axis=0)
    #   tempo = get_tempo(tempo_frame, tempo_fs)
    # else: tempo = 0.0
    tempo_frame = numpy.concatenate(tempo_frame, axis=0)
    tempo = get_tempo(tempo_frame, tempo_fs)

    xtempo = tempo
    if xtempo <= 95:
        xtempo = xtempo * 2.0
    if xtempo == 0:
        until_duration = duration
    else:
        until_duration = 60.0 / xtempo * best_highlight_duration_beat
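    # target highlight length: seconds per beat (60/BPM) times the beat count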

    # highlight analysis with highlight_raw_points

    jump = 1
    highlight_length = until_duration * len(highlight_raw_points) / duration
    duratsum = numpy.zeros(len(highlight_raw_points))

    # limit_factor = 1-(duration-60.0)/duration
    # if limit_factor > 1.00: limit_factor = 0.00
    # if limit_factor < 0.25: limit_factor = 0.25
    # limithead = limit_factor
    # limittail = limit_factor
    # highlight_raw_points[:int(len(highlight_raw_points)*limithead)] = 0
    # highlight_raw_points[-int(len(highlight_raw_points)*limittail):] = 0

    limit_factor = 1 - (duration - 60.0) / duration
    if limit_factor > 1.00:
        limit_factor = 0.00
    if limit_factor < 0.25:
        limit_factor = 0.25
    htlength = int(len(highlight_raw_points) * limit_factor)
    window = numpy.hamming(htlength * 2)
    highlight_raw_points[:htlength] *= window[:htlength]
    highlight_raw_points[-htlength:] *= window[htlength:]

    for cnt in numpy.arange(0,
                            len(highlight_raw_points) - highlight_length,
                            jump):
        thdata = numpy.mean(
            highlight_raw_points[int(cnt):int(cnt + highlight_length)])
        duratsum[int(cnt)] = thdata
    pntadd = numpy.argmax(duratsum)
    offset_time = 1.0 * jump * pntadd / (len(highlight_raw_points) / duration)
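    # highlight = (start offset in seconds, length in seconds)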
    highlight = (offset_time, until_duration)

    # autogain analysis in highlight period

    autogain = 0.4
    if find_gain:
        if duration > 60:
            autogain_analysis_length = highlight[1]
            if duration - highlight[0] < autogain_analysis_length:
                autogain_analysis_length = duration - highlight[0]
            frame_length = fs * channel * autogain_analysis_length
            byte_position = pybass.BASS_ChannelSeconds2Bytes(
                hstream, highlight[0])
            pybass.BASS_ChannelSetPosition(hstream, byte_position, False)
            frame_raw = numpy.arange(frame_length, dtype=ctypes.c_short)

            pybass.BASS_ChannelGetData(
                hstream,
                frame_raw.ctypes.data_as(ctypes.POINTER(ctypes.c_short)),
                int(frame_length * 2))

            mono_frame = frame_raw[::channel] / 32768.0
            mono_frame = fir_filter(mono_frame,
                                    lowcut=500,
                                    highcut=fs / 2,
                                    fs=fs,
                                    order=15)
            mono_frame = fir_filter(mono_frame,
                                    lowcut=1000,
                                    highcut=fs / 2,
                                    fs=fs,
                                    order=7)
            mono_frame = fir_filter(mono_frame,
                                    lowcut=1000,
                                    highcut=fs / 2,
                                    fs=fs,
                                    order=5)
            mono_frame = fir_filter(mono_frame,
                                    lowcut=5000,
                                    highcut=fs / 2,
                                    fs=fs,
                                    order=5)
            if fs / 2 > 21000:
                mono_frame = fir_filter(mono_frame,
                                        lowcut=0,
                                        highcut=20000,
                                        fs=fs,
                                        order=45)
                mono_frame += fir_filter(
                    mono_frame, lowcut=15000, highcut=fs / 2, fs=fs,
                    order=5) * 0.5
            rms = numpy.mean(mono_frame**2)**0.5 * 3

            # spectrum = numpy.fft.fft(mono_frame, fs)
            # spectrum = numpy.abs(spectrum[1:int(len(spectrum)/2)])
            # pylab.plot(spectrum); pylab.show()

            autogain = 0.14 / rms

    # key analysis in highlight period

    key = ''
    if find_key:
        chromagram, resolution = ([], 1.0)
        note_freq_div = get_note_freq_div(resolution)
        # note_window = get_note_window(fs, resolution, note_freq_div)
        if xtempo == 0:
            frame_length = int(fs * channel * 0.5)
        else:
            frame_length = int(fs * channel * (60.0 / xtempo))

        offset_position, until_position = (highlight[0],
                                           fs * channel * until_duration * 2)

        if frame_length > total_frame_length:
            frame_length = total_frame_length - 1

        if offset_position + until_position > total_frame_length:
            until_position = total_frame_length - offset_position

        frame_raw = numpy.arange(frame_length, dtype=ctypes.c_short)
        byte_position = pybass.BASS_ChannelSeconds2Bytes(
            hstream, offset_position)
        pybass.BASS_ChannelSetPosition(hstream, byte_position, False)

        for cnt, frame_position in enumerate(
                numpy.arange(0, total_frame_length - frame_length,
                             frame_length)):

            pybass.BASS_ChannelGetData(
                hstream,
                frame_raw.ctypes.data_as(ctypes.POINTER(ctypes.c_short)),
                int(frame_length * 2))

            mono_frame = frame_raw[::channel] / 32768.0
            spectrum = numpy.fft.fft(mono_frame, int(fs * resolution))
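            # keep only the positive-frequency magnitude bins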
            spectrum = numpy.abs(spectrum[1:int(len(spectrum) / 2)])

            notes = spectrum_to_note_by_max(spectrum, note_freq_div)
            chromagram += [note_to_chroma_by_max(notes)]

            if (cnt + 1) * frame_length >= until_position:
                break

        scored_keys, key_scores, key_counts = ([], [0] * 24, [0] * 24)
        for chroma in chromagram:
            lag, score = get_chord_binaries_correlation_lag_score(chroma)
            scored_keys += [lag]
            key_counts[lag] += 1
            key_scores[lag] += score
        key_scores = numpy.array(key_scores)
        max_key_scores = max(key_scores)
        if max_key_scores == 0.0:
            key = ''
        else:
            key_scores = key_scores / max_key_scores * 100
            scored_key_idx = []
            for i in range(1):  # keep only the single best-scoring key
                value, pnt = find_max(key_scores)
                if value < 50:
                    break
                scored_key_idx += [pnt[0]]
                key_scores[pnt[0]] = 0
            string_keys = []
            for i in range(len(scored_key_idx) - 1, -1, -1):
                if scored_key_idx[i] - 12 in scored_key_idx:
                    scored_key_idx.pop(i)
                    continue
                elif scored_key_idx[i] + 12 in scored_key_idx:
                    scored_key_idx.pop(i)
                    continue
                string_keys += [chord_idx_to_char(scored_key_idx[i])]
            string_keys = ' or '.join(string_keys)
            key = string_keys

    # chromagram = numpy.array(chromagram).T
    # chromagram = numpy.flipud(chromagram)
    # pylab.imshow(chromagram, interpolation='nearest')
    # pylab.show() # pylab.grid(True)

    # md5 = file2md5(upath)

    tempo_type, key_type, save_tag = open_shelves(
        ('tempo_restict_type', 'key_format_type', 'auto_save_tag'),
        PREFERENCE_DB)

    if tempo_type is None or tempo_type == 0:
        pass
    elif tempo_type == 1 and tempo <= 95:
        tempo = tempo * 2.0
    elif tempo_type == 2 and tempo >= 120:
        tempo = tempo * 0.5
    if key_type is None:
        key_type = 1
    key = convert_chord_type(key, key_type)
    if save_tag is None:
        save_tag = False
    if save_tag:
        mutagen_mp3 = mutagen.mp3.MP3(path)
        mutagen_mp3['TKEY'] = mutagen.id3.TKEY(encoding=3, text=[key])
        mutagen_mp3['TBPM'] = mutagen.id3.TBPM(encoding=3, text=[tempo])
        mutagen_mp3.save()

    mfeats_data = MFEATS(mdx=mdx,
                         path=path,
                         key=key,
                         tempo=tempo,
                         duration=duration,
                         highlight=highlight,
                         waveform=waveform,
                         date=time.time(),
                         version=version,
                         channel=channel,
                         bit=bit,
                         error=0,
                         autogain=autogain)

    # pybass.BASS_Free()
    if pybass.BASS_ChannelIsActive(hstream) == 1:
        pybass.BASS_StreamFree(hstream)

    # print 'mfeats_single_finished: elapsed_time: %03.06fsec: %03.06fmsec/onesec'\
    #   % (time.time()-tic, (time.time()-tic)/duration*1000)
    # return mfeats_data

    if queue is not None:
        queue.put(mfeats_data)
        return
    else:
        return mfeats_data