Example 1
    def get_silence(self, p=0.250, v=150, s=0.):
        """
        Estimates silences from an audio file.
        @deprecated

        @param p (float) Minimum silence duration in seconds
        @param v (int) Expected minimum volume (rms value)
        @param s (float) Shift delta duration in seconds
        Fills self.silence with [start, end] frame positions of the detected silences.

        """
        self.channel.seek(0)
        self.silence = []

        # Number of frames per analysis window; once a silence has been found,
        # the search continues in smaller steps of half a window.
        nbreadframes = int(self.volstats.get_winlen() * self.channel.get_framerate())
        afterloop_frames = int(nbreadframes / 2)
        initpos = i = self.channel.tell()

        # Scan the channel window by window; whenever a window's volume drops
        # below the threshold v, the surrounding interval is added to self.silence.
        while i < self.channel.get_nframes():

            curframe = self.channel.get_frames(nbreadframes)

            a = AudioFrames(curframe, self.channel.get_sampwidth(), 1)
            volume = a.rms()
            if volume < v:

                # The volume is low: keep reading in smaller steps until it
                # rises above the threshold again or the channel ends.
                while volume < v and self.channel.tell() < self.channel.get_nframes():
                    curframe = self.channel.get_frames(afterloop_frames)

                    a = AudioFrames(curframe, self.channel.get_sampwidth(), 1)
                    volume = a.rms()

                # If the last sequence of silence ends where the new one starts
                # it's a continuous range.
                if self.silence and self.silence[-1][1] == i:
                    self.silence[-1][1] = self.channel.tell()
                else:
                    # Append only if the silence is long enough.
                    duree = self.channel.tell() - i
                    nbmin = int((p + s) * self.channel.get_framerate())
                    if duree > nbmin:
                        # Adjust the silence start position (shift forward by s seconds)
                        __startpos = i + int(s * self.channel.get_framerate())
                        # Adjust the silence end position (shift backward by s seconds)
                        __endpos = self.channel.tell() - int(s * self.channel.get_framerate())
                        self.silence.append([__startpos, __endpos])

            i = self.channel.tell()

        # Restore the channel position to where it was when we started.
        self.channel.seek(initpos)
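
For reference, here is a self-contained sketch of the same scan over a plain list of integer samples. It is not the class above: find_silences, its parameters, and the hand-rolled RMS are illustrative assumptions, but the core logic (window-by-window RMS against a threshold, merging of touching silent ranges, minimum-duration filter) mirrors the method.

import math

def find_silences(samples, framerate, min_dur=0.250, threshold=150, win=0.010):
    """Return [start, end] sample ranges whose RMS stays below threshold."""
    nwin = max(1, int(win * framerate))      # samples per analysis window
    silences = []
    for start in range(0, len(samples), nwin):
        chunk = samples[start:start + nwin]
        rms = math.sqrt(sum(s * s for s in chunk) / len(chunk))
        if rms >= threshold:
            continue
        end = start + len(chunk)
        if silences and silences[-1][1] == start:
            silences[-1][1] = end            # extend a touching silent range
        else:
            silences.append([start, end])
    # Keep only the ranges that last at least min_dur seconds.
    return [r for r in silences if (r[1] - r[0]) >= min_dur * framerate]

For example, with 16 kHz audio, find_silences([0] * 16000 + [2000] * 16000 + [0] * 16000, 16000) should yield [[0, 16000], [32000, 48000]].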
Example 2
    def __init__(self, channel, winlen=0.01):
        """
        Constructor.

        @param channel (Channel) The channel to work on.
        @param winlen (float) Window length to estimate the volume.

        """
        BaseVolume.__init__(self)
        self.winlen = winlen

        # Remember current position
        pos = channel.tell()

        # Rewind to the beginning
        channel.rewind()

        # Constants
        nbframes = int(winlen * channel.get_framerate())
        nbvols = int(channel.get_duration()/winlen) + 1
        self.volumes = [0]*nbvols

        for i in range(nbvols):
            frames = channel.get_frames(nbframes)
            a = AudioFrames(frames, channel.get_sampwidth(), 1)
            self.volumes[i] = a.rms()

        # nbvols over-allocates by one window when the duration is an exact
        # multiple of winlen; the trailing window is then empty, its RMS is 0,
        # and it is dropped here.
        if self.volumes[-1] == 0:
            self.volumes.pop()

        # Return to the position where we were before.
        channel.seek(pos)

        self.rms = channel.rms()
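
The same per-window RMS idea in isolation, assuming raw 16-bit mono PCM bytes instead of a Channel object and using the standard-library audioop module (available up to Python 3.12) where the constructor uses AudioFrames; the function name and parameters are illustrative.

import audioop

def window_volumes(pcm_bytes, framerate, sampwidth=2, winlen=0.01):
    """One RMS value per winlen seconds of raw mono PCM."""
    # Bytes per analysis window (frames per window times bytes per frame).
    step = max(1, int(winlen * framerate)) * sampwidth
    return [audioop.rms(pcm_bytes[i:i + step], sampwidth)
            for i in range(0, len(pcm_bytes), step)]

Unlike the constructor above, the number of windows follows directly from the length of the data, so there is no trailing empty window to pop.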
Example 3
    def rms(self):
        """
        Return the root mean square of the whole audio file.

        @return the root mean square of the audio file

        """
        pos = self.tell()
        self.seek(0)
        a = AudioFrames(self.read_frames(self.get_nframes()), self.get_sampwidth(), self.get_nchannels())
        self.seek(pos)

        return a.rms()
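
For reference, the quantity that AudioFrames.rms() is expected to compute here is the usual root mean square of the sample values, sqrt(mean(s^2)). A minimal stand-alone version over already-decoded integer samples could look like this sketch (the helper is an illustrative assumption, not part of the library):

import math

def rms(samples):
    """Root mean square of a sequence of integer samples."""
    if not samples:
        return 0
    return int(math.sqrt(sum(s * s for s in samples) / len(samples)))

For instance, rms([3, -4]) is int(sqrt((9 + 16) / 2)) = 3.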