def test_CreateSilence(self):
    self._sample_1.extract_channel(0)
    self._sample_2.extract_channel(0)
    channel = self._sample_1.get_channel(0)
    monofrag = MonoFragment(channel.frames)
    monofrag.create_silence(1000)
    self.assertEqual(channel.get_nframes() + 1000,
                     len(monofrag.get_frames()) / channel.get_sampwidth())
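# The assertion above implies that create_silence(n) appends n frames of
# zero-valued samples, so the raw byte length grows by n * sampwidth.
# A minimal sketch of that padding on raw frames, independent of
# MonoFragment (the function name and values are illustrative; zero bytes
# assume signed PCM, the usual case for 16-bit audio):
def append_silence(frames, nframes, sampwidth):
    """Append nframes of silence (zero bytes) to a raw PCM frame string."""
    return frames + b"\x00" * (nframes * sampwidth)

# For example, 1000 extra frames of 16-bit audio add 2000 bytes:
#   padded = append_silence(b"\x01\x02" * 4, 1000, 2)
#   assert len(padded) == 8 + 1000 * 2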
def equalize(self):
    """
    Equalize the number of frames of all the channels by appending
    silence at the end of the shorter ones.

    """
    # Find the length, in frames, of the longest channel.
    nframes = 0
    for i in xrange(len(self.channels)):
        nframes = max(nframes, self.channels[i].get_nframes())

    # Pad every shorter channel with silence up to that length.
    for i in xrange(len(self.channels)):
        if self.channels[i].get_nframes() < nframes:
            fragment = MonoFragment(self.channels[i].frames)
            fragment.create_silence(nframes - self.channels[i].get_nframes())
            self.channels[i] = Channel(self.channels[i].get_framerate(),
                                       self.channels[i].get_sampwidth(),
                                       fragment.get_frames())
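# The same pad-to-longest logic, written against plain byte strings rather
# than Channel/MonoFragment objects (a sketch; the frame strings and the
# 16-bit sample width mentioned below are illustrative):
def equalize_frames(frame_list, sampwidth):
    """Pad every raw frame string with zero bytes so that all of them
    hold as many frames as the longest one."""
    nframes = max(len(f) // sampwidth for f in frame_list)
    return [f + b"\x00" * ((nframes - len(f) // sampwidth) * sampwidth)
            for f in frame_list]

# e.g. equalize_frames([b"\x00\x01" * 8, b"\x00\x01" * 5], 2)
# returns two strings of 8 frames (16 bytes) each.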
def __convert_frames(self, frames):
    """
    Convert frames to the expected sample width and frame rate.

    @param frames (string) the frames to convert

    """
    fragment = MonoFragment(frames)

    # Convert the sample width if it differs from the expected one
    if self.channel.get_sampwidth() != self.sampwidth:
        fragment.changesampwidth(self.channel.get_sampwidth(), self.sampwidth)

    # Convert the frame rate if it differs from the expected one
    if self.channel.get_framerate() != self.framerate:
        fragment.resample(self.sampwidth, self.channel.get_framerate(), self.framerate)

    return fragment.get_frames()
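# The two conversions above likely correspond to the standard-library
# audioop routines shown below; this is an assumption about what
# MonoFragment.changesampwidth() and resample() do internally, not a copy
# of their code:
import audioop

def convert_frames(frames, in_sampwidth, out_sampwidth, in_rate, out_rate):
    """Convert mono PCM frames to a new sample width, then to a new rate."""
    if in_sampwidth != out_sampwidth:
        # lin2lin changes the sample width of linear PCM samples.
        frames = audioop.lin2lin(frames, in_sampwidth, out_sampwidth)
    if in_rate != out_rate:
        # ratecv resamples; it returns the frames and a converter state.
        frames, _ = audioop.ratecv(frames, out_sampwidth, 1,
                                   in_rate, out_rate, None)
    return frames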