Example 1
    def test_Save(self):
        # extract the first channel of the sample and save it into a new file
        cidx = self._sample_1.extract_channel(0)
        channel = self._sample_1.get_channel(cidx)
        audio = Audio()
        audio.append_channel( channel )
        signals.save( TestChannel._sample_path_new, audio )
        savedaudio = signals.open(TestChannel._sample_path_new)

        # the saved file must contain exactly the same frames as the original
        self._sample_1.rewind()
        frames = self._sample_1.read_frames( self._sample_1.get_nframes() )
        savedframes = savedaudio.read_frames( self._sample_1.get_nframes() )
        self.assertEqual(len(frames), len(savedframes))
        self.assertEqual(frames, savedframes)
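
This test relies on fixtures defined elsewhere in the test class. A minimal sketch of what the missing setUp/tearDown could look like, assuming the enclosing class is TestChannel (as the reference to TestChannel._sample_path_new suggests), that os and signals are imported at module level, and with invented sample file names:

    def setUp(self):
        # hypothetical fixture: any existing audio file can serve as the sample
        self._sample_1 = signals.open("sample.wav")
        TestChannel._sample_path_new = "sample_new.wav"

    def tearDown(self):
        self._sample_1.close()
        if os.path.exists(TestChannel._sample_path_new):
            os.remove(TestChannel._sample_path_new)
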
Example 2
    def export(self, inputname, outputname):
        """
        Create a new wav file with the requested parameters.

        @param inputname (string) name of the input file
        @param outputname (string) name of the output file

        """
        toconvert = False

        audio = signals.open(inputname)

        if (audio.get_sampwidth() < self._reqSamplewidth):
            raise NameError("The sample width of ("+str(audio.get_sampwidth())+") of the given file is not appropriate. " + str(self._reqSamplewidth) + " bytes required")

        if (audio.get_framerate() < self._reqFramerate):
            raise NameError("The framerate of ("+str(audio.get_framerate())+") of the given file is not appropriate. " + str(self._reqFramerate) + " Hz required")

        if (self._reqSamplewidth != audio.get_sampwidth()):
            toconvert = True
            if self._logfile:
                self._logfile.print_message("The sample width of ("+str(audio.get_sampwidth())+") of the given file is not appropriate. Sample width is changed to " + str(self._reqSamplewidth) + " bytes", indent=3, status=1)

        if (self._reqChannels != audio.get_nchannels()):
            toconvert = True
            if self._logfile:
                self._logfile.print_message("The number of channels of ("+str(audio.get_nchannels())+") of the given file is not appropriate. Number of channels is changed to " + str(self._reqChannels) + " channels", indent=3, status=1)

        if (self._reqFramerate != audio.get_framerate()):
            toconvert = True
            if self._logfile:
                self._logfile.print_message("The framerate of ("+str(audio.get_framerate())+") of the given file is not appropriate. Framerate is changed to " + str(self._reqFramerate) + " Hz", indent=3, status=1)

        if toconvert is True:
            # Get the expected channel
            idx = audio.extract_channel(0)
            # no more need of input data, can close
            audio.close()

            # Do the job (do not modify the initial channel).
            formatter = ChannelFormatter( audio.get_channel(idx) )
            formatter.set_framerate(self._reqFramerate)
            formatter.set_sampwidth(self._reqSamplewidth)
            formatter.convert()

            # Save the converted channel
            audio_out = Audio()
            audio_out.append_channel( formatter.channel )
            signals.save( outputname, audio_out )

        return toconvert
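
For comparison, the same conversion steps can be collected into a standalone helper built only from the calls shown above. This is a minimal sketch, not part of the original module: the function name, the default values and the import paths are assumptions, since the excerpt does not show them.

import signals                                         # import paths are assumed;
from signals.audio import Audio                        # the excerpt does not show them
from signals.channelformatter import ChannelFormatter

def convert_first_channel(inputname, outputname, framerate=16000, sampwidth=2):
    """Hypothetical helper: convert the first channel of inputname and save it."""
    audio = signals.open(inputname)
    idx = audio.extract_channel(0)
    channel = audio.get_channel(idx)
    audio.close()

    formatter = ChannelFormatter(channel)
    formatter.set_framerate(framerate)
    formatter.set_sampwidth(sampwidth)
    formatter.convert()

    audio_out = Audio()
    audio_out.append_channel(formatter.channel)
    signals.save(outputname, audio_out)
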
Example 3
    def run(self, audiofile, trsfile, output_dir, output_ext="TextGrid"):
        """
        Split an audio file into multiple small audio files.

        @param audiofile is the audio input file name
        @param trsfile is the transcription input file name
        @param output_dir is a directory name to save output tracks (one per unit)
        @param output_ext is the extension of the output annotation files (default: TextGrid)

        """
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        audiospeech = signals.open(audiofile)

        transcription = annotationdata.io.read(trsfile)

        tracks_tier = None
        for tier in transcription:
            if "name" in tier.GetName().lower():
                tracks_tier = tier

        if tracks_tier is None:
            raise Exception("Expected tier not found: a tier name must contain 'name'")

        list_transcription = TrsUtils.Split(transcription, tracks_tier)
        names = [a.GetLabel().GetValue() for a in tracks_tier if not a.GetLabel().IsEmpty()]

        trstracks = []
        for trs in list_transcription:
            begin = int(trs.GetBegin() * audiospeech.get_framerate())
            end = int(trs.GetEnd() * audiospeech.get_framerate())
            trstracks.append((begin, end))
            TrsUtils.Shift(trs, trs.GetBegin())

        chunks = []
        nframes = audiospeech.get_nframes()
        for from_pos, to_pos in trstracks:
            if nframes < from_pos:
                raise ValueError("Position %d not in range(%d)" % (from_pos, nframes))
            audiospeech.set_pos(from_pos)
            chunks.append(audiospeech.read_frames(to_pos - from_pos))

        for name, chunk, trs in zip(names, chunks, list_transcription):
            # 'chunk' holds raw frames, so use save_fragment() with the source audio
            # to provide the framerate and sample width of the output file
            signals.save_fragment(os.path.join(output_dir, name + ".wav"), audiospeech, chunk)
            annotationdata.io.write(os.path.join(output_dir, name + "." + output_ext), trs)
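
A possible call of this method, shown only as a hedged sketch: the class that defines run() does not appear in this excerpt, so the name AudioSplitter and its argument-free constructor are assumptions, and the file names are invented.

splitter = AudioSplitter()   # hypothetical name for the class defining run()
splitter.run("recording.wav", "recording.TextGrid", "tracks", output_ext="TextGrid")
# afterwards, the 'tracks' directory holds one .wav file and one .TextGrid file
# per labelled annotation of the tier whose name contains "name"
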
Example 4
    def test_WriteFrames(self):
        _sample_new = "newFile.wav"
        # save first
        signals.save( _sample_new, self._sample_1 )
        # read the saved file and compare Audio() instances
        newFile = signals.open( _sample_new )
        self.assertEqual(newFile.get_framerate(), self._sample_1.get_framerate())
        self.assertEqual(newFile.get_sampwidth(), self._sample_1.get_sampwidth())
        self.assertEqual(newFile.get_nchannels(), self._sample_1.get_nchannels())
        self.assertEqual(newFile.get_nframes(), self._sample_1.get_nframes())
        newFile.close()
        os.remove(_sample_new)
        self._sample_1.rewind()

        # now save only the raw frames with save_fragment() and check the result
        signals.save_fragment( _sample_new, self._sample_1, self._sample_1.read_frames(self._sample_1.get_nframes()))
        newFile = signals.open( _sample_new )
        self.assertEqual(newFile.get_framerate(), self._sample_1.get_framerate())
        self.assertEqual(newFile.get_sampwidth(), self._sample_1.get_sampwidth())
        self.assertEqual(newFile.get_nchannels(), self._sample_1.get_nchannels())
        self.assertEqual(newFile.get_nframes(), self._sample_1.get_nframes())
        newFile.close()
        os.remove(_sample_new)
        
        _sample_new = "newFile.aiff"
        # save first
        signals.save( _sample_new, self._sample_4 )
        # read the saved file and compare Audio() instances
        newFile = signals.open( _sample_new )
        self.assertEqual(newFile.get_framerate(), self._sample_4.get_framerate())
        self.assertEqual(newFile.get_sampwidth(), self._sample_4.get_sampwidth())
        self.assertEqual(newFile.get_nchannels(), self._sample_4.get_nchannels())
        self.assertEqual(newFile.get_nframes(), self._sample_4.get_nframes())
        newFile.close()
        os.remove(_sample_new)
        self._sample_4.rewind()

        # same check for the AIFF sample: save only the raw frames with save_fragment()
        signals.save_fragment( _sample_new, self._sample_4, self._sample_4.read_frames(self._sample_4.get_nframes()))
        newFile = signals.open( _sample_new )
        self.assertEqual(newFile.get_framerate(), self._sample_4.get_framerate())
        self.assertEqual(newFile.get_sampwidth(), self._sample_4.get_sampwidth())
        self.assertEqual(newFile.get_nchannels(), self._sample_4.get_nchannels())
        self.assertEqual(newFile.get_nframes(), self._sample_4.get_nframes())
        newFile.close()
        os.remove(_sample_new)
Example 5
if args.bf and args.bs:
    print "The bf and bs options can't be used at the same time!"
    sys.exit(1)

if args.ef and args.es:
    print "The ef and es options can't be used at the same time!"
    sys.exit(1)

# begin/end positions are expressed in frames; values given in seconds
# are converted with the framerate of the input file
if args.bf:
    begin = args.bf
elif args.bs:
    begin = int(args.bs*audio.get_framerate())
else:
    begin = 0
if args.ef:
    end = args.ef
elif args.es:
    end = int(args.es*audio.get_framerate())
else:
    end = 0

# extract the requested fragment from each channel of the input file
for i in xrange(audio.get_nchannels()):
    idx = audio.extract_channel(i)
    audio.rewind()
    channel = audio.get_channel(idx)
    extracter = ChannelFragmentExtracter(channel)
    audio_out.append_channel(extracter.extract_fragment(begin, end))
    
signals.save(args.o, audio_out)
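
The fragment above relies on an argument parser, an opened input file (audio) and an output container (audio_out) that are created earlier in the script and not shown here. A minimal sketch of that setup, where the -w option name, the help texts and the import paths are assumptions (only bf, bs, ef, es and o appear in the code above):

import sys
from argparse import ArgumentParser

import signals                    # import paths are assumed;
from signals.audio import Audio   # the excerpt does not show them

parser = ArgumentParser(description="Extract a fragment of an audio file.")
parser.add_argument("-w", metavar="file", required=True, help="input audio file name (option name is a guess)")
parser.add_argument("-o", metavar="file", required=True, help="output audio file name")
parser.add_argument("-bf", type=int,   help="begin position, in number of frames")
parser.add_argument("-bs", type=float, help="begin position, in seconds")
parser.add_argument("-ef", type=int,   help="end position, in number of frames")
parser.add_argument("-es", type=float, help="end position, in seconds")
args = parser.parse_args()

audio = signals.open(args.w)
audio_out = Audio()
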
Example 6
    
# ----------------------------------------------------------------------------


equalizer = ChannelsEqualizer()

# use the first input file as the reference for sample width and framerate
audioref = signals.open(args.w[0])
sampwidth = audioref.get_sampwidth()
framerate = audioref.get_framerate()

for inputFile in args.w:
    audio = signals.open(inputFile)
    if audio.get_sampwidth() != sampwidth:
        print "Input files must have the same sample width!"
        sys.exit(1)
    if audio.get_framerate() != framerate:
        print "Input files must have the same framerate!"
        sys.exit(1)
    # extract the first channel (channel indexes are 0-based, as in the other examples)
    idx = audio.extract_channel(0)
    equalizer.append_channel(audio.get_channel(idx))
    
equalizer.equalize()

# Save each equalized channel into a new file
for i, chan in enumerate(equalizer.channels):
    audio_out = Audio()
    audio_out.append_channel( chan )
    filename, extension = os.path.splitext(args.w[i])
    signals.save(filename + "EQUAL" + extension, audio_out)

# ----------------------------------------------------------------------------