def test(self):

    mediaName = os.environ['BBB_MOVIE']
    media = avMedia.Media(mediaName, quiet=False)
    infoDict = media.info()
    #print(infoDict)

    assert infoDict['name'] == mediaName
    assert 'mov' in infoDict['format']

    assert infoDict['stream'][0]['type'] == 'video'
    assert infoDict['stream'][0]['width'] == 320
    assert infoDict['stream'][0]['height'] == 180

    assert infoDict['stream'][1]['type'] == 'audio'
    assert infoDict['stream'][1]['channels'] == 2
def testImageArray(self):

    # write image then read it and test data
    mediaName = '/tmp/fooArray.tiff'

    w = 8
    h = 8
    resolution = (w, h)

    streamInfo = {
        'width': resolution[0],
        'height': resolution[1],
        'codec': 'auto',  # codec auto. guess
        'pixelFormat': 'rgb24',
        'type': 'video',
    }

    a = array.array('B')
    colorTuple = [11, 128, 255]
    a.fromlist(colorTuple * w * h)

    with avMedia.Media.open(mediaName, 'w', streamsInfo=[streamInfo]) as m:
        m.write(a, 1, 'video')

    assert os.path.exists(mediaName)
    assert os.path.getsize(mediaName) > 0

    media2 = avMedia.Media(mediaName)
    mediaInfo = media2.info()

    vstream = 0
    streamInfo = mediaInfo['stream'][vstream]

    assert streamInfo['width'] == w
    assert streamInfo['height'] == h

    media2.addScaler(vstream, w, h)

    for pkt in media2:
        if pkt.streamIndex() == vstream:
            pkt.decode()
            if pkt.decoded:
                frame = pkt.swsFrame
                assert self.compareData(frame, w, h, colorTuple)
                break
def testCopyPacket(self):

    mediaName = os.environ['CONSTANT_WAV']
    media = avMedia.Media(mediaName, quiet=False)
    mediaInfo = media.info()

    astream = 0  # audio stream index
    streamInfo = mediaInfo['stream'][astream]

    for pkt in media:
        if pkt.streamIndex() == astream:
            pkt2 = copy.copy(pkt)
            pkt2.decode()
            if pkt2.decoded:
                frame = pkt2.frame
                assert self.compareAudioData(frame, pkt2.dataSize)
def testImage(self):

    mediaName = os.environ['TIFF_IMAGE']
    media = avMedia.Media(mediaName, quiet=False)
    mediaInfo = media.info()

    # retrieve video width and height
    vstream = 0  # video stream index
    streamInfo = mediaInfo['stream'][vstream]
    w, h = streamInfo['width'], streamInfo['height']

    # output rgb24 (24 bits)
    media.addScaler(vstream, w, h)

    for pkt in media:
        if pkt.streamIndex() == vstream:
            pkt.decode()
            if pkt.decoded:
                frame = pkt.swsFrame
                assert self.compareData(frame, w, h)
def testAudio(self):

    mediaName = os.environ['CONSTANT_WAV']
    media = avMedia.Media(mediaName, quiet=False)
    mediaInfo = media.info()

    astream = 0  # audio stream index
    streamInfo = mediaInfo['stream'][astream]

    wp = wave.open(mediaName)

    # check ffmpeg info against wave info
    frames = wp.getnframes()
    rate = wp.getframerate()

    assert wp.getnchannels() == streamInfo['channels']
    assert rate == streamInfo['sampleRate']
    assert wp.getsampwidth() == streamInfo['bytesPerSample']

    duration = frames / float(rate)
    assert duration == mediaInfo['duration']

    # size in bytes for 1 second of audio
    secondSize = streamInfo['channels'] * streamInfo['bytesPerSample'] * streamInfo['sampleRate']
    decodedSize = 0

    for pkt in media:
        if pkt.streamIndex() == astream:
            pkt.decode()
            if pkt.decoded:
                frame = pkt.frame
                decodedSize += pkt.dataSize
                assert self.compareAudioData(frame, pkt.dataSize)

    # check total decoded size
    # TODO: check with wav of len 2.1s
    assert secondSize * duration == decodedSize
def testAudioResampling(self):

    mediaName = os.environ['CONSTANT_WAV']
    media = avMedia.Media(mediaName, quiet=False)
    mediaInfo = media.info()

    astream = 0  # audio stream index
    streamInfo = mediaInfo['stream'][astream]

    if not avMedia.AVPY_RESAMPLE_SUPPORT:
        print('No resampling support, test disabled.')
        return

    wp = wave.open(mediaName)

    # check ffmpeg info against wave info
    frames = wp.getnframes()
    rate = wp.getframerate()

    assert wp.getnchannels() == streamInfo['channels']
    assert rate == streamInfo['sampleRate']
    assert wp.getsampwidth() == streamInfo['bytesPerSample']

    duration = frames / float(rate)
    assert duration == mediaInfo['duration']

    # initial sample rate / 2
    outSampleRate = 22050
    outAudio = {
        'layout': 'stereo',  # XXX: channelLayout?
        'channels': 2,
        'sampleRate': outSampleRate,
        'sampleFmt': 's16',
        'bytesPerSample': 2,
    }

    hasResampler = media.addResampler(astream, streamInfo, outAudio)
    assert hasResampler

    # size in bytes for 1 second of audio
    secondSize = outAudio['channels'] * outAudio['bytesPerSample'] * outAudio['sampleRate']
    decodedSize = 0

    for pkt in media:
        if pkt.streamIndex() == astream:
            pkt.decode()
            if pkt.decoded:
                frame = pkt.resampledFrame
                decodedSize += pkt.rDataSize
                assert self.compareAudioData(frame, pkt.rDataSize)

    expectedSize = duration * secondSize

    print('decodedSize', decodedSize)
    print('expectedSize', expectedSize)
    print('frame size', streamInfo['frameSize'])

    secondsDiff = float(expectedSize - decodedSize) / secondSize
    print('diff (in seconds)', secondsDiff)

    # XXX: accept a small margin of error for now
    # wondering if this is normal or not...
    assert secondsDiff < 0.01
def testImage(self):

    # write image then read it and test data
    # TODO: temp dir --> from TOX or gen a new one?
    mediaName = '/tmp/foo.tiff'
    # FIXME: invalid argument with .tga?
    #mediaName = '/tmp/blah.tga'

    w = 160
    h = 120

    media = avMedia.Media(mediaName, 'w', quiet=False)
    resolution = (w, h)

    streamInfoVideo = {
        'width': resolution[0],
        'height': resolution[1],
    }

    # codec auto. guess
    streamInfoVideo['codec'] = 'auto'
    streamInfoVideo['pixelFormat'] = 'rgb24'

    streamIndex = media.addStream('video', streamInfoVideo)
    pkt = media.videoPacket()

    # see http://multimedia.cx/eggs/supplying-ffmpeg-with-metadata/
    # for available metadata per container
    metadata = {'artist': 'me'}
    media.writeHeader(metadata)

    # Presentation TimeStamp (pts)
    pts = 0
    #maxFrame = 1

    self.fillRgb(pkt.frame, *resolution)
    media.write(pkt, pts + 1, 'video')

    media.writeTrailer()
    # writing done

    # now read image and check data
    assert os.path.exists(mediaName)
    assert os.path.getsize(mediaName) > 0

    media2 = avMedia.Media(mediaName)
    mediaInfo = media2.info()

    vstream = 0
    streamInfo = mediaInfo['stream'][vstream]

    assert streamInfo['width'] == w
    assert streamInfo['height'] == h

    media2.addScaler(vstream, w, h)

    for pkt in media2:
        if pkt.streamIndex() == vstream:
            pkt.decode()
            if pkt.decoded:
                frame = pkt.swsFrame
                assert self.compareData(frame, w, h, (109, 219, 1))
                break
def testWav(self):

    # write wav then read it and test data
    mediaName = '/tmp/foo.wav'

    media = avMedia.Media(mediaName, 'w', quiet=False)

    streamInfoAudio = {
        'bitRate': 64000,  # XXX: compute it, why 64000?
        'sampleRate': 44100,
        'channels': 2,
        'codec': 'auto',
    }

    streamIndex = media.addStream('audio', streamInfoAudio)
    pkt = media.audioPacket()

    info = media.info()
    si = info['stream'][0]
    frameSize = si['frameSize']

    # size in bytes for 1 second of audio
    secondSize = si['channels'] * si['bytesPerSample'] * si['sampleRate']
    # write ~ 1s of audio
    nFrame = float(secondSize) / float(frameSize)
    nFrameInt = int(round(nFrame))

    # TODO: check with empty dict or None
    metadata = {'artist': 'me'}
    media.writeHeader(metadata)

    # Presentation TimeStamp (pts)
    pts = 0

    for i in range(nFrameInt):
        for j in range(int(frameSize / 4)):
            # write
            # 1 - 255 (left channel)
            # 250 - 254 (right channel)
            pkt.frame.contents.data[0][j*4] = 1
            pkt.frame.contents.data[0][j*4+1] = 255
            pkt.frame.contents.data[0][j*4+2] = 250
            pkt.frame.contents.data[0][j*4+3] = 254

        media.write(pkt, pts + 1, 'audio')
        pts += 1

    media.writeTrailer()

    # read wav
    wp = wave.open(mediaName)

    # check ffmpeg info against wave info
    frames = wp.getnframes()
    rate = wp.getframerate()

    assert wp.getnchannels() == si['channels']
    assert rate == si['sampleRate']
    assert wp.getsampwidth() == si['bytesPerSample']

    duration = frames / float(rate)
    assert duration > 0

    if nFrame != nFrameInt:
        # assert abs(1.0 - duration) < 0.01
        assert_less(abs(1.0 - duration), 0.01)
    else:
        assert duration == 1.0

    # note: for the wave module, a frame is equivalent to
    # 1 sample of audio
    # so here: 4 bytes (2 channels * 2 bytes per channel)
    wpFrame = wp.readframes(1)
    wpFrameCount = 0

    while wpFrame:
        # 01 - 255 - 250 - 254
        if avMedia.PY3:
            assert wpFrame == b'\x01\xff\xfa\xfe'
        else:
            assert wpFrame == '\x01\xff\xfa\xfe'

        wpFrameCount += 1
        wpFrame = wp.readframes(1)

    assert wpFrameCount / nFrameInt == frameSize / (si['channels'] * si['bytesPerSample'])