def _process_message(self, msg):
    """Carve buffered BLOCK payloads into MP3 frames and re-emit them.

    Non-BLOCK messages flush the frame buffer, reset the timing state,
    and are forwarded unchanged.  BLOCK payloads are accumulated until
    they can be split into complete MP3 frames; each frame goes out as
    a FRAME message whose timestamps are interpolated from the frame
    durations plus a slowly-adapting start-time adjustment.
    """
    if msg.message_type != message.BLOCK:
        # A non-BLOCK message interrupts the stream: discard any
        # partial frame data and forget the timing baseline before
        # passing the message through untouched.
        self._buffered = []
        self._start_timestamp_ms = None
        self._elapsed_frame_ms = 0
        self._add_message(msg)
        return
    # Remember when this run of BLOCKs began, if we have not already.
    if self._start_timestamp_ms is None:
        self._start_timestamp_ms = msg.start_timestamp_ms
    # Split everything buffered so far (plus this payload) into frames.
    self._buffered.append(msg.payload)
    parsed = list(mp3_frame.split_blocks(iter(self._buffered),
                                         expected_hdr=self._expected_hdr))
    # Trailing chunks without a recognized header may hold a truncated
    # frame; keep them buffered for the next BLOCK instead of emitting.
    keep = len(parsed)
    while keep and parsed[keep - 1][0] is None:
        keep -= 1
    self._buffered = [chunk for _, chunk in parsed[keep:]]
    parsed = parsed[:keep]
    # Emit one message per frame, stamping interpolated timestamps.
    timestamp_ms = (self._start_timestamp_ms
                    + self._start_adjustment_ms
                    + self._elapsed_frame_ms)
    for header, chunk in parsed:
        out = message.Message()
        if header is None:
            # Unrecognized bytes between frames pass through as a
            # zero-duration BLOCK.
            out.message_type = message.BLOCK
            frame_ms = 0
        else:
            out.message_type = message.FRAME
            frame_ms = header.duration_ms
            self._elapsed_frame_ms += frame_ms
        out.connection_id = msg.connection_id
        out.connection_offset = msg.connection_offset
        out.mp3_header = header
        out.payload = chunk
        # Best-effort timestamps; they may occasionally step backward.
        out.start_timestamp_ms = int(timestamp_ms)
        out.end_timestamp_ms = int(timestamp_ms + frame_ms)
        timestamp_ms += frame_ms
        self._add_message(out)
    # Compare elapsed wall-clock time against the time implied by the
    # frames we have emitted.  If we have produced more frame time
    # than could actually have passed (a negative delta), fold the
    # discrepancy into the adjustment by averaging it with the
    # previous value.
    wall_clock_ms = msg.end_timestamp_ms - self._start_timestamp_ms
    delta_ms = wall_clock_ms - self._elapsed_frame_ms
    if delta_ms < 0:
        self._start_adjustment_ms = (
            self._start_adjustment_ms + delta_ms) / 2
# --- Example 2: duplicate copy of _process_message (scrape artifact) ---
 def _process_message(self, msg):
     """Split buffered BLOCK data into MP3 frames and requeue them.

     Any message that is not a BLOCK resets the frame buffer and the
     timing state and is forwarded unchanged.  BLOCK payloads are
     accumulated, carved into MP3 frames via mp3_frame.split_blocks(),
     and emitted as FRAME messages whose timestamps are interpolated
     from the frame durations plus a running start-time adjustment.
     """
     if msg.message_type != message.BLOCK:
         # Clear the buffer and reset our various time-tracking
         # variables before passing the message through.
         self._buffered = []
         self._start_timestamp_ms = None
         self._elapsed_frame_ms = 0
         self._add_message(msg)
         return
     # If necessary, remember the start time of this message.
     if self._start_timestamp_ms is None:
         self._start_timestamp_ms = msg.start_timestamp_ms
     # Take the buffered data and split it into MP3 frames.
     self._buffered.append(msg.payload)
     frames = list(
         mp3_frame.split_blocks(iter(self._buffered),
                                expected_hdr=self._expected_hdr))
     self._buffered = []
     # If any of the last frames appear to be junk, they might
     # contain a truncated frame.  If so, stick them back onto
     # the buffer (in their original order) so the next BLOCK can
     # complete them.
     while frames:
         last_hdr, last_buffer = frames[-1]
         if last_hdr is not None:
             break
         self._buffered.insert(0, last_buffer)
         frames.pop()
     # Turn the frames into FRAME messages and add them to our
     # queue.
     start_ms = (self._start_timestamp_ms + self._start_adjustment_ms +
                 self._elapsed_frame_ms)
     for hdr, data in frames:
         new_msg = message.Message()
         if hdr is None:
             # Unrecognized bytes between frames pass through as a
             # zero-duration BLOCK.
             new_msg.message_type = message.BLOCK
             duration_ms = 0
         else:
             new_msg.message_type = message.FRAME
             duration_ms = hdr.duration_ms
             self._elapsed_frame_ms += duration_ms
         new_msg.connection_id = msg.connection_id
         new_msg.connection_offset = msg.connection_offset
         new_msg.mp3_header = hdr
         new_msg.payload = data
         # Now set the start and end timestamps; these are our best
         # approximations, and might do alarming things like jump
         # backward in time.
         new_msg.start_timestamp_ms = int(start_ms)
         new_msg.end_timestamp_ms = int(start_ms + duration_ms)
         start_ms += duration_ms
         self._add_message(new_msg)
     # Now let's see if we need to modify our start-time adjustment.
     # First, compute the total wall-clock time so far.
     wall_clock_ms = msg.end_timestamp_ms - self._start_timestamp_ms
     # Now find the difference between the elapsed wall-clock time
     # and the elapsed time implied by the frames.
     delta_ms = wall_clock_ms - self._elapsed_frame_ms
     # If the observed delta is negative (indicating that we've
     # seen more frames than would normally be possible given how
     # much time has passed), our new fudge factor will be the
     # average of the observed discrepancy and the previous fudge
     # factor.  (NOTE(review): under Python 2, "/" here is integer
     # floor division for int operands.)
     if delta_ms < 0:
         self._start_adjustment_ms = (self._start_adjustment_ms +
                                      delta_ms) / 2
# --- Example 3: mp3_frame splitter test (scrape artifact separator) ---
    def test_split(self):
        """Exercise the three mp3_frame splitters on assorted byte streams.

        Checks that mp3_frame.split(), mp3_frame.split_blocks() and
        mp3_frame.split_one_block() all agree with each other and with
        the expected header/payload decomposition for streams mixing
        valid frames, junk, truncated frames/headers and ID3 tags.
        """
        # NOTE(review): this test relies on Python 2 behavior —
        # cStringIO and indexing dict.items() — and will not run
        # unmodified under Python 3.
        raw_hdr, hdr = mp3_header_test.VALID_MP3_HEADERS.items()[0]
        # Pad the raw header bytes out to a full frame's worth of data.
        frame_data = raw_hdr.ljust(hdr.frame_size, "a")
        # Set up a fragment of a header
        partial_header = raw_hdr[:3]
        short_frame = frame_data[:25]
        assert len(short_frame) < len(frame_data)

        # A complete ID3 tag padded to 77 bytes.
        id3_data = id3_header.create_test_header(77).ljust(77, "b")

        # An ID3 tag with a valid frame tag stashed inside.
        evil_id3_data = id3_header.create_test_header(50) + raw_hdr
        evil_id3_data = evil_id3_data.ljust(50, "c")

        for seq in (
            [ frame_data ],
            [ frame_data, frame_data ],
            [ 'junk', frame_data ],
            [ 'junk', frame_data, frame_data ],
            [ 'junk', frame_data, frame_data, 'junk' ],
            [ 'junk', frame_data, frame_data, 'junk', frame_data ],
            # Check handling of truncated headers and frames.
            [ partial_header ],
            [ 'junk', partial_header ],
            [ 'junk', short_frame ],
            [ frame_data, partial_header ],
            [ frame_data, short_frame ],
            [ frame_data, 'junk', short_frame ],
            [ frame_data, 'junk', partial_header],
            # ID3 headers mixed in
            [ id3_data, frame_data ],
            [ frame_data, id3_data ],
            [ id3_data, frame_data ],
            [ id3_data, frame_data, id3_data ],
            [ evil_id3_data, frame_data, "junk" ],
            [ "junk", frame_data, evil_id3_data, frame_data ],
            [ "junk", frame_data, evil_id3_data, frame_data, "junk" ],
            [ "junk" + evil_id3_data, id3_data, frame_data, evil_id3_data ],
            # Some longer sequences
            500 * [ frame_data ],
            500 * [ "junk", frame_data, id3_data, frame_data ]
            ):
            data = ''.join(seq)
            stream = cStringIO.StringIO(data)
            split_stream = list(mp3_frame.split(stream))
            split_stream_from_blocks = list(mp3_frame.split_blocks(iter(seq)))
            split_stream_from_one_block = mp3_frame.split_one_block(data)
            # Make sure that the sequences of header/frame data pairs
            # returned by mp3_frame.split(), mp3_frame.split_blocks()
            # and mp3_frame.split_one_block() match what we would
            # expect.
            self.assertEqual(len(seq), len(split_stream))
            for expected_data, (actual_hdr, data) in zip(seq, split_stream):
                self.assertEqual(expected_data, data)
                if expected_data == frame_data:
                    # Frame chunks must come back with a matching header.
                    self.assertTrue(actual_hdr is not None)
                    self.assertTrue(actual_hdr.match(hdr))
                    self.assertEqual(hdr.frame_size, len(frame_data))
                else:
                    # Junk, ID3 and truncated chunks carry no header.
                    self.assertTrue(actual_hdr is None)

            # split_blocks() must agree with split() pair-for-pair.
            self.assertEqual(len(seq), len(split_stream_from_blocks))
            for (hdr1, data1), (hdr2, data2) in zip(split_stream,
                                                    split_stream_from_blocks):
                self.assertEqual(str(hdr1), str(hdr2))
                self.assertEqual(data1, data2)

            # split_one_block() must agree with split() pair-for-pair.
            self.assertEqual(len(seq), len(split_stream_from_one_block))
            for (hdr1, data1), (hdr2, data2) in zip(
                split_stream, split_stream_from_one_block):
                self.assertEqual(str(hdr1), str(hdr2))
                self.assertEqual(data1, data2)