Beispiel #1
0
    def _generate_applicationkeysecret(self, deviceid):
        """Derive the hourly application key secret for *deviceid*.

        Chains HMAC-SHA256 digests keyed with ``self.SECRETKEY``; the
        number of extra digest rounds depends on the current UTC month,
        day and hour, so the token changes every hour.  Returns an
        unpadded urlsafe-base64 string.
        """
        deviceid = deviceid.encode("utf-8")  # for python3
        # plus 1 hour and drop minute and secs
        # for python3 : floor division
        ts_1hour = (int(time.time()) + 60 * 60) // 3600 * 3600
        time_struct = time.gmtime(ts_1hour)
        ts_1hour_str = str(ts_1hour).encode("utf-8")

        # seed: digest of the secret key with itself
        h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
        h.update(self.SECRETKEY)
        tmp = h.digest()
        # one extra round per month number (1-12)
        for i in range(time_struct.tm_mon):
            h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
            h.update(tmp)
            tmp = h.digest()
        # mix the device id into the chain
        h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
        h.update(urlsafe_b64encode(tmp).rstrip(b"=") + deviceid)
        tmp = h.digest()
        # extra rounds keyed on the day of month
        for i in range(time_struct.tm_mday % 5):
            h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
            h.update(tmp)
            tmp = h.digest()

        # mix the hour timestamp into the chain
        h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
        h.update(urlsafe_b64encode(tmp).rstrip(b"=") + ts_1hour_str)
        tmp = h.digest()

        for i in range(time_struct.tm_hour % 5):  # utc hour
            h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
            h.update(tmp)
            tmp = h.digest()

        return urlsafe_b64encode(tmp).rstrip(b"=").decode("utf-8")
Beispiel #2
0
    def _compute(self, code, parseInt, _0x59ce16, _1x4bfb36, domain):
        """Decode an obfuscated stream path from *code* and build the URL.

        Direct port of the site's obfuscated JavaScript decoder -- the
        hex-style variable names and exact statement order are preserved
        on purpose.  NOTE(review): ``_0x59ce16`` is unused here,
        presumably kept to mirror the JS signature -- confirm.
        NOTE(review): ``(9 * 8 / 9)`` is a float under Python 3, which
        makes the ``>>`` below raise TypeError; this appears to rely on
        Python 2 integer division -- verify the intended interpreter.
        """

        _0x1bf6e5 = ''
        ke = []

        # the first 9*8 hex chars split into nine 8-hex-digit key words
        for i in list(range(0, len(code[0:9 * 8]), 8)):
            ke.append(int(code[i:i + 8], 16))

        _0x439a49 = 0
        _0x145894 = 0

        while _0x439a49 < len(code[9 * 8:]):

            _0x5eb93a = 64
            _0x896767 = 0
            _0x1a873b = 0
            _0x3c9d8e = 0

            # accumulate a variable-length integer, 6 bits per hex pair,
            # until a byte below the continuation threshold is read
            while 1:

                if _0x439a49 + 1 >= len(code[9 * 8:]):
                    _0x5eb93a = 143

                _0x3c9d8e = int(code[9 * 8 + _0x439a49:9 * 8 + _0x439a49 + 2],
                                16)
                _0x439a49 += 2

                if _0x1a873b < 6 * 5:
                    _0x332549 = _0x3c9d8e & 63
                    _0x896767 += _0x332549 << _0x1a873b
                else:
                    _0x332549 = _0x3c9d8e & 63
                    _0x896767 += int(_0x332549 * pow(2, _0x1a873b))

                _0x1a873b += 6

                if not _0x3c9d8e >= _0x5eb93a:
                    break

            # XOR with the rolling key word and caller-supplied values,
            # then emit up to four characters ('$' is a filler byte)
            _0x30725e = _0x896767 ^ ke[_0x145894 % 9] ^ parseInt ^ _1x4bfb36
            _0x2de433 = _0x5eb93a * 2 + 127

            for i in list(range(4)):

                _0x3fa834 = chr(((_0x30725e & _0x2de433) >> (9 * 8 / 9) * i) -
                                1)

                if _0x3fa834 != '$':

                    _0x1bf6e5 += _0x3fa834

                _0x2de433 = (_0x2de433 << int(9 * 8 / 9))

            _0x145894 += 1

        url = self.web_url.format(domain, _0x1bf6e5)

        return url
Beispiel #3
0
 def _send_ping(self, interval):
     """Ping the websocket every *interval* seconds until shut down.

     Sleeps in one-second steps so a cleared ``self.keep_running`` flag
     is noticed within a second instead of after a full interval.
     """
     while True:
         for i in list(range(interval)):
             time.sleep(1)
             if not self.keep_running:
                 return
         self.sock.ping()
    def process_chunks(self):
        """Build the chunk list from the next available chunk-data item.

        Uses the stream's first chunk data when this worker is still at
        the initial chunk id, otherwise the next queued poller item; each
        id->hash entry expands to ten consecutive chunks, with future
        chunks getting an estimated availability timestamp.
        """
        data = []
        if self.chunk_id == self.stream.first_chunk_data.chunk_id:
            data = self.stream.first_chunk_data
        elif self.stream.poller.data_chunks:
            data = self.stream.poller.data_chunks.popleft()

        if not data:
            return

        result = []
        delay = 0
        ordered = sorted(data.hashes.items(), key=lambda item: int(item[0]))
        for key, chunk_hash in ordered:
            first = int(key)
            for num in range(first, first + 10):
                if num > data.chunk_id and data.chunk_id != 0:
                    # Live: id is higher than chunk_data.chunk_id
                    delay += data.chunk_time / 1000
                    when = (data.current_timestamp
                            + datetime.timedelta(seconds=delay))
                else:
                    # Live: id equals chunk_data.chunk_id or is lower
                    # VOD: chunk_data.chunk_id is 0
                    when = data.current_timestamp
                result.append(
                    Chunk(int(num),
                          self.template_url % (int(num), chunk_hash),
                          when))
        self.chunks = result
Beispiel #5
0
    def process_chunks(self):
        """Turn the next chunk-data entry into ``Chunk`` objects.

        Each id->hash entry expands to ten sequential chunk ids; chunks
        past the current live id get an availability timestamp pushed
        into the future by the accumulated chunk durations.
        """
        chunk_data = []
        if self.chunk_id == self.stream.first_chunk_data.chunk_id:
            chunk_data = self.stream.first_chunk_data
        elif self.stream.poller.data_chunks:
            chunk_data = self.stream.poller.data_chunks.popleft()

        if chunk_data:
            new_chunks = []
            seconds_ahead = 0
            for key, chunk_hash in sorted(chunk_data.hashes.items(),
                                          key=lambda item: int(item[0])):
                base = int(key)
                for num in range(base, base + 10):
                    if num > chunk_data.chunk_id and chunk_data.chunk_id != 0:
                        # Live: id is higher than chunk_data.chunk_id
                        seconds_ahead += chunk_data.chunk_time / 1000
                        available_at = (chunk_data.current_timestamp
                                        + datetime.timedelta(
                                            seconds=seconds_ahead))
                    else:
                        # Live: id equals chunk_data.chunk_id or is lower
                        # VOD: chunk_data.chunk_id is 0
                        available_at = chunk_data.current_timestamp
                    new_chunks.append(
                        Chunk(int(num),
                              self.template_url % (int(num), chunk_hash),
                              available_at))
            self.chunks = new_chunks
Beispiel #6
0
    def iter_segments(self):
        """Yield ``Fragment`` objects until the stream ends or is closed.

        Walks the fragment range known from the current bootstrap info,
        then waits and reloads the bootstrap to pick up newly published
        fragments (live streams).
        """
        while not self.closed:
            fragments = range(self.current_fragment, self.last_fragment + 1)
            fragments = filter(self.valid_fragment, fragments)

            for fragment in fragments:
                # advance the resume point before yielding
                self.current_fragment = fragment + 1
                self.current_segment = self.segment_from_fragment(fragment)

                fragment_duration = int(
                    self.fragment_duration(fragment) * 1000)
                fragment_url = self.fragment_url(self.current_segment,
                                                 fragment)
                # NOTE: rebinds the loop variable from an int to a Fragment
                fragment = Fragment(self.current_segment, fragment,
                                    fragment_duration, fragment_url)

                log.debug("Adding fragment {0}-{1} to queue", fragment.segment,
                          fragment.fragment)
                yield fragment

                # End of stream
                stream_end = self.end_fragment and fragment.fragment >= self.end_fragment
                if self.closed or stream_end:
                    return

            if self.wait(self.bootstrap_reload_time):
                try:
                    self.update_bootstrap()
                except StreamError as err:
                    log.warning("Failed to update bootstrap: {0}", err)
Beispiel #7
0
 def await_write(self, write_calls=1, timeout=5):
     """Let the writer process *write_calls* writes, waiting up to
     *timeout* seconds for each, and re-raise any recorded error."""
     writer = self.thread.reader.writer
     remaining = write_calls
     while remaining > 0:
         remaining -= 1
         writer.write_wait.set()
         writer.write_done.wait(timeout)
         writer.write_done.clear()
         error = writer.write_error
         if error:  # pragma: no cover
             raise error
Beispiel #8
0
 def device_id(self):
     """
     Randomly generated deviceId (50 alphanumeric characters), created
     lazily and cached on the instance.
     :return:
     """
     if self._device_id is None:
         alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
         picked = [random.choice(alphabet) for _ in range(50)]
         self._device_id = "".join(picked)
     return self._device_id
Beispiel #9
0
 def transform_token(token_in, date):
     """Unscramble a site token by undoing its date-keyed swap shuffle.

     A valid token ends with "OK" after unshuffling; that marker is
     stripped from the returned value.  Logs an error and returns None
     when the marker is missing.
     """
     chars = list(token_in)
     size = len(token_in)
     for idx in reversed(range(size)):
         other = (idx * date) % size
         # swap chars at other and idx
         chars[idx], chars[other] = chars[other], chars[idx]
     result = ''.join(chars)
     if not result.endswith("OK"):
         log.error("Invalid site token: {0} => {1}".format(
             token_in, result))
         return None
     return result[:-2]
Beispiel #10
0
    def mask(mask_key, data):
        """
        mask or unmask data. Just do xor for each byte

        mask_key: 4 byte string(byte).

        data: data to mask/unmask.
        """
        # bytearray instead of array.array: array.tostring() was removed
        # in Python 3.9 (tobytes() replaced it), and bytearray supports
        # in-place XOR with plain indexing just as well.
        _m = bytearray(mask_key)
        _d = bytearray(data)
        for i in range(len(_d)):
            _d[i] ^= _m[i % 4]
        return bytes(_d)
    def _get_streams(self):
        """Yield (name, UHSStream) pairs for the UStream media behind self.url.

        Connects to the UStream API and polls up to five responses,
        collecting stream metadata from "moduleInfo" commands until both
        the stream list and the CDN URL are known.
        """
        media_id, application = self._get_media_app()
        if media_id:
            api = UHSClient(media_id,
                            application,
                            referrer=self.url,
                            cluster="live",
                            password=self.get_option("password"),
                            proxy=self.session.get_option("http-proxy"))
            log.debug(
                "Connecting to UStream API: media_id={0}, application={1}, referrer={2}, cluster={3}",
                media_id, application, self.url, "live")
            api.connect()

            streams_data = {}
            for _ in range(5):
                # do not use too many tries, it might take longer for a timeout
                # when streamFormats is {} and contentAvailable is True
                data = api.recv()
                try:
                    if data["cmd"] == "moduleInfo":
                        r = self.handle_module_info(data["args"])
                        if r:
                            streams_data.update(r)
                    elif data["cmd"] == "reject":
                        self.handle_reject(api, data["args"])
                    else:
                        log.debug("Unexpected `{0}` command".format(
                            data["cmd"]))
                        log.trace("{0!r}".format(data))
                except ModuleInfoNoStreams:
                    break

                # once streams and the CDN URL are known, yield each stream
                # (sorted for a stable order) and stop polling
                if streams_data.get("streams") and streams_data.get("cdn_url"):
                    for s in sorted(streams_data["streams"],
                                    key=lambda k:
                                    (k["stream_name"], k["path"])):
                        yield s["stream_name"], UHSStream(
                            session=self.session,
                            api=api,
                            first_chunk_data=ChunkData(
                                s["first_chunk"], s["chunk_time"], s["hashes"],
                                datetime.datetime.now(tz=utc)),
                            template_url=urljoin(streams_data["cdn_url"],
                                                 s["path"]),
                        )
                    break
    def update_chunk_info(self, result):
        """Update chunk bookkeeping from a "chunkRange" API result.

        Records the known chunk id range, rebuilds ``self.chunks`` for
        the whole range, and initializes ``self.chunk_id`` on first call.
        """
        chunk_range = result["chunkRange"]
        if not chunk_range:
            return

        chunk_id = int(result["chunkId"])
        chunk_offset = int(result["offset"])
        # readable equivalent of dict(map(partial(map, int), ...)):
        # both keys and values are converted to int
        chunk_range = {int(k): int(v) for k, v in chunk_range.items()}

        self.chunk_ranges.update(chunk_range)
        # min() is O(n); sorting just for the first element was O(n log n)
        self.chunk_id_min = min(chunk_range)
        self.chunk_id_max = chunk_id
        self.chunks = [Chunk(i, self.format_chunk_url(i),
                             not self.chunk_id and i == chunk_id and chunk_offset)
                       for i in range(self.chunk_id_min, self.chunk_id_max + 1)]

        if self.chunk_id is None and self.chunks:
            self.chunk_id = chunk_id
Beispiel #13
0
    def _get_videokey_from_ticket(self, ticket):
        """Fetch and decrypt the video key for a license *ticket*.

        Flow: request a media token, exchange it plus the ticket for an
        encrypted key ("k") and content id ("cid"), derive the AES key
        via HMAC-SHA256 over cid + deviceid, then decrypt the 16-byte
        video key (AES-ECB, single block).
        """
        params = {
            "osName": "android",
            "osVersion": "6.0.1",
            "osLang": "ja_JP",
            "osTimezone": "Asia/Tokyo",
            "appId": "tv.abema",
            "appVersion": "3.27.1"
        }
        auth_header = {"Authorization": "Bearer " + self.usertoken}
        res = self._session.http.get(self._MEDIATOKEN_API, params=params,
                                     headers=auth_header)
        jsonres = self._session.http.json(res,
                                          schema=self._MEDIATOKEN_SCHEMA)
        mediatoken = jsonres['token']

        res = self._session.http.post(self._LICENSE_API,
                                      params={"t": mediatoken},
                                      json={"kv": "a", "lt": ticket})
        jsonres = self._session.http.json(res,
                                          schema=self._LICENSE_SCHEMA)
        cid = jsonres['cid']
        k = jsonres['k']

        # decode "k" as a base-58-style number using STRTABLE positions
        res = sum([self.STRTABLE.find(k[i]) * (58 ** (len(k) - 1 - i))
                  for i in range(len(k))])
        # pack the decoded integer into 16 big-endian bytes
        encvideokey = struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff)

        # HKEY:
        # RC4KEY = unhexlify('DB98A8E7CECA3424D975280F90BD03EE')
        # RC4DATA = unhexlify(b'D4B718BBBA9CFB7D0192A58F9E2D146A'
        #                     b'FC5DB29E4352DE05FC4CF2C1005804BB')
        # rc4 = ARC4.new(RC4KEY)
        # HKEY = rc4.decrypt(RC4DATA)
        h = hmac.new(unhexlify(self.HKEY),
                     (cid + self.deviceid).encode("utf-8"),
                     digestmod=hashlib.sha256)
        enckey = h.digest()

        aes = AES.new(enckey, AES.MODE_ECB)
        rawvideokey = aes.decrypt(encvideokey)

        return rawvideokey
Beispiel #14
0
    def _create_flv_playlist(self, template):
        """Build an FLVPlaylist of HTTP substreams from a VOD playlist URL."""
        playlist = http.json(http.get(template), schema=_vod_playlist_schema)

        parsed = urlparse(template)
        url_template = "{0}://{1}{2}".format(
            parsed.scheme, parsed.netloc, playlist["template"]
        )
        # total fragment count across all parts
        segment_max = reduce(lambda acc, frag: acc + frag[0],
                             playlist["fragments"], 0)

        substreams = []
        for num in range(1, segment_max + 1):
            substreams.append(
                HTTPStream(self.session,
                           url_template.replace("$fragment$", str(num))))

        return FLVPlaylist(self.session,
                           duration=playlist["duration"],
                           flatten_timestamps=True,
                           skip_header=True,
                           streams=substreams)
Beispiel #15
0
    def update_chunk_info(self, result):
        """Append new chunks based on the "chunkRange" info in *result*."""
        chunk_range = result["chunkRange"]
        if not chunk_range:
            return

        chunk_id = int(result["chunkId"])
        chunk_offset = int(result["offset"])
        parsed_range = {}
        for key, value in chunk_range.items():
            parsed_range[int(key)] = str(value)
        self.chunk_ranges.update(parsed_range)

        if self.chunk_id is None:
            self.chunk_id = chunk_id

        num = self.chunk_id
        while num <= chunk_id:
            offset = not self.chunk_id and num == chunk_id and chunk_offset
            self.chunks.append(Chunk(num, self.format_chunk_url(num), offset))
            num += 1

        self.chunk_id = chunk_id + 1
    def _create_flv_playlist(self, template):
        """Assemble the VOD FLV playlist: one HTTPStream per fragment."""
        res = http.get(template)
        playlist = http.json(res, schema=_vod_playlist_schema)

        parts = urlparse(template)
        url_template = "{0}://{1}{2}".format(
            parts.scheme, parts.netloc, playlist["template"]
        )
        # total fragment count: sum of the per-part fragment counts
        segment_max = sum(fragment[0] for fragment in playlist["fragments"])

        substreams = [
            HTTPStream(self.session,
                       url_template.replace("$fragment$", str(idx)))
            for idx in range(1, segment_max + 1)
        ]

        return FLVPlaylist(self.session,
                           duration=playlist["duration"],
                           flatten_timestamps=True,
                           skip_header=True,
                           streams=substreams)
Beispiel #17
0
    def update_chunk_info(self, result):
        """Refresh chunk-range bookkeeping from a "chunkRange" API reply."""
        chunk_range = result["chunkRange"]
        if not chunk_range:
            return

        chunk_id = int(result["chunkId"])
        chunk_offset = int(result["offset"])
        # readable equivalent of dict(map(partial(map, int), ...)):
        # both keys and values are converted to int
        chunk_range = {int(k): int(v) for k, v in chunk_range.items()}

        self.chunk_ranges.update(chunk_range)
        # min() is O(n); sorting just for the first element was O(n log n)
        self.chunk_id_min = min(chunk_range)
        self.chunk_id_max = chunk_id
        self.chunks = [
            Chunk(i, self.format_chunk_url(i), not self.chunk_id
                  and i == chunk_id and chunk_offset)
            for i in range(self.chunk_id_min, self.chunk_id_max + 1)
        ]

        if self.chunk_id is None and self.chunks:
            self.chunk_id = chunk_id
Beispiel #18
0
    def _get_streams(self):
        """Return a dict of name -> UHSStream for the UStream media.

        Connects to the UStream API, then polls up to five responses for
        "moduleInfo" data until both the stream list and the CDN URL are
        known; returns None when rejected or when no streams appear.
        """
        media_id, application = self._get_media_app()
        if media_id:
            api = UHSClient(media_id, application, referrer=self.url, cluster="live", password=self.get_option("password"))
            log.debug("Connecting to UStream API: media_id={0}, application={1}, referrer={2}, cluster={3}",
                      media_id, application, self.url, "live")
            api.connect()

            streams_data = {}
            streams = {}
            for _ in range(5):
                # do not use to many tries, it might take longer for a timeout
                # when streamFormats is {} and contentAvailable is True
                data = api.recv()
                try:
                    if data["cmd"] == "moduleInfo":
                        r = self.handle_module_info(data["args"])
                        if r:
                            streams_data.update(r)
                    elif data["cmd"] == "reject":
                        self.handle_reject(api, data["args"])
                    else:
                        log.debug("Unexpected `{0}` command".format(data["cmd"]))
                        log.trace("{0!r}".format(data))
                except ModuleInfoNoStreams:
                    return None

                # once streams and the CDN URL are known, build the result
                if streams_data.get("streams") and streams_data.get("cdn_url"):
                    for s in streams_data["streams"]:
                        streams[s["stream_name"]] = UHSStream(
                            session=self.session,
                            api=api,
                            first_chunk_data=ChunkData(
                                s["first_chunk"],
                                s["chunk_time"],
                                s["hashes"],
                                datetime.datetime.now(tz=utc)),
                            template_url=urljoin(streams_data["cdn_url"],
                                                 s["path"]),
                        )
                    return streams
Beispiel #19
0
    def iter_segments(self):
        """Yield one Chunk per 12-second slice of every part of the stream."""
        quality = QUALITY_MAP[self.stream.quality]
        for part in self.stream.parts:
            duration = part["duration"]
            if part.get("recording"):
                recording = part["recording"]
                extension = "rec"
            else:
                recording = part["id"]
                extension = "part"

            # 12-second chunks; a partial trailing chunk still counts
            start = int(part.get("start", 0) / 12)
            count = int(duration / 12) + 1
            sequence = start
            while sequence < start + count:
                if self.closed:
                    return

                self.logger.debug("Adding chunk {0}/{1}/{2} to queue",
                                  recording, quality, sequence)

                yield Chunk(recording, quality, sequence, extension)
                sequence += 1
Beispiel #20
0
 def pinst_id(self):
     """Return a fresh 8-character alphanumeric player-instance id."""
     alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
     return ''.join(random.choice(alphabet) for _ in range(8))
Beispiel #21
0
    def _handle_module_info_stream(self, data):
        # type: (Dict)
        """Handle a "moduleInfo" stream payload: parse formats once,
        then interpolate/extrapolate segment ids from the id->hash data
        and queue the resulting segments.
        """
        if data.get("contentAvailable") is False:
            return self._set_error("This stream is currently offline")

        mp4_segmented = data.get("streamFormats", {}).get("mp4/segmented")
        if not mp4_segmented:
            return

        # parse the stream formats once
        if self.stream_initial_id is None:
            try:
                formats = self._schema_stream_formats.validate(mp4_segmented)
                formats = formats["streams"]
            except PluginError as err:
                return self._set_error(err)
            self.stream_formats_video = list(
                filter(lambda f: type(f) is StreamFormatVideo, formats))
            self.stream_formats_audio = list(
                filter(lambda f: type(f) is StreamFormatAudio, formats))

        # parse segment duration and hashes, and queue new segments
        try:
            segmentdata = self._schema_stream_segments.validate(mp4_segmented)
            # type: Dict
        except PluginError:
            log.error("Failed parsing hashes")
            return

        current_id = segmentdata["chunkId"]
        # type: int
        duration = segmentdata["chunkTime"]
        # type: int
        path = segmentdata["contentAccess"]
        # type: str
        hashes = segmentdata["hashes"]
        # type: Dict[int, str]

        sorted_ids = sorted(hashes.keys())
        count = len(sorted_ids)
        if count == 0:
            return

        # initial segment ID (needed by the workers to filter queued segments)
        if self.stream_initial_id is None:
            self.stream_initial_id = current_id

        current_time = datetime.now()

        # lock the stream segments deques for the worker threads
        with self.stream_segments_lock:
            # interpolate and extrapolate segments from the provided id->hash data
            diff = 10 - sorted_ids[
                0] % 10  # if there's only one id->hash item, extrapolate until the next decimal
            for idx, segment_id in enumerate(sorted_ids):
                idx_next = idx + 1
                if idx_next < count:
                    # calculate the difference between IDs and use that to interpolate segment IDs
                    # the last id->hash item will use the previous diff to extrapolate segment IDs
                    diff = sorted_ids[idx_next] - segment_id
                for num in range(segment_id, segment_id + diff):
                    # availability is offset from "now" by the segment's
                    # distance to the current live chunk id
                    self._segments_append(
                        Segment(num=num,
                                duration=duration,
                                available_at=current_time +
                                timedelta(seconds=(num - current_id - 1) *
                                          duration / 1000),
                                hash=hashes[segment_id],
                                path=path))

        self._set_ready()
Beispiel #22
0
 def cache_buster_string(length):
     """Return *length* random uppercase ASCII letters for cache busting."""
     # generator expression instead of a throwaway list; the loop
     # variable was unused, so it is named "_"
     return "".join(random.choice(string.ascii_uppercase)
                    for _ in range(length))
Beispiel #23
0
    timeout: socket timeout time. This value is integer.
             if you set None for this value, it means "use default_timeout value"

    options: current support option is only "header".
             if you set header as dict value, the custom HTTP headers are added.
    """
    sockopt = options.get("sockopt", [])
    sslopt = options.get("sslopt", {})
    websock = WebSocket(sockopt=sockopt, sslopt=sslopt)
    websock.settimeout(timeout if timeout is not None else default_timeout)
    websock.connect(url, **options)
    return websock


_MAX_INTEGER = (1 << 32) - 1  # largest unsigned 32-bit value
# printable ASCII in 0x21-0x2f and 0x3a-0x7e (skips digits 0x30-0x39);
# presumably for legacy hixie-76 Sec-WebSocket-Key generation -- see the
# reference link below
_AVAILABLE_KEY_CHARS = list(range(0x21, 0x2f + 1)) + list(range(
    0x3a, 0x7e + 1))
_MAX_CHAR_BYTE = (1 << 8) - 1  # largest single-byte value (255)

# ref. Websocket gets an update, and it breaks stuff.
# http://axod.blogspot.com/2010/06/websocket-gets-update-and-it-breaks.html


def _create_sec_websocket_key():
    uid = uuid.uuid4()
    return base64.encodestring(uid.bytes).strip()


_HEADERS_TO_CHECK = {
    "upgrade": "websocket",
    "connection": "upgrade",
Beispiel #24
0
    def parse_manifest(cls, session, url_or_manifest, **args):
        """
        Attempt to parse a DASH manifest file and return its streams

        :param session: Streamlink session instance
        :param url_or_manifest: URL of the manifest file or an XML manifest string
        :return: a dict of name -> DASHStream instances
        """
        # "url" must be bound in both branches: the DRM error message
        # below references it, and it previously was unbound for inline
        # XML manifests, raising NameError instead of PluginError
        url = None

        if url_or_manifest.startswith('<?xml'):
            mpd = MPD(parse_xml(url_or_manifest, ignore_ns=True))
        else:
            res = session.http.get(url_or_manifest, **session.http.valid_request_args(**args))
            url = res.url

            # base URL is the manifest URL with the last path segment dropped
            urlp = list(urlparse(url))
            urlp[2], _ = urlp[2].rsplit("/", 1)

            mpd = MPD(session.http.xml(res, ignore_ns=True), base_url=urlunparse(urlp), url=url)

        video, audio = [], []

        # Search for suitable video and audio representations
        for aset in mpd.periods[0].adaptationSets:
            if aset.contentProtection:
                raise PluginError("{} is protected by DRM".format(url))
            for rep in aset.representations:
                if rep.mimeType.startswith("video"):
                    video.append(rep)
                elif rep.mimeType.startswith("audio"):
                    audio.append(rep)

        # fall back to a single "no representation" entry so the
        # itertools.product below still yields audio-/video-only combos
        if not video:
            video = [None]

        if not audio:
            audio = [None]

        locale = session.localization
        locale_lang = locale.language
        lang = None
        available_languages = set()

        # if the locale is explicitly set, prefer that language over others
        for aud in audio:
            if aud and aud.lang:
                available_languages.add(aud.lang)
                try:
                    if locale.explicit and aud.lang and Language.get(aud.lang) == locale_lang:
                        lang = aud.lang
                except LookupError:
                    continue

        if not lang:
            # filter by the first language that appears
            lang = audio[0] and audio[0].lang

        log.debug("Available languages for DASH audio streams: {0} (using: {1})".format(
            ", ".join(available_languages) or "NONE",
            lang or "n/a"
        ))

        # if the language is given by the stream, filter out other languages that do not match
        if len(available_languages) > 1:
            audio = list(filter(lambda a: a.lang is None or a.lang == lang, audio))

        ret = []
        for vid, aud in itertools.product(video, audio):
            stream = DASHStream(session, mpd, vid, aud, **args)
            stream_name = []

            if vid:
                stream_name.append("{:0.0f}{}".format(vid.height or vid.bandwidth_rounded, "p" if vid.height else "k"))
            if audio and len(audio) > 1:
                stream_name.append("a{:0.0f}k".format(aud.bandwidth))
            ret.append(('+'.join(stream_name), stream))

        # rename duplicate streams
        dict_value_list = defaultdict(list)
        for k, v in ret:
            dict_value_list[k].append(v)

        ret_new = {}
        for q in dict_value_list:
            items = dict_value_list[q]

            try:
                # sort by bandwidth, high to low, so "_alt" entries are lower quality
                items = sorted(items, key=lambda k: k.video_representation.bandwidth, reverse=True)
            except AttributeError:
                pass

            for n in range(len(items)):
                if n == 0:
                    ret_new[q] = items[n]
                elif n == 1:
                    ret_new['{0}_alt'.format(q)] = items[n]
                else:
                    ret_new['{0}_alt{1}'.format(q, n)] = items[n]
        return ret_new
Beispiel #25
0
    def iter_tags(self, fd=None, buf=None, skip_header=None):
        """Yield FLV ``Tag`` objects decoded from the encrypted stream *fd*.

        Reads an encrypted lookup table plus an interleaved 16-byte
        key/IV pair from the header, decrypts the table, then walks its
        entries to produce video and audio tags, decrypting tag payloads
        where flagged.  NOTE(review): the *buf* and *skip_header*
        parameters are unused in the visible code -- presumably kept for
        interface compatibility; confirm against callers.
        """
        flags = U8.read(fd)
        quality = flags & 15
        version = flags >> 4
        lookup_size = U16BE.read(fd)
        enc_table = fd.read(lookup_size)

        key = b""
        iv = b""

        # key and IV bytes are interleaved one byte at a time
        for i in range(16):
            key += fd.read(1)
            iv += fd.read(1)

        if not (key and iv):
            return

        dec_table = self.decrypt_data(key, iv, enc_table)
        dstream = BytesIO(dec_table)

        # Decode lookup table (ported from K-S-V BeatConvert.php)
        while True:
            flags = U8.read(dstream)
            if not flags:
                break

            typ = flags >> 4
            encrypted = (flags & 4) > 0
            keyframe = (flags & 2) > 0
            config = (flags & 1) > 0
            # NOTE: local "time" shadows any module-level time import here
            time = U32BE.read(dstream)
            data_length = U32BE.read(dstream)

            if encrypted:
                raw_length = U32BE.read(dstream)
            else:
                raw_length = data_length

            # Decrypt encrypted tags
            data = fd.read(data_length)
            if encrypted:
                data = self.decrypt_data(key, iv, data)
                # decrypted payload may be padded; trim to the raw length
                data = data[:raw_length]

            # Create video tag
            if typ == 1:
                if version == 2:
                    if config:
                        avc = AVCVideoData(AVC_PACKET_TYPE_SEQUENCE_HEADER,
                                           data=data)
                    else:
                        avc = AVCVideoData(AVC_PACKET_TYPE_NALU, data=data)

                    if keyframe:
                        videodata = VideoData(VIDEO_FRAME_TYPE_KEY_FRAME,
                                              VIDEO_CODEC_ID_AVC, avc)
                    else:
                        videodata = VideoData(VIDEO_FRAME_TYPE_INTER_FRAME,
                                              VIDEO_CODEC_ID_AVC, avc)
                else:
                    videodata = RawData(data)

                yield Tag(TAG_TYPE_VIDEO, time, videodata)

            # Create audio tag
            if typ == 2:
                if version == 2:
                    if config:
                        aac = AACAudioData(AAC_PACKET_TYPE_SEQUENCE_HEADER,
                                           data)
                    else:
                        aac = AACAudioData(AAC_PACKET_TYPE_RAW, data)

                    audiodata = AudioData(codec=AUDIO_CODEC_ID_AAC,
                                          rate=AUDIO_RATE_44_KHZ,
                                          bits=AUDIO_BIT_RATE_16,
                                          type=AUDIO_TYPE_STEREO,
                                          data=aac)
                else:
                    audiodata = RawData(data)

                yield Tag(TAG_TYPE_AUDIO, time, audiodata)