def list(self):
        url = "%s/%s/posts?page=%u&%s" % (utils.url_root, self.author_url,
                                          self.current_page,
                                          utils.selected_languages())
        html_data = http_request.get(url)
        soup_strainer = SoupStrainer("main")
        beautiful_soup = BeautifulSoup(
            html_data,
            soup_strainer,
            convertEntities=BeautifulSoup.HTML_ENTITIES)
        articles = beautiful_soup.findAll("article")
        # findAll() returns an empty ResultSet, never None
        if not articles:
            control.directory_end()
            return

        for article in articles:
            utils.add_entry_video(article)

        next_url = "%s?action=list-author&page=%i&author-url=%s" % (
            sys.argv[0], self.current_page + 1,
            urllib.quote_plus(self.author_url))
        utils.add_next_page(beautiful_soup, next_url, self.current_page + 1)

        control.directory_end()
        return
    def browse(self):
        url = self.browse_url % (utils.url_root, urllib.quote_plus(
            self.sort), self.current_page, urllib.quote_plus(
                self.search_term), utils.selected_languages())
        html_data = http_request.get(url)
        soup_strainer = SoupStrainer("main")
        beautiful_soup = BeautifulSoup(
            html_data,
            soup_strainer,
            convertEntities=BeautifulSoup.HTML_ENTITIES)

        ul_entries = beautiful_soup.find("ol", {"class": "authorsByLetter"})
        if ul_entries is None:
            control.directory_end()
            return

        li_entries = ul_entries.findAll("li")
        for li_entry in li_entries:
            self.add_author_directory(li_entry)

        if self.search_term == "":
            next_url = "%s?action=browse-authors&page=%i&sort=%s" % (
                sys.argv[0], self.current_page + 1,
                urllib.quote_plus(self.sort_method))
        else:
            next_url = "%s?action=search-authors&page=%i&query=%s" % (
                sys.argv[0], self.current_page + 1,
                urllib.quote_plus(self.search_term))
        print next_url
        utils.add_next_page(beautiful_soup, next_url, self.current_page + 1)

        control.directory_end()
        return
    def list(self):
        url = ""
        if not re.match("^https?:", self.event_url):
            url = "%s%s" % (utils.url_root, self.event_url)
        else:
            url = self.event_url

        url = "%s?sort=%s&page=%i&direction=asc&%s" % (
            url, self.sort, self.current_page, utils.selected_languages())

        html_data = http_request.get(url)

        soup_strainer = SoupStrainer("main")
        beautiful_soup = BeautifulSoup(
            html_data,
            soup_strainer,
            convertEntities=BeautifulSoup.HTML_ENTITIES)
        articles = beautiful_soup.findAll("article")
        # findAll() returns an empty ResultSet, never None
        if not articles:
            control.directory_end()
            return

        for article in articles:
            utils.add_entry_video(article)

        next_url = "%s?action=list-event&page=%i&sort=%s&event-url=%s" % (
            sys.argv[0], self.current_page + 1,
            urllib.quote_plus(
                self.sort_method), urllib.quote_plus(self.event_url))
        utils.add_next_page(beautiful_soup, next_url, self.current_page + 1)

        control.directory_end()
        return
    def live(self):
        url = self.browse_url % (utils.url_root, urllib.quote_plus(self.sort), self.current_page, utils.selected_languages())
        html_data = http_request.get(url)
        print url
        soup_strainer = SoupStrainer("div", {"class": "tab-content"})
        beautiful_soup = BeautifulSoup(html_data, soup_strainer, convertEntities=BeautifulSoup.HTML_ENTITIES)
        ul_entries = beautiful_soup.find("ul", {"class": "entries"})

        if ul_entries is None:
            # nothing is live
            control.directory_end()
            return

        li_entries = ul_entries.findAll("li")
        for li_entry in li_entries:
            action_url = ("%s?action=list-event&event-url=" % (sys.argv[0])) + "%s"
            utils.add_show_directory(li_entry, action_url)

        next_url = "%s?action=browse-shows&page=%i&sort=%s" % (
            sys.argv[0], self.current_page + 1, urllib.quote_plus(self.sort_method))
        utils.add_next_page(beautiful_soup, next_url, self.current_page + 1)

        control.directory_end()

        return
    def browse(self):
        url = self.browse_url % (utils.url_root, urllib.quote_plus(
            self.sort), self.current_page, utils.selected_languages())
        html_data = http_request.get(url)
        soup_strainer = SoupStrainer("main")
        beautiful_soup = BeautifulSoup(
            html_data,
            soup_strainer,
            convertEntities=BeautifulSoup.HTML_ENTITIES)
        articles = beautiful_soup.findAll("article")
        # findAll() returns an empty ResultSet, never None
        if not articles:
            control.directory_end()
            return

        for article in articles:
            action_url = ("%s?action=list-show&show-url=" %
                          (sys.argv[0])) + "%s"
            utils.add_show_directory(article, action_url)

        next_url = "%s?action=browse-shows&page=%i&sort=%s" % (
            sys.argv[0], self.current_page + 1,
            urllib.quote_plus(self.sort_method))
        utils.add_next_page(beautiful_soup, next_url, self.current_page + 1)

        control.directory_end()
        return
    def search(self):
        if self.search_query is None or self.search_query == '':
            t = control.lang(30201).encode('utf-8')
            k = control.keyboard('', t)
            k.doModal()
            self.search_query = k.getText() if k.isConfirmed() else None

        if self.search_query is None or self.search_query == '':
            return

        base_url = "https://c9search.azurewebsites.net/content/search?text=%s&$top=100&$skip=0&$inlinecount=allpages" \
                   % (urllib.quote_plus(self.search_query))
        data = http_request.get(base_url)
        # use find(), not index(): index() raises ValueError when the key
        # is missing, so the "not found" guard below would never fire
        start_index = data.find('"documents":') + 12
        if start_index <= 12:
            return

        json_data = data[start_index:-3]
        json_media = json.loads(json_data)

        for media in json_media:
            title = media["title"]
            url = media["permalink"]
            genre = media["published"]
            thumbnail = media["previewImage"]
            plot = media["summaryBody"]

            list_item = control.item(title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail)
            list_item.setInfo("video", {"Title": title, "Studio": "Microsoft Channel 9", "Plot": plot, "Genre": genre})
            plugin_play_url = '%s?action=play&video_page_url=%s' % (sys.argv[0], urllib.quote_plus(url))
            control.addItem(handle=int(sys.argv[1]), url=plugin_play_url, listitem=list_item, isFolder=False)

        # End of directory...
        control.directory_end()
        return
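The search above extracts a JSON array from a larger response by locating the literal key "documents" and slicing; a minimal standalone sketch of the same extraction, run against a hypothetical payload:

import json

# hypothetical response body; the real service wraps the documents array
# in extra metadata, hence the slicing in search() above
data = '{"count":1,"documents":[{"title":"Intro","permalink":"/p/1"}]}'
start_index = data.find('"documents":') + 12
if start_index > 12:
    documents = json.loads(data[start_index:-1])
    for media in documents:
        print media["title"], media["permalink"]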
    def browse(self):
        url = self.browse_url % (
            utils.url_root, urllib.quote_plus(self.sort), self.current_page, urllib.quote_plus(self.search_term),
            utils.selected_languages())
        html_data = http_request.get(url)
        soup_strainer = SoupStrainer("div", {"class": "tab-content"})
        beautiful_soup = BeautifulSoup(html_data, soup_strainer, convertEntities=BeautifulSoup.HTML_ENTITIES)
        ul_authors = beautiful_soup.find("ul", {"class": "authors"})

        if ul_authors is None:
            control.directory_end()
            return

        li_entries = ul_authors.findAll("li")
        for li_entry in li_entries:
            self.add_author_directory(li_entry)

        if self.search_term == "":
            next_url = "%s?action=browse-authors&page=%i&sort=%s" % (
                sys.argv[0], self.current_page + 1, urllib.quote_plus(self.sort_method))
        else:
            next_url = "%s?action=search-authors&page=%i&query=%s" % (
                sys.argv[0], self.current_page + 1, urllib.quote_plus(self.search_term))
        print next_url
        utils.add_next_page(beautiful_soup, next_url, self.current_page + 1)

        control.directory_end()
        return
    def get_video_url_and_subtitles(self, video_page_url):
        # Get HTML page...
        # test the parameter, not self.video_page_url
        if not re.match('^https?:', video_page_url):
            video_page_url = "%s%s" % (utils.url_root, video_page_url)

        html_data = http_request.get(video_page_url)
        # Parse HTML response...
        soup_strainer = SoupStrainer("div", {"class": "download"})
        beautiful_soup = BeautifulSoup(html_data, soup_strainer)

        video_url = None
        subtitle_urls = []
        divs = beautiful_soup.findAll("div")
        if len(divs) < 2:
            return video_url, subtitle_urls
        li_video_entries = divs[1].findAll("li")

        if len(divs) > 2:
            profile = xbmcaddon.Addon().getAddonInfo('profile')
            tempdir = xbmc.translatePath(os.path.join(profile, "temp/subtitles"))
            if not os.path.isdir(tempdir):
                os.makedirs(tempdir)
            else:
                for temp_file in os.listdir(tempdir):
                    filename = os.path.join(tempdir, temp_file)
                    if os.path.isfile(filename):
                        os.unlink(filename)

            for li_entry in divs[2].findAll("li"):
                li_entry_a = li_entry.find("a")
                # use get(): subscripting a missing attribute raises KeyError
                if li_entry_a is not None and li_entry_a.get("download") is not None:
                    subtitle_url = li_entry_a["href"]
                    subtitle_name = li_entry_a["download"].split("_").pop()
                    data = http_request.get(subtitle_url)
                    subtitle_url = os.path.join(tempdir, subtitle_name)
                    f = open(subtitle_url, 'w')
                    f.write(data)
                    f.close()
                    subtitle_urls.append(subtitle_url)

        for quality in self.video_formats:
            for li_entry in li_video_entries:
                li_entry_a = li_entry.find("a")
                # .string is None when the anchor wraps other tags
                if li_entry_a is not None and li_entry_a.string is not None:
                    if li_entry_a.string.strip() == quality:
                        video_url = li_entry_a["href"]
                        return video_url, subtitle_urls

        return video_url, subtitle_urls
def get_board(board=None, page=1):
    url = url_board % (url_root, board, page)
    result = http_request.get(url)
    json_obj = json.loads(result)

    return {
        "threads": json_obj["threads"],
        "totalResultCount": len(json_obj["threads"])
    }
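A hypothetical call to get_board, assuming url_root and url_board expand to a 4chan-style JSON endpoint (e.g. a template such as "%s/%s/%u.json"):

result = get_board(board="g", page=1)
print "%u threads on page 1 of /g/" % result["totalResultCount"]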
    def browse(self):
        url = self.browse_url % (utils.url_root, self.tag)
        json_data = http_request.get(url)
        tags = json.loads(json_data)
        for tag in tags:
            utils.add_directory("%s (%s)" % (tag['name'], tag['entries']), utils.icon_tag, utils.icon_tag,
                                "%s?action=list-tag&tag-url=%s" % (sys.argv[0], tag['href']))
        control.directory_end()
        return
def get_thread(board=None, thread_id=None):
    url = url_thread % (url_root, board, thread_id)
    result = http_request.get(url)
    json_obj = json.loads(result)

    return {
        "posts": json_obj["posts"],
        "totalResultCount": len(json_obj["posts"])
    }
def get_banner(url):
    html_data = http_request.get(url)
    soup_strainer = SoupStrainer("head")
    beautiful_soup = BeautifulSoup(html_data, soup_strainer, convertEntities=BeautifulSoup.HTML_ENTITIES)

    banner = beautiful_soup.find("meta", {"name": "msapplication-square310x310logo"})
    if banner is not None:
        return banner["content"]
    else:
        return None
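A hypothetical call to get_banner; any page that declares a msapplication-square310x310logo meta tag in its head will do:

banner = get_banner("https://channel9.msdn.com/Shows/Defrag-Tools")
if banner is not None:
    print "banner image: %s" % banner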
def get_boards():
    url = url_boards % url_root

    # ws_board: 0 == adult
    result = http_request.get(url)
    json_obj = json.loads(result)

    return {
        "boards": json_obj["boards"],
        "totalResultCount": len(json_obj["boards"])
    }
def _build_plugin_from_repository(addons_xml_root, plugin_info):
    name = plugin_info['name']
    repository_url = plugin_info["repository_url"]
    plugin_root_url = urlparse.urljoin(repository_url, "%s/" % name)
    html_data = http_request.get(plugin_root_url)
    beautiful_soup = BeautifulSoup(html_data, 'html.parser')
    # guard the href matcher: tags without an href pass None to the lambda
    a_entries = filter(
        lambda x: x.get('href') == x.contents[0],
        beautiful_soup.find_all(
            "a", href=lambda x: x and not x.startswith("http")))
    items = []
    for a in a_entries:
        items.append(a.get('href'))
    # this checks if it has a bunch of files... some might not be 'required'
    if not _can_process_repository_plugin(items):
        print "required file missing"
        return

    build_plugin_path = os.path.join(build_plugins_dir, name)
    if not os.path.exists(build_plugin_path):
        os.mkdir(build_plugin_path)

    for file_name in items:
        out_file = os.path.join(build_plugin_path, file_name)
        dl_url = urlparse.urljoin(plugin_root_url, file_name)
        _download_file(dl_url, out_file)

    if not _repository_has_zip_md5(items):
        zips = fnmatch.filter(items, "*.zip")
        for z in zips:
            zfile = os.path.join(build_plugin_path, z)
            md5file = os.path.join(build_plugin_path, "%s.md5" % z)
            if not os.path.exists(md5file):
                _md5_hash_file(zfile)

    plugin_addon_xml = etree.parse(
        open(os.path.join(build_plugin_path, 'addon.xml')))
    version = _get_version_from_addon_tree(plugin_addon_xml)
    if os.path.exists(os.path.join(build_plugin_path, 'changelog.txt')):
        shutil.move(
            os.path.join(build_plugin_path, 'changelog.txt'),
            os.path.join(build_plugin_path, 'changelog-%s.txt' % version))

    readmes = glob.glob1(build_plugin_path, "readme*")
    if len(readmes) > 0:
        readme = readmes[0]
        shutil.copy2(os.path.join(build_plugin_path, readme),
                     os.path.join(build_plugin_path, "readme.md"))

    addons_xml_root.append(plugin_addon_xml.getroot())
    _cleanup_path(build_plugin_path)
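_md5_hash_file is called above but not shown in this listing; a minimal sketch of what it presumably does, namely hashing the zip and writing a "<file>.md5" sidecar in the layout Kodi repositories expect:

import hashlib

def _md5_hash_file(path):
    # hash in chunks so large zips are not read into memory at once
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            md5.update(chunk)
    # write the hex digest next to the file as "<file>.md5"
    with open("%s.md5" % path, "w") as f:
        f.write(md5.hexdigest())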
    def list(self):
        url = "%s%s?sort=%s&page=%i&%s" % (
            utils.url_root, self.series_url, self.sort, self.current_page, utils.selected_languages())
        html_data = http_request.get(url)
        print url
        soup_strainer = SoupStrainer("main")
        beautiful_soup = BeautifulSoup(html_data, soup_strainer, convertEntities=BeautifulSoup.HTML_ENTITIES)
        articles = beautiful_soup.findAll("article")
        for article in articles:
            utils.add_entry_video(article)

        next_url = "%s?action=list-series&page=%i&sort=%s&series-url=%s" % (
            sys.argv[0], self.current_page + 1, urllib.quote_plus(self.sort_method), urllib.quote_plus(self.series_url))
        utils.add_next_page(beautiful_soup, next_url, self.current_page + 1)

        control.directory_end()
        return
    def list(self):
        url = "%s%s?sort=%s&page=%i&%s" % (
            utils.url_root, self.series_url, self.sort, self.current_page, utils.selected_languages())
        html_data = http_request.get(url)
        print url
        soup_strainer = SoupStrainer("div", {"class": "tab-content"})
        beautiful_soup = BeautifulSoup(html_data, soup_strainer, convertEntities=BeautifulSoup.HTML_ENTITIES)
        ul_entries = beautiful_soup.find("ul", {"class": "entries"})
        if ul_entries is None:
            control.directory_end()
            return

        li_entries = ul_entries.findAll("li")
        for li_entry in li_entries:
            utils.add_entry_video(li_entry)

        next_url = "%s?action=list-series&page=%i&sort=%s&series-url=%s" % (
            sys.argv[0], self.current_page + 1, urllib.quote_plus(self.sort_method), urllib.quote_plus(self.series_url))
        utils.add_next_page(beautiful_soup, next_url, self.current_page + 1)

        control.directory_end()
        return
    def get_video_url(self, video_page_url):
        # Get HTML page...
        # test the parameter, not self.video_page_url
        if not re.match("^https?:", video_page_url):
            video_page_url = "%s%s" % (utils.url_root, video_page_url)

        html_data = http_request.get(video_page_url)
        # Parse HTML response...
        soup_strainer = SoupStrainer("ul", {"class": "download"})
        beautiful_soup = BeautifulSoup(html_data, soup_strainer)

        video_url = None
        li_entries = beautiful_soup.findAll("li")
        for li_entry in li_entries:
            li_entry_a = li_entry.find("a")
            if li_entry_a is not None:
                for quality in self.video_formats:
                    if li_entry_a.string == quality:
                        video_url = li_entry_a["href"]
                        break
        return video_url
    def list(self):
        url = "%s/%s/posts?page=%u&%s" % (
            utils.url_root, self.author_url, self.current_page, utils.selected_languages())
        html_data = http_request.get(url)
        soup_strainer = SoupStrainer("div", {"class": "user-content"})
        beautiful_soup = BeautifulSoup(html_data, soup_strainer, convertEntities=BeautifulSoup.HTML_ENTITIES)

        ul_entries = beautiful_soup.find("ul", {"class": "entries"})
        if ul_entries is None:
            control.directory_end()
            return

        li_entries = ul_entries.findAll("li")
        for li_entry in li_entries:
            utils.add_entry_video(li_entry)

        next_url = "%s?action=list-author&page=%i&author-url=%s" % (
            sys.argv[0], self.current_page + 1, urllib.quote_plus(self.author_url))
        utils.add_next_page(beautiful_soup, next_url, self.current_page + 1)

        control.directory_end()
        return
    def get_meme_gallery(self, sort='viral', window='all', page=0):
        url = self.url_meme_gallery % (self.url_root, sort, window, page)
        result = http_request.get(url, self._authorization_header())
        json_obj = json.loads(result)["data"]

        return {"memes": json_obj, "totalResultCount": len(json_obj)}
    def get_gallery(self, section='hot', sort='viral', window='all', page=0, show_viral=True):
        url = self.url_gallery % (self.url_root, section, sort, window, page, show_viral)
        result = http_request.get(url, self._authorization_header())
        json_obj = json.loads(result)["data"]

        return {"galleries": json_obj, "totalResultCount": len(json_obj)}
    def get_random_gallery(self, page=0):
        url = self.url_random_gallery % (self.url_root, page)
        result = http_request.get(url, self._authorization_header())
        json_obj = json.loads(result)["data"]

        return {"galleries": json_obj, "totalResultCount": len(json_obj)}
    def get_album(self, album_id):
        url = self.url_album % (self.url_root, album_id)
        result = http_request.get(url, self._authorization_header())
        json_obj = json.loads(result)["data"]

        return {"album": json_obj}
    def get_meme_image(self, image_id):
        url = self.url_image % (self.url_root, image_id)
        result = http_request.get(url, self._authorization_header())
        json_obj = json.loads(result)["data"]

        return {"meme": json_obj}
    def browse(self):
        url = "%s%s" % (utils.url_root, self.topic_url)
        print "url: %s" % url
        if self.course_id is None:
            raise Exception("Missing required course id")
        else:
            course_code = self.course_id

        print "code: %s" % course_code
        data_url = "%sservices/products/anonymous/%s?version=1.0.0.0&isTranscript=false&languageId=12" % (
            utils.url_api, course_code)
        json_data = http_request.get(data_url)

        # "https:\/\/cp-mlxprod-static.microsoft.com\/012044-1000\/en-us"
        scorm_data_url = json.loads(json_data).replace("\\/", "/")

        manifest_url = "%s/imsmanifestlite.json" % scorm_data_url
        thumbnail = "%s/thumbnail.png" % scorm_data_url

        course_details_url = "%s/coursedetails.xml?v=1446384003349" % scorm_data_url
        course_details_data = http_request.get(course_details_url)
        course_details_xml = xml.etree.ElementTree.XML(course_details_data)
        print course_details_xml
        # course_details_root = course_details_xml.getroot()
        course_level = course_details_xml.findall(".//Level")[0].text
        description = course_details_xml.findall('.//Introduction')[0].text

        json_data = http_request.get(manifest_url)
        manifest_data = json.loads(json_data)

        print manifest_data
        manifest = manifest_data["manifest"]
        organizations = manifest["organizations"]

        for org in organizations["organization"]:
            for item in org["item"]:
                try:
                    for video_item in item["item"]:
                        # identifier = item["@identifier"]
                        title = item["title"]
                        resource = video_item["resource"]
                        href = resource["@href"]
                        settings_url = href.split("=")[1]

                        resource_meta = resource["metadata"]
                        print "resource_meta: %s" % resource_meta
                        resource_type = resource_meta["learningresourcetype"]
                        print "resource_type: %s" % resource_type
                        if not re.match("^[Vv]ideo$", resource_type):
                            continue

                        video_duration = resource_meta["duration"]

                        video_settings_url = "%s/%s/videosettings.xml?v=1" % (scorm_data_url, settings_url)
                        video_settings_data = http_request.get(video_settings_url)
                        video_settings = xml.etree.ElementTree.XML(video_settings_data)

                        media_sources = video_settings.findall('.//MediaSources')
                        default_media = None
                        for source in media_sources:
                            if source.attrib["videoType"] == "progressive":
                                progressives = source.findall(".//MediaSource")
                                for prog in progressives:
                                    if prog.attrib["default"] == "true":
                                        if prog.text is not None and prog.text != "":
                                            print "using media mode: %s" % prog.attrib["videoMode"]
                                            default_media = prog.text
                                            break
                                    else:
                                        if default_media is None and (prog.text is not None and prog.text != ""):
                                            print "using media mode: %s" % prog.attrib["videoMode"]
                                            default_media = prog.text
                                continue
                        if default_media is not None:
                            utils.add_video(title, thumbnail, description, "Level %s" % course_level, default_media,
                                            course_code, video_duration)
                        else:
                            print "unable to find media for %s" % video_settings_url
                except Exception, e:
                    print str(e)
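The MediaSources walk above prefers the progressive source flagged default="true" and otherwise falls back to the first non-empty entry; a standalone sketch of the same selection, run against a hypothetical videosettings.xml fragment:

import xml.etree.ElementTree

sample = """
<VideoSettings>
  <MediaSources videoType="smoothstreaming">
    <MediaSource videoMode="1080p" default="false">http://example/stream.ism</MediaSource>
  </MediaSources>
  <MediaSources videoType="progressive">
    <MediaSource videoMode="360p" default="false">http://example/v_360.mp4</MediaSource>
    <MediaSource videoMode="720p" default="true">http://example/v_720.mp4</MediaSource>
  </MediaSources>
</VideoSettings>
"""
video_settings = xml.etree.ElementTree.XML(sample)
default_media = None
for source in video_settings.findall(".//MediaSources"):
    if source.attrib["videoType"] == "progressive":
        for prog in source.findall(".//MediaSource"):
            # the entry flagged as default wins; otherwise keep the first
            # non-empty progressive URL we saw
            if prog.attrib["default"] == "true" and prog.text:
                default_media = prog.text
                break
            if default_media is None and prog.text:
                default_media = prog.text
print default_media  # -> http://example/v_720.mp4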