Example #1
def choose():
    profiles = []
    content = utility.decode(connect.load_site(utility.profile_url))
    match = re.compile(
        '"experience":"(.+?)".+?guid":"(.+?)".+?profileName":"(.+?)"',
        re.DOTALL).findall(content)
    for is_kid, token, name in match:
        profile = {
            'name': utility.unescape(name),
            'token': token,
            'is_kid': is_kid == 'jfk'
        }
        profiles.append(profile)
    if len(match) > 0:
        dialog = xbmcgui.Dialog()
        nr = dialog.select(utility.get_string(30103),
                           [profile['name'] for profile in profiles])
        if nr >= 0:
            selected_profile = profiles[nr]
        else:
            selected_profile = profiles[0]
        connect.load_site(utility.profile_switch_url +
                          selected_profile['token'])
        utility.set_setting('selected_profile', selected_profile['token'])
        utility.set_setting('is_kid',
                            'true' if selected_profile['is_kid'] else 'false')
        utility.set_setting('profile_name', selected_profile['name'])
        connect.save_session()
        get_my_list_change_authorisation()
    else:
        utility.log('Choose profile: no profiles were found!',
                    loglevel=xbmc.LOGERROR)
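
Note on example #1: the pattern pulls (experience, guid, profileName) triples out of the profile JSON embedded in the page, and a profile is flagged as a kids profile when its experience value is 'jfk'. A minimal standalone sketch, using an invented sample string instead of the real page content:

import re

# Invented sample of the embedded profile JSON; only the field order matters.
sample = ('{"experience":"jfk","guid":"ABC123","profileName":"Kids"}'
          '{"experience":"standard","guid":"DEF456","profileName":"Alice"}')
pattern = re.compile(
    '"experience":"(.+?)".+?guid":"(.+?)".+?profileName":"(.+?)"',
    re.DOTALL)
for is_kid, token, name in pattern.findall(sample):
    print((name, token, is_kid == 'jfk'))
# ('Kids', 'ABC123', True)
# ('Alice', 'DEF456', False)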
Example #2
def directory(name, url, mode, thumb, type='', context_enable=True):
    entries = []
    name = utility.unescape(name)
    u = sys.argv[0]
    u += '?url=' + urllib.quote_plus(url)
    u += '&mode=' + mode
    u += '&thumb=' + urllib.quote_plus(thumb)
    u += '&type=' + type
    list_item = xbmcgui.ListItem(name)
    list_item.setArt({'icon': 'DefaultTVShows.png', 'thumb': thumb})
    list_item.setInfo(type='video', infoLabels={'title': name})
    if "/my-list" in url:
        entries.append((utility.get_string(30150),
                        'RunPlugin(plugin://%s/?mode=add_my_list_to_library)' %
                        utility.addon_id))
    list_item.setProperty('fanart_image', utility.addon_fanart())
    if context_enable:
        list_item.addContextMenuItems(entries)
    else:
        list_item.addContextMenuItems([], replaceItems=True)
    directory_item = xbmcplugin.addDirectoryItem(handle=plugin_handle,
                                                 url=u,
                                                 listitem=list_item,
                                                 isFolder=True)
    return directory_item
Example #3
File: nextep.py Project: osund/pynik
    def fetch_show_info(self, show):
        info = {}
        query_result = utility.read_url(self.URL_API % show)
        raw_data = utility.unescape(query_result['data'].decode('utf-8'), True)
        for m in self.PATTERN_DATA_ENTRY.finditer(raw_data):
            info[m.group('key')] = m.group('value').replace('^', u", ")
        return info
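
The class constant PATTERN_DATA_ENTRY is not included in the snippet, but the loop only relies on it exposing named groups 'key' and 'value'. A hypothetical sketch with an invented pattern and input, just to show the shape of the data it produces:

import re

# Hypothetical pattern and sample input; the real PATTERN_DATA_ENTRY and API
# response format are not shown in the example above.
PATTERN_DATA_ENTRY = re.compile(r'(?P<key>\w+)@(?P<value>[^\n]*)')
raw_data = u"Show@Example Show\nGenre@Drama^Comedy"

info = {}
for m in PATTERN_DATA_ENTRY.finditer(raw_data):
    info[m.group('key')] = m.group('value').replace('^', u", ")
print(info)  # e.g. {'Show': 'Example Show', 'Genre': 'Drama, Comedy'}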
Example #4
def view_activity(video_type, run_as_widget=False):
    count = 0
    video_ids = []
    loading_progress = None
    if not run_as_widget:
        loading_progress = xbmcgui.DialogProgress()
        loading_progress.create('Netflix', utility.get_string(30205) + '...')
        utility.progress_window(loading_progress, 0, '...')
    xbmcplugin.setContent(plugin_handle, 'movies')
    if not xbmcvfs.exists(utility.session_file()):
        login.login()
    content = utility.decode(
        connect.load_site(utility.main_url + '/WiViewingActivity'))
    series_id = re.compile('(<li .*?data-series=.*?</li>)',
                           re.DOTALL).findall(content)
    for i in range(1, len(series_id), 1):
        entry = series_id[i]
        if not run_as_widget:
            utility.progress_window(loading_progress,
                                    (count + 1) * 100 / len(series_id), '...')
        match_id = re.compile('data-movieid="(.*?)"', re.DOTALL).findall(entry)
        if match_id:
            video_id = match_id[0]
        match = re.compile('class="col date nowrap">(.+?)<',
                           re.DOTALL).findall(entry)
        date = match[0]
        match_title1 = re.compile('class="seriestitle">(.+?)</a>',
                                  re.DOTALL).findall(entry)
        match_title2 = re.compile('class="col title">.+?>(.+?)<',
                                  re.DOTALL).findall(entry)
        if match_title1:
            title = utility.unescape(match_title1[0]).replace('</span>', '')
        elif match_title2:
            title = match_title2[0]
        else:
            title = ''
        title = date + ' - ' + title
        if video_id not in video_ids:
            video_ids.append(video_id)
            # due to limitations in the netflix api, there is no way to get the series_id of an
            # episode, so the 4th param is set to True to treat tv episodes the same as movies.
            added = video(video_id, title, '', True, False, video_type, '')
            if added:
                count += 1
            if count == 20:
                break
    if utility.get_setting('force_view') and not run_as_widget:
        xbmc.executebuiltin('Container.SetViewMode(' +
                            utility.get_setting('view_id_activity') + ')')
    xbmcplugin.endOfDirectory(plugin_handle)
Example #5
File: prisjakt.py Project: raek/pynik
    def look_up_item(self, url):
        response = utility.read_url(url)
        data = response['data'].decode('latin-1').replace('&nbsp;', u"")
        name_match = self.PATTERN_ITEM_NAME.search(data)
        if not name_match:
            return u"Could not extract product info :("

        name = utility.unescape(name_match.group('name'), True)
        price_match = self.PATTERN_ITEM_PRICE.search(data)
        if price_match:
            price = price_match.group('price')
        else:
            price = u"???:-"

        return u"%s, %s" % (name, price)
Example #6
def view_activity(video_type, run_as_widget=False):
    count = 0
    video_ids = []
    loading_progress = None
    if not run_as_widget:
        loading_progress = xbmcgui.DialogProgress()
        loading_progress.create('Netflix', utility.get_string(30205) + '...')
        utility.progress_window(loading_progress, 0, '...')
    xbmcplugin.setContent(plugin_handle, 'movies')
    if not xbmcvfs.exists(utility.session_file()):
        login.login()
    content = utility.decode(connect.load_site(utility.main_url + '/WiViewingActivity'))
    series_id = re.compile('(<li .*?data-series=.*?</li>)', re.DOTALL).findall(content)
    for i in range(1, len(series_id), 1):
        entry = series_id[i]
        if not run_as_widget:
            utility.progress_window(loading_progress, (count + 1) * 100 / len(series_id), '...')
        match_id = re.compile('data-movieid="(.*?)"', re.DOTALL).findall(entry)
        if match_id:
            video_id = match_id[0]
        match = re.compile('class="col date nowrap">(.+?)<', re.DOTALL).findall(entry)
        date = match[0]
        match_title1 = re.compile('class="seriestitle">(.+?)</a>', re.DOTALL).findall(entry)
        match_title2 = re.compile('class="col title">.+?>(.+?)<', re.DOTALL).findall(entry)
        if match_title1:
            title = utility.unescape(match_title1[0]).replace('</span>', '')
        elif match_title2:
            title = match_title2[0]
        else:
            title = ''
        title = date + ' - ' + title
        if video_id not in video_ids:
            video_ids.append(video_id)
            # due to limitations in the netflix api, there is no way to get the series_id of an
            # episode, so the 4th param is set to True to treat tv episodes the same as movies.
            added = video(video_id, title, '', True, False, video_type, '')
            if added:
                count += 1
            if count == 20:
                break
    if utility.get_setting('force_view') and not run_as_widget:
        xbmc.executebuiltin('Container.SetViewMode(' + utility.get_setting('view_id_activity') + ')')
    xbmcplugin.endOfDirectory(plugin_handle)
Example #7
def get_title(url):
	import urllib
	if not re.search('[^:]+:\/\/', url):
		url = 'http://' + url

	response = utility.read_url(url)
	if response == None:
		return None
	
	data = response["data"]
	data = data.replace("\r", "").replace("\n", "")

	m = re.search('<title[^>]*>\s*(.+?)\s*<\/title>', data, re.IGNORECASE|re.MULTILINE)

	if m:
		title = m.group(1)
		title = re.sub('\s+', ' ', title)
		return utility.unescape(re.sub('<.+?>', '', title))
	else:
		return None
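
The same title extraction can be exercised without a network call by applying the regex to an inline HTML string (invented here); utility.read_url and utility.unescape are left out of the sketch:

import re

data = '<html><head><title> My &amp; Site  -  <b>Home</b> </title></head></html>'
data = data.replace("\r", "").replace("\n", "")

m = re.search('<title[^>]*>\s*(.+?)\s*<\/title>', data, re.IGNORECASE | re.MULTILINE)
if m:
    title = re.sub('\s+', ' ', m.group(1))
    print(re.sub('<.+?>', '', title))  # My &amp; Site - Home (unescape would then decode &amp;)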
Example #8
def directory(name, url, mode, thumb, type='', context_enable=True):
    entries = []
    name = utility.unescape(name)
    u = sys.argv[0]
    u += '?url=' + urllib.quote_plus(url)
    u += '&mode=' + mode
    u += '&thumb=' + urllib.quote_plus(thumb)
    u += '&type=' + type
    list_item = xbmcgui.ListItem(name)
    list_item.setArt({'icon': 'DefaultTVShows.png', 'thumb': thumb})
    list_item.setInfo(type='video', infoLabels={'title': name})
    if "/my-list" in url:
        entries.append(
            (utility.get_string(30150), 'RunPlugin(plugin://%s/?mode=add_my_list_to_library)' % utility.addon_id))
    list_item.setProperty('fanart_image', utility.addon_fanart())
    if context_enable:
        list_item.addContextMenuItems(entries)
    else:
        list_item.addContextMenuItems([], replaceItems=True)
    directory_item = xbmcplugin.addDirectoryItem(handle=plugin_handle, url=u, listitem=list_item, isFolder=True)
    return directory_item
Example #9
def get_title(url):
    import urllib
    if not re.search('[^:]+:\/\/', url):
        url = 'http://' + url

    response = utility.read_url(url)
    if response == None:
        return None

    data = response["data"]
    data = data.replace("\r", "").replace("\n", "")

    m = re.search('<title[^>]*>\s*(.+?)\s*<\/title>', data,
                  re.IGNORECASE | re.MULTILINE)

    if m:
        title = m.group(1)
        title = re.sub('\s+', ' ', title)
        return utility.unescape(re.sub('<.+?>', '', title))
    else:
        return None
Example #10
File: igor.py Project: Cohaesus/Igor
def on_message(self, incoming_message, changesInboxTimestamp, supersedesHistoryMessage, conversation):

    mentioned = False
    respond = False

    if incoming_message.author != account_name:
        message = utility.unescape(''.join(
            BeautifulSoup(incoming_message.body_xml).findAll(
                text=True,
                convertEntities='html'
            )
        ))

        for name in settings.RESPOND_TO:
            if name in message.lower():
                mentioned = True
                respond = message.lower().startswith(name)
                break

        if mentioned and not respond:
            for name, response in settings.BEHAVIORS.iteritems():
                if message.lower().startswith(name):
                    conversation.PostText(response)

        if respond:

            if incoming_message.author.lower() in settings.NAMES_TO_IGNORE:
                conversation.PostText('I\'m not at leisure to answer your question.')
                return

            # @todo Usage log
            #print u'Igor: %s issued command: %s' % (incoming_message.author_displayname, message.encode('utf-8'))

            command_parts = [i.encode('utf-8') for i in shlex.split(message)]

            if len(command_parts) < 2:
                conversation.PostText('Yes, Master?')
            else:
                t = Thread(target=execute_task, args=(command_parts, incoming_message, conversation))
                t.start()
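
A small, self-contained illustration of the command parsing at the end of example #10: shlex.split honours quoting, so a quoted multi-word argument stays a single command part (message text invented; output shown for Python 2, as in the bot):

import shlex

message = u'igor deploy "staging environment" --force'
command_parts = [i.encode('utf-8') for i in shlex.split(message)]
print(command_parts)  # ['igor', 'deploy', 'staging environment', '--force']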
Example #11
def choose():
    profiles = []
    content = utility.decode(connect.load_site(utility.profile_url))
    match = re.compile('"experience":"(.+?)".+?guid":"(.+?)".+?profileName":"(.+?)"', re.DOTALL).findall(content)
    for is_kid, token, name in match:
        profile = {"name": utility.unescape(name), "token": token, "is_kid": is_kid == "jfk"}
        profiles.append(profile)
    if len(match) > 0:
        dialog = xbmcgui.Dialog()
        nr = dialog.select(utility.get_string(30103), [profile["name"] for profile in profiles])
        if nr >= 0:
            selected_profile = profiles[nr]
        else:
            selected_profile = profiles[0]
        connect.load_site(utility.profile_switch_url + selected_profile["token"])
        utility.set_setting("selected_profile", selected_profile["token"])
        utility.set_setting("is_kid", "true" if selected_profile["is_kid"] else "false")
        utility.set_setting("profile_name", selected_profile["name"])
        connect.save_session()
        get_my_list_change_authorisation()
    else:
        utility.log("Choose profile: no profiles were found!", loglevel=xbmc.LOGERROR)
Example #12
def choose():
    profiles = []
    content = utility.decode(connect.load_site(utility.profile_url))
    match = re.compile('"experience":"(.+?)".+?guid":"(.+?)".+?firstName":"(.+?)"', re.DOTALL).findall(content)
    for is_kid, token, name in match:
        profile = {'name': utility.unescape(name), 'token': token, 'is_kid': is_kid == 'jfk'}
        profiles.append(profile)
    if len(match) > 0:
        dialog = xbmcgui.Dialog()
        nr = dialog.select(utility.get_string(30103), [profile['name'] for profile in profiles])
        if nr >= 0:
            selected_profile = profiles[nr]
        else:
            selected_profile = profiles[0]
        connect.load_site(utility.profile_switch_url + selected_profile['token'])
        utility.set_setting('selected_profile', selected_profile['token'])
        utility.set_setting('is_kid', 'true' if selected_profile['is_kid'] else 'false')
        utility.set_setting('profile_name', selected_profile['name'])
        connect.save_session()
        get_my_list_change_authorisation()
    else:
        utility.log('Choose profile: no profiles were found!', loglevel=xbmc.LOGERROR)
Example #13
    def wp_get(self, language, item):
        url = "http://%s.wikipedia.org/wiki/%s" % (
            language, utility.escape(item.replace(" ", "_")))

        response = utility.read_url(url)

        if not response:
            return (None, None)

        data = response["data"]
        url = response["url"]

        # sometimes there is a nasty table containing the first <p>. we can't allow this to happen!
        pattern = re.compile("<table.*?>.+?<\/table>", re.MULTILINE)

        data = re.sub(pattern, "", data)

        m = re.search("<p>(.+?)<\/p>", data)
        if m:
            data = utility.unescape(m.group(1))
            data = re.sub("<.+?>", "", data)
            data = re.sub("\[\d+\]", "", data)

            index = data.rfind(".", 0, 300)

            if index == -1:
                index = 300

            if index + 1 < len(data) and data[index + 1] == '"':
                index += 1

            data = data[0:index + 1]

            if "Wikipedia does not have an article with this exact name." in data:
                data = None
        else:
            data = None

        return (url, data)
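
The summary truncation in wp_get cuts at the last full stop inside the first 300 characters and keeps a quotation mark that directly follows it. A standalone sketch with an invented, short string (so the 300-character window covers all of it):

data = 'He called it "a rather small library." More sentences follow here'

index = data.rfind(".", 0, 300)       # last '.' inside the window
if index == -1:
    index = 300
if index + 1 < len(data) and data[index + 1] == '"':
    index += 1                        # keep the closing quote with the sentence
print(data[0:index + 1])              # He called it "a rather small library."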
Example #14
	def wp_get(self, item):
		url = "http://en.wikipedia.org/wiki/%s" % utility.escape(item.replace(" ", "_"))

		response = utility.read_url(url)

		if not response:
			return (None, None)

		data = response["data"]
		url = response["url"]
		
		# sometimes there is a nasty table containing the first <p>. we can't allow this to happen!
		pattern = re.compile("<table.*?>.+?<\/table>", re.MULTILINE)

		data = re.sub(pattern, "", data)

		m = re.search("<p>(.+?)<\/p>", data)
		if m:
			data = utility.unescape(m.group(1))
			data = re.sub("<.+?>", "", data)
			data = re.sub("\[\d+\]", "", data)

			index = data.rfind(".", 0, 300)

			if index == -1:
				index = 300

			if index+1 < len(data) and data[index+1] == '"':
				index += 1

			data = data[0:index+1]

			if "Wikipedia does not have an article with this exact name." in data:
				data = None
		else:
			data = None

		return (url, data)
Example #15
def prisjakt_product(url):
	# Fetch the web page
	response = utility.read_url(url)
	data = response["data"]
	data = data.replace("&nbsp;", "")

	# Look for title
	title_pattern = "\<h1.*?\>(\<a href=\".+?\"\>)?(.+?)(\<\/a\>)?\<\/h1\>"
	title_match = re.search(title_pattern, data)

	if not title_match:
		# Failure
		return "Could not extract product info :("
	
	# Success
	title = utility.unescape(title_match.group(2))
	
	# Look for price
	price_pattern = "&auml;gsta: \<span class=\"pris\">(.|\n)(\d+:-)\<\/span\>"
	price_match = re.search(price_pattern, data)
	price = price_match.group(2)

	# Done, return info string
	return title + ", " + price + ", " + url
Example #16
def prisjakt_product(url):
	# Fetch the web page
	response = utility.read_url(url)
	data = response["data"]
	data = data.replace("&nbsp;", "")

	# Look for title
	title_pattern = "\<h1.*?\>(\<a href=\".+?\"\>)?(.+?)(\<\/a\>)?\<\/h1\>"
	title_match = re.search(title_pattern, data)

	if not title_match:
		# Failure
		return "Could not extract product info :("
	
	# Success
	title = utility.unescape(title_match.group(2))
	
	# Look for price
	price_pattern = "&auml;gsta: \<span class=\"pris\">(.|\n)(\d+:-)\<\/span\>"
	price_match = re.search(price_pattern, data)
	price = price_match.group(2)

	# Done, return info string
	return title + ", " + price + ", " + url
Example #17
def lith_course_info(code, programme, year):
	# Fetch the study handbook page for the course
	response = utility.read_url(sh_url(code, year))
	sh_data = response["data"]
	
	# Locate the Swedish course name
	m = re.search(
		"\<span class=\"txtbold\"\>\<b\>(.*?), \d{1,2},?\d? p \</b\>",
		sh_data)
	if m:
		name = utility.unescape(m.group(1))
	else:
		return "Hmm, are you sure the course code is " + code + "?"
	
	# Locate the English course name
	m = re.search(
		"\<br\>/(.*?)/\</b\>\</span\>",
		sh_data)
	if m:
		name = name + " (" + utility.unescape(m.group(1)) + ")"
	else:
		print "I couldn't find the English name of the LiTH course " + \
			code + " O.o"
	
	# Locate the number of HP (ECTS) credits
	m = re.search(
		"\<span class=\"txtbold\"\>\<b\> (\d{1,2},?\d?) hp\</span\>\</font\>",
		sh_data)
	if m:
		credits = m.group(1).replace(",", ".")
	else:
		credits = "???"
		#print "I couldn't find the number of credits for the LiTH course " + \
		#	code + " O.o"
	
	# Locate the advancement level
	m = re.search(
		"\<span class=\"txtkursivlista\"\>Utbildningsniv&aring; \(G1,G2,A\):\<\/span\>\<i\> \<\/i\>\<span class=\"txtlista\"\>(.+?)\<\/span\>",
		sh_data)
	if m:
		level = m.group(1)
	else:
		level = "???"
	
	# Fetch the schedule page for the course from the study handbook
	response = utility.read_url(schedule_url(code, programme, year))
	sh_data = response["data"]
	
	# Match study periods
	# (Usually ([HV]t[12]) but some odd courses have other formats, e.g. Ht2a)
	period_m = re.findall(
		"\<td\>\<span class=\"txtlista\"\>\d([HV]t.*?)\</span\>\</td\>",
		sh_data)
	
	# Match blocks
	# (Usually ([1-4]) but some courses have other formats, e.g. "-", "" or "1+2+4")
	block_m = re.findall(
		"&-Token.ksk=\[Field:'k_kurskod'\]\"\>---\>" + "(.*?)" + \
		"\<!---\<\/a\>---\>\<\/span\>\<\/td\>",
		sh_data)
	
	# Build a list of schedule occasions
	schedules = []
	for i in range(len(period_m)):
		# Assemble an occasion string
		match = period_m[i] + "."
		if not block_m[i]:
			match += "?"
		else:
			match += block_m[i].replace(", ", "+")
		
		# Add if not already present
		if match not in schedules:
			schedules.append(match)
	
	# Convert it into a string
	if schedules:
		schedule_text = "Scheduled during " + ", ".join(sorted(schedules))
	else:
		schedule_text = "Not scheduled " + year + "."
	
	# Combine all the information and return it
	return code + ": " + name + ", " + credits + " HP on level " + level + \
			". " + schedule_text + " | " + sh_url(code, year)
Example #18
def system_status(product_id, store_id):
    # Fetch the web page
    url = "http://systembolaget.se/SokDrycker/Produkt?VaruNr=" + product_id + \
      "&Butik=" + store_id
    response = utility.read_url(url)
    if response:
        data = response["data"].replace("\r", "")
    else:
        return "Ingen produkt med det artikelnumret hittades."

    # Look for title
    title_pattern = "class=\"rubrikstor\"\>(.+)\n"
    title_match = re.search(title_pattern, data)

    if not title_match:
        # Failure
        return "Hittade inte produktinfon :("

    # Set product info and store name variables
    title_text = title_match.group(1)

    origin_pattern = "class=\"text_tabell_rubrik\"\>Land\<\/td\>[\n\s]+\<td class=\"text_tabell\"\>(\<B\>\<A.+?\>)?([\w\s]+)(\<\/A\>\<\/B\>)?\<\/td\>"
    origin_text = re.search(origin_pattern, data).group(2)

    percentage_pattern = "class=\"text_tabell_rubrik\"\>Alkoholhalt\<\/td\>[\n\s]+\<td class=\"text_tabell\"\>(.+?)\<\/td\>"
    percentage_text = re.search(percentage_pattern,
                                data).group(1).replace(",", ".")

    store_pattern = "\<option selected=\"selected\" value=\"" + store_id + "\"\>(.+?)\<\/option\>"
    store_text = re.search(store_pattern, data).group(1)

    # Look for available packaging options for this product
    product_pattern = "\<img src=\"\/images\/button_plus\.gif\" class=\"LaggTillMinaVaror\" " + \
      "align=\"absmiddle\" onMouseover=\"ddrivetip\(\'L.gg till i \<b\>Mina varor\<\/b\>\'\)\" " + \
      "onMouseout=\"hideddrivetip\(\)\" onClick=\"LaggTillEnArtikel\(\'\d+\'\);\"\>" + \
      "(.+?)" + "\<\/td\>\<td class=\"text_tabell\" valign=\"Top\" " + \
      "background=\"\/images\/tab_bg_blueline\.gif\" style=\"padding-top:5px;\"\>" + \
      "([\w\s]+)" + "\<\/td\>\<td class=\"text_tabell\" align=\"Right\" valign=\"Top\" " + \
      "background=\"\/images\/tab_bg_blueline\.gif\" style=\"padding-top:5px;\"\>" + \
      "\((.+?)\)" + "\<\/td\>\<td class=\"text10pxfet\" align=\"Right\" valign=\"Top\" " + \
      "background=\"\/images\/tab_bg_blueline\.gif\" width=\"87\" style=\"padding-top:5px;\"\>" + \
      "(.+?)" + "\<\/td\>\<td class=\"text_tabell\" align=\"Left\" valign=\"Top\" " + \
      "background=\"\/images\/tab_bg_blueline\.gif\" width=\"183\" " + \
      "bgcolor=\"#FFFFFF\" style=\"padding-top:5px;\"\>.*?\<\/td\>" + \
      "\<td class=\"text_tabell\" valign=\"Top\" background=\"\/images\/tab_bg_blueline\.gif\" " + \
      "style=\"padding-top:5px;\"\>" + \
      "\<strong\>Lagersaldo: \<\/strong\>(\d+) st&nbsp;&nbsp;&nbsp;\<strong\>" + \
      "Plats i butiken: \<\/strong\>(.+?)\<\/td\>"
    product_iterator = re.finditer(product_pattern, data)
    product_list = []

    for match in product_iterator:
        # An available packaging option has been found, let's calculate the APK value.
        apk_value = float(percentage_text[:-2]) / 100  # He
        apk_value *= float(match.group(2)[:-3])  # V (expected to be in ml)
        apk_value /= float(match.group(4))  # P

        # Add it to the list...
        format_string = "%s %s: %s kr (%s kr/l, APK " + str(round(apk_value, 2)) + \
          "), %s st, hylla %s"
        product_list.append(format_string % match.group(1, 2, 4, 3, 5, 6))

    if not product_list:
        # No available packaging options found
        product_list.append("Varan finns inte i denna butik.")

    # Assemble string
    result_string = "#%s: %s, %s, %s | %s | %s | %s" % \
      (product_id, title_text, origin_text, percentage_text, store_text, " | ".join(product_list), url)

    # Unescape things like &nbsp;
    return utility.unescape(result_string)
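
The APK figure computed in the loop above is millilitres of pure alcohol per krona: (percentage / 100) x volume in ml / price. A worked example with invented numbers, assuming the scraped strings end in ' %' and ' ml' as the slicing implies:

percentage_text = "5.3 %"    # [:-2] strips " %"
volume_text = "500 ml"       # [:-3] strips " ml"
price_text = "12.90"

apk_value = float(percentage_text[:-2]) / 100   # 0.053
apk_value *= float(volume_text[:-3])            # 26.5 ml pure alcohol
apk_value /= float(price_text)                  # per krona
print(round(apk_value, 2))                      # 2.05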
Example #19
def video(video_id, title, thumb_url, is_episode, hide_movies, video_type, url):
    added = False
    year = ''
    mpaa = ''
    duration = ''
    description = ''
    director = ''
    genre = ''
    rating = 0.0
    video_details = get.video_info(video_id)
    match = re.compile('<span class="title.*?>(.+?)</span', re.DOTALL).findall(video_details)
    if not title:
        title = match[0].strip()
    match = re.compile('<span class="year.*?>(.+?)</span', re.DOTALL).findall(video_details)
    if match:
        year = match[0].partition('-')[0]
    if not thumb_url:
        match = re.compile('src="(.+?)"', re.DOTALL).findall(video_details)
        thumb_url = match[0].replace('/webp/', '/images/').replace('.webp', '.jpg')
    match = re.compile('<span class="mpaaRating.*?>(.+?)</span', re.DOTALL).findall(video_details)
    if match:
        mpaa = match[0].strip()
    match = re.compile('<span class="duration.*?>(.+?)</span', re.DOTALL).findall(video_details)
    if match:
        duration = match[0].lower()
    if duration.split(' ')[-1].startswith('min'):
        type = 'movie'
        video_type_temp = type
        duration = duration.split(' ')[0]
    else:
        video_type_temp = 'tv'
        if is_episode:
            type = 'episode'
        else:
            type = 'tvshow'
        duration = ''
    if utility.get_setting('use_tmdb') == 'true':
        year_temp = year
        title_temp = title
        if ' - ' in title_temp:
            title_temp = title_temp[title_temp.index(' - '):]
        if '-' in year_temp:
            year_temp = year_temp.split('-')[0]
        filename = utility.clean_filename(video_id) + '.jpg'
        filename_none = utility.clean_filename(video_id) + '.none'
        cover_file = xbmc.translatePath(utility.cover_cache_dir() + filename)
        cover_file_none = xbmc.translatePath(utility.cover_cache_dir() + filename_none)
        if not (xbmcvfs.exists(cover_file) or xbmcvfs.exists(cover_file_none)):
            utility.log('Downloading cover art. type: %s, video_id: %s, title: %s, year: %s' % (video_type_temp,
                                                                                                video_id, title_temp,
                                                                                                year_temp))
            get.cover(video_type_temp, video_id, title_temp, year_temp)
    match = re.compile('src=".+?">.*?<.*?>(.+?)<', re.DOTALL).findall(video_details)
    if match:
        description_temp = match[0]
        # replace all embedded unicode in unicode (Norwegian problem)
        description_temp = description_temp.replace('u2013', unicode('\u2013')).replace('u2026', unicode('\u2026'))
        description = utility.unescape(description_temp)
    match = re.compile('Director:</dt><dd>(.+?)<', re.DOTALL).findall(video_details)
    if match:
        director = match[0].strip()
    match = re.compile('<span class="genre.*?>(.+?)</span', re.DOTALL).findall(video_details)
    if match:
        genre = match[0]
    match = re.compile('<span class="rating">(.+?)</span', re.DOTALL).findall(video_details)
    if len(match) > 0:
        rating = float(match[0])
    title = utility.unescape(title)
    next_mode = 'play_video_main'
    if utility.get_setting('browse_tv_shows') == 'true' and type == 'tvshow':
        next_mode = 'list_seasons'
    if '/my-list' in url and video_type_temp == video_type:
        add.video(title, video_id, next_mode, thumb_url, type, description, duration, year, mpaa,
                  director, genre, rating, remove=True)
        added = True
    elif type == 'movie' and hide_movies:
        pass
    elif video_type_temp == video_type or video_type == 'both':
        add.video(title, video_id, next_mode, thumb_url, type, description, duration, year, mpaa,
                  director, genre, rating)
        added = True
    return added
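
The movie/TV split in example #19 hinges on the duration string: a last word starting with 'min' marks a movie, everything else falls through to the TV branch. A quick standalone check:

for duration in ['97 min', '1h 37 minutes', '3 seasons', '']:
    kind = 'movie' if duration.split(' ')[-1].startswith('min') else 'tv'
    print(duration + ' -> ' + kind)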
Example #20
    def trig_google(self, bot, source, target, trigger, argument):
        url = 'http://www.google.com/search?rls=en&q=' + utility.escape(
            argument) + '&ie=UTF-8&oe=UTF-8'

        response = utility.read_url(url)

        data = response["data"]

        data = re.sub(r"\n|\r|\r\n", "", data)
        data = re.sub(r" +", " ", data)

        print data

        # try to extract video result
        m = re.search(
            r'Video results for <em>.*?<\/em>.*?<td valign=top style="padding-right:10px"><a href="(.*?)" class=l.*?>(.*?)</a><br>',
            data)
        if m:
            text = utility.unescape(m.group(2))
            text = re.sub('<.+?>', '', text)
            link = m.group(1)
            return "%s - %s | %s" % (text, link, url)

        # try to extract calculator result
        #m = re.search('<td><img src="\/images\/icons\/onebox\/calculator-40\.gif" ?width=40 height=40 alt=""><td>&nbsp;<td style="vertical-align:top" >(<h2 class=r( style="font-size:\d+%")?>)?<b>(.*?)<\/b>', data)
        m = re.search('.*?font-size:138%">(.*?)<', data)
        if m:
            answer = m.group(1)
            answer = answer.replace(' &#215;', '×').replace('<sup>', '^')
            answer = re.sub('<.+?>', '', answer)
            return answer

        # try to extract definition
        m = re.search(
            '<img src="\/images\/dictblue\.gif" width=40 height=30 alt=""><td valign=top.*?>(.*?)<br>',
            data)
        if m:
            definition = utility.unescape(m.group(1))
            definition = re.sub('<.+?>', '', definition)
            return definition

        # try to extract weather
        m = re.search(
            '<b>Weather<\/b> for <b>(.+?)<\/b>.+?<b>(-?\d+).*C<\/b>.+?Current: <b>(.+?)<\/b>',
            data)

        if m:
            location = m.group(1)
            temperature = m.group(2)
            weather = m.group(3)
            return "%s: %s - %s" % (location, temperature, weather)

        # try to extract time
        m = re.search(
            'alt=""><td valign=middle><b>(.*?)<\/b> .+?day \((.*?)\) - <b>Time</b> in (.*?)<\/table>',
            data)

        if m:
            time = m.group(1)
            timezone = m.group(2)
            location = m.group(3)
            location = re.sub('<.+?>', '', location)

            return "Time in %s: %s (%s)" % (location, time, timezone)

        # try to extract first hit
        m = re.search(
            '<li class=g><h3 class=r><a href="(.*?)".*?>(.*?)<\/a>(.*?)</div>',
            data)
        if m:
            text = utility.unescape(m.group(2))
            text = re.sub('<.+?>', '', text)

            link = m.group(1)

            return "%s - %s | %s" % (text, link, url)
        else:
            return url
Example #21
def system_status(product_id, store_id):
	# Fetch the web page
	url = "http://systembolaget.se/SokDrycker/Produkt?VaruNr=" + product_id + \
			"&Butik=" + store_id
	response = utility.read_url(url)
	if response:
		data = response["data"].replace("\r", "")
	else:
		return "Ingen produkt med det artikelnumret hittades."
	
	# Look for title
	title_pattern = "class=\"rubrikstor\"\>(.+)\n"
	title_match = re.search(title_pattern, data)

	if not title_match:
		# Failure
		return "Hittade inte produktinfon :("
	
	# Set product info and store name variables
	title_text = title_match.group(1)
	
	origin_pattern = "class=\"text_tabell_rubrik\"\>Land\<\/td\>[\n\s]+\<td class=\"text_tabell\"\>(\<B\>\<A.+?\>)?([\w\s]+)(\<\/A\>\<\/B\>)?\<\/td\>"
	origin_text = re.search(origin_pattern, data).group(2)
	
	percentage_pattern = "class=\"text_tabell_rubrik\"\>Alkoholhalt\<\/td\>[\n\s]+\<td class=\"text_tabell\"\>(.+?)\<\/td\>"
	percentage_text = re.search(percentage_pattern, data).group(1).replace(",", ".")
	
	store_pattern = "\<option selected=\"selected\" value=\"" + store_id + "\"\>(.+?)\<\/option\>"
	store_text = re.search(store_pattern, data).group(1)
	
	# Look for available packaging options for this product
	product_pattern = "\<img src=\"\/images\/button_plus\.gif\" class=\"LaggTillMinaVaror\" " + \
			"align=\"absmiddle\" onMouseover=\"ddrivetip\(\'L.gg till i \<b\>Mina varor\<\/b\>\'\)\" " + \
			"onMouseout=\"hideddrivetip\(\)\" onClick=\"LaggTillEnArtikel\(\'\d+\'\);\"\>" + \
			"(.+?)" + "\<\/td\>\<td class=\"text_tabell\" valign=\"Top\" " + \
			"background=\"\/images\/tab_bg_blueline\.gif\" style=\"padding-top:5px;\"\>" + \
			"([\w\s]+)" + "\<\/td\>\<td class=\"text_tabell\" align=\"Right\" valign=\"Top\" " + \
			"background=\"\/images\/tab_bg_blueline\.gif\" style=\"padding-top:5px;\"\>" + \
			"\((.+?)\)" + "\<\/td\>\<td class=\"text10pxfet\" align=\"Right\" valign=\"Top\" " + \
			"background=\"\/images\/tab_bg_blueline\.gif\" width=\"87\" style=\"padding-top:5px;\"\>" + \
			"(.+?)" + "\<\/td\>\<td class=\"text_tabell\" align=\"Left\" valign=\"Top\" " + \
			"background=\"\/images\/tab_bg_blueline\.gif\" width=\"183\" " + \
			"bgcolor=\"#FFFFFF\" style=\"padding-top:5px;\"\>.*?\<\/td\>" + \
			"\<td class=\"text_tabell\" valign=\"Top\" background=\"\/images\/tab_bg_blueline\.gif\" " + \
			"style=\"padding-top:5px;\"\>" + \
			"\<strong\>Lagersaldo: \<\/strong\>(\d+) st&nbsp;&nbsp;&nbsp;\<strong\>" + \
			"Plats i butiken: \<\/strong\>(.+?)\<\/td\>"
	product_iterator = re.finditer(product_pattern, data)
	product_list = []
	
	for match in product_iterator:
		# An available packaging option has been found, let's calculate the APK value.
		apk_value = float(percentage_text[:-2]) / 100  # He
		apk_value *= float(match.group(2)[:-3])        # V (expected to be in ml)
		apk_value /= float(match.group(4))             # P
		
		# Add it to the list...
		format_string = "%s %s: %s kr (%s kr/l, APK " + str(round(apk_value, 2)) + \
				"), %s st, hylla %s"
		product_list.append(format_string % match.group(1, 2, 4, 3, 5, 6))
	
	if not product_list:
		# No available packaging options found
		product_list.append("Varan finns inte i denna butik.")
		
	# Assemble string
	result_string = "#%s: %s, %s, %s | %s | %s | %s" % \
			(product_id, title_text, origin_text, percentage_text, store_text, " | ".join(product_list), url)
	
	# Unescape things like &nbsp;
	return utility.unescape(result_string)
Example #22
File: mat.py Project: plux/pynik
def menu(location):
	# Set location-specific settings
	if location == "[hg]" or location == "hg":
		# Ryds Herrgård [hg], Linköping
		url = "http://www.hg.se/?restaurang/kommande"
		
		entry_regex = '\<h2\>(.+?dag)en den .+?\<\/h2\>(.+?)(?=(\<h2\>|\<em\>))'
		entry_day_index = 0
		entry_data_index = 1
		
		dish_regex = '\<p\>(.+?)\<br\>(.+?((\d+?) kr))?'
		dish_name_index = 0
		dish_price_index = 3
	
	elif location == "villevalla" or location == "vvp":
		# VilleValla Pub, Linköping
		url = "http://www.villevallapub.se/"
		
		entry_regex = '\<td valign="top" style="padding-right: 2px;"\>\<strong\>(.+?dag)\<\/strong\>\<\/td\>\s*\<td\>(.+?)\<\/td\>'
		entry_day_index = 0
		entry_data_index = 1
		
		dish_regex = '\A(.+?) ((\d+?) :-)\Z'
		dish_name_index = 0
		dish_price_index = 2
	
	elif location == "karallen" or location == "kara":
		# Restaurang Kårallen, LiU
		
		# Oh well... The Kårallen guys apparently don't know what they are doing.
		# For now, let's hope this pattern continues.
		url = "http://www.cgnordic.com/sv/Eurest-Sverige/Restauranger/Restaurang-Karallen-Linkopings-universitet/Lunchmeny-"
		if (int(datetime.now().strftime("%W"))+1) % 2: # TODO use better week function
			url += "v-13/"
		else:
			url += "v-15/"
		
		header_regex = '\<strong\>(.+?dag).+?\<\/strong\>'
		
		entry_regex = header_regex + '(\<\/p\>)?\<\/td\>\<\/tr\>(.+?)(?=(' + header_regex + '|\<p\>Pris dagens:))'
		entry_day_index = 0
		entry_data_index = 2
		
		dish_regex = '\<\/td\>\s+\<td\>(\s+\<p( align="[a-z]+")?\>)?([^\<]+?)(\<\/p\>)?\<\/td\>\<\/tr\>()'
		dish_name_index = 2
		dish_price_index = 4 # Dummy index.
	
	elif location == "blamesen" or location == "galaxen":
		# Restaurang Blåmesen, Galaxen, LiU
		
		url = "http://davidg.nu/lunch/blamesen.php?price"
		entry_regex = '([A-Za-zåäö]{3,4}dag)(.+?)(?=([A-Za-zåäö]{3,4}dag|$))'
		entry_day_index = 0
		entry_data_index = 1
		
		dish_regex = ': (.+?) \((\d+) kr\)'
		
		dish_name_index = 0
		dish_price_index = 1
		
	elif location == "zenit":
		# Restaurang & Café Zenit, LiU
		url = "http://www.hors.se/restauranger/restaurant_meny.php3?UID=24"
		
		entry_regex = '\<tr\>\<td valign="top" colspan="3"\>\<b\>(.+?dag)\<\/b\>\<\/td\>\<\/tr>(.+?)(\<tr\>\<td colspan="3"\>\<hr\>\<\/td\>\<\/tr\>|Veckans Bistro)'
		entry_day_index = 0
		entry_data_index = 1
		
		# This used to be some clever (?) regex to handle special cases that are
		# possibly not applicable now.
		# \xa4 == ¤
		dish_regex = '(\<td valign="top"\>|\<br \/\>\s*)\xa4 (.+?)(\<br \/\>|\<\/td\>)()'
		dish_name_index = 1
		dish_price_index = 3 # Dummy index.
		
	else:
		return [] # Not implemented yet
	
	# Settings are correct, now it's time to actually do something.
	
	# Fetch the web page
	response = utility.read_url(url)
	if not response:
		return []
	data = response["data"]
	data = utility.unescape(data.replace("\n", ""))
	data = data.replace(utility.unescape("&nbsp;"), " ")
	
	#return data
	
	# Build the menu
	menu = []
	for entry in re.findall(entry_regex, data):
		#print entry
		day = entry[entry_day_index]
		dishes = []
		
		for dish in re.findall(dish_regex, entry[entry_data_index]):
			#print dish
			dish_name = dish[dish_name_index].strip()
			dish_name = re.sub('\s+', ' ', dish_name)
			
			if not dish_name:
				pass # Odd input or bad regex
			elif dish_name.find(">") != -1:
				print "Hmm, I got an odd dish from " + location + ": " + dish_name
			elif dish[dish_price_index]:
				# Price found, let's add it
				dishes.append(dish_name + " (" + dish[dish_price_index] + " kr)")
			else:
				# No price, exclude it
				dishes.append(dish_name)
		
		menu.append((day, dishes))
	
	# Done!
	return menu
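
The [hg] branch of menu() can be exercised on a tiny invented HTML fragment to show how the two regexes cooperate: the entry regex yields (day, block) pairs and the dish regex pulls name/price pairs out of each block (the real function additionally unescapes entities and strips &nbsp;):

import re

entry_regex = '\<h2\>(.+?dag)en den .+?\<\/h2\>(.+?)(?=(\<h2\>|\<em\>))'
dish_regex = '\<p\>(.+?)\<br\>(.+?((\d+?) kr))?'

data = ('<h2>Onsdagen den 5 juni</h2>'
        '<p>Pannkakor med sylt<br>Pris 79 kr</p>'
        '<h2>Torsdagen den 6 juni</h2>'
        '<p>Stekt fisk<br>Pris 85 kr</p><em>')

for day, block, _ in re.findall(entry_regex, data):
    dishes = [(d[0].strip(), d[3]) for d in re.findall(dish_regex, block)]
    print("%s: %s" % (day, dishes))
# Onsdag: [('Pannkakor med sylt', '79')]
# Torsdag: [('Stekt fisk', '85')]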
Example #23
	def trig_google(self, bot, source, target, trigger, argument):
		url = 'http://www.google.com/search?rls=en&q=' + utility.escape(argument) + '&ie=UTF-8&oe=UTF-8'

		response = utility.read_url(url)

		data = response["data"]

		#print data

		# try to extract video result
		m = re.search(r'Video results for <em>.*?<\/em>.*?<td valign=top style="padding-right:10px"><a href="(.*?)" class=l.*?>(.*?)</a><br>',data)
		if m:
			text = utility.unescape(m.group(2))
			text = re.sub('<.+?>', '', text) 
			link = m.group(1)
			return "%s - %s | %s" % (text, link, url) 

		# try to extract calculator result
		m = re.search('<td><img src=\/images\/calc_img\.gif width=40 height=30 alt=""><td>&nbsp;<td nowrap (dir=ltr)?>(<h2 class=r( style="font-size:\d+%")?>)?<b>(.*?)<\/b>', data)
		if m:
			answer = m.group(4)
			answer = answer.replace(' &#215;', '\xd7').replace('<sup>', '^')
			answer = re.sub('<.+?>', '', answer)
			return answer

		# try to extract definition
		m = re.search('<img src="\/images\/dictblue\.gif" width=40 height=30 alt=""><td valign=top.*?>(.*?)<br>', data)
		if m:
			definition = utility.unescape(m.group(1))
			definition = re.sub('<.+?>', '', definition)
			return definition

		# try to extract weather
		m = re.search('<b>Weather<\/b> for <b>(.+?)<\/b>.+?<b>(-?\d+).*C<\/b>.+?Current: <b>(.+?)<\/b>', data)

		if m:
			location = m.group(1)
			temperature = m.group(2)
			weather = m.group(3)
			return "%s: %s - %s" % (location, temperature, weather)

		# try to extract time
		m = re.search('alt=""><td valign=middle><b>(.*?)<\/b> .+?day \((.*?)\) - <b>Time</b> in (.*?)<\/table>', data)

		if m:
			time = m.group(1)
			timezone = m.group(2)
			location = m.group(3)
			location = re.sub('<.+?>', '', location)

			return "Time in %s: %s (%s)" % (location, time, timezone)
			
		
		
		# try to extract first hit
		m = re.search('<li class=g><h3 class=r><a href="(.*?)".*?>(.*?)<\/a>(.*?)</div>', data)
		if m:
			text = utility.unescape(m.group(2))
			text = re.sub('<.+?>', '', text)

			link = m.group(1)

			return "%s - %s | %s" % (text, link, url)
		else:
			return url
Example #24
def menu(location):
	# Set location-specific settings
	if location == "[hg]" or location == "hg":
		# Ryds Herrgård [hg], Linköping
		url = "http://www.hg.se/?restaurang/kommande"
		
		entry_regex = '\<h2\>(.+?dag)en den .+?\<\/h2\>(.+?)(?=(\<h2\>|\<em\>))'
		entry_day_index = 0
		entry_data_index = 1
		
		dish_regex = '\<p\>(.+?)\<br\>(.+?((\d+?) kr))?'
		dish_name_index = 0
		dish_price_index = 3
	
	elif location == "villevalla" or location == "vvp":
		# VilleValla Pub, Linköping
		url = "http://www.villevallapub.se/"
		
		entry_regex = '\<td valign="top" style="padding-right: 2px;"\>\<strong\>(.+?dag)\<\/strong\>\<\/td\>\s*\<td\>(.+?)\<\/td\>'
		entry_day_index = 0
		entry_data_index = 1
		
		dish_regex = '\A(.+?) ((\d+?) :-)\Z'
		dish_name_index = 0
		dish_price_index = 2
	
	elif location == "karallen" or location == "kara":
		# Restaurang Kårallen, LiU
		
		# Oh well... The Kårallen guys apparently don't know what they are doing.
		# Until someone implements code that parses out the link to the current
		# menu page, this hack will have to do:
		url = "http://www.cgnordic.com/sv/Eurest-Sverige/Restauranger/Restaurang-Karallen-Linkopings-universitet/Lunchmeny-"
		week_number = int(datetime.now().strftime("%V"))
		if (week_number % 2) == 1:
			url += "v-13/"
		else:
			url += "v-15/"
		
		header_regex = '\<strong\>(.+?dag).+?\<\/strong\>'
		
		entry_regex = header_regex + '(\<\/p\>)?\<\/td\>\<\/tr\>(.+?)(?=(' + header_regex + '|\<p\>Pris dagens:))'
		entry_day_index = 0
		entry_data_index = 2
		
		dish_regex = '\<\/td\>\s+\<td\>(\s+\<p( align="[a-z]+")?\>)?([^\<]+?)(\<\/p\>)?\<\/td\>\<\/tr\>()'
		dish_name_index = 2
		dish_price_index = 4 # Dummy index.
	
	elif location == "blamesen" or location == "galaxen":
		# Restaurang Blåmesen, Galaxen, LiU
		
		url = "http://davidg.nu/lunch/blamesen.php?price"
		entry_regex = '([A-Za-zåäö]{3,4}dag)(.+?)(?=([A-Za-zåäö]{3,4}dag|$))'
		entry_day_index = 0
		entry_data_index = 1
		
		dish_regex = ': (.+?) \((\d+) kr\)'
		
		dish_name_index = 0
		dish_price_index = 1
		
	elif location == "zenit":
		# Restaurang & Café Zenit, LiU
		url = "http://hors.se/new_site/restauranter_pdf.php?UID=24"

		header_regex = '\<b\>(.+?dag) [\d]{2}-[A-Za-z]{3}\<\/b\>'
		
		entry_regex = header_regex + '(.+?)(?=(' + header_regex + '|\<td width="\d+px" valign="top"\>Veckans|\<\/html\>))'
		entry_day_index = 0
		entry_data_index = 1

		dish_regex = '\<td valign="top"\>([^\<]+)\<\/td\>\<td width="\d+px" valign="top"\>()'
		dish_name_index = 0
		dish_price_index = 1 # Dummy index.
		
	else:
		return [] # Not implemented yet
	
	# Settings are correct, now it's time to actually do something.
	
	# Fetch the web page
	response = utility.read_url(url)
	if not response:
		return []
	data = response["data"]
	data = utility.unescape(data.replace("\n", ""))
	data = data.replace(utility.unescape("&nbsp;"), " ")
	
	#print data
	
	# Build the menu
	menu = []
	for entry in re.findall(entry_regex, data):
		#print entry
		day = entry[entry_day_index]
		dishes = []
		
		for dish in re.findall(dish_regex, entry[entry_data_index]):
			#print dish
			dish_name = dish[dish_name_index].strip()
			dish_name = re.sub('\s+', ' ', dish_name)
			
			if not dish_name:
				pass # Odd input or bad regex
			elif dish_name.find(">") != -1:
				error_handler.output_message("Hmm, 'mat' got an odd dish from " + location + ": " + dish_name)
			elif dish[dish_price_index]:
				# Price found, let's add it
				dishes.append(dish_name + " (" + dish[dish_price_index] + " kr)")
			else:
				# No price, exclude it
				dishes.append(dish_name)
		
		menu.append((day, dishes))
	
	# Done!
	return menu
Example #25
File: calc.py Project: osund/pynik
    def unescape_response(self, response):
        """Unescapes hex characters (e.g. \xab) and HTML entities."""
        response = self.PATTERN_HEX_CHAR.sub(self.unescape_hex_helper, response)
        return utility.unescape(response, True)
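
Neither PATTERN_HEX_CHAR nor unescape_hex_helper appears in the snippet; a hypothetical pair consistent with the docstring (a regex over literal \xNN escapes plus a substitution callback) could look like this, with the HTML-entity half still delegated to utility.unescape:

import re

# Hypothetical definitions; the real ones are not part of the example above.
PATTERN_HEX_CHAR = re.compile(r'\\x([0-9a-fA-F]{2})')

def unescape_hex_helper(match):
    return chr(int(match.group(1), 16))

response = r'caf\xe9 &amp; restaurant'
print(PATTERN_HEX_CHAR.sub(unescape_hex_helper, response))
# the \xe9 escape becomes the 0xe9 character; utility.unescape(..., True) would then decode &amp;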
Example #26
def lith_course_info(code, programme, year):
    # Fetch the study handbook page for the course
    response = utility.read_url(sh_url(code, year))
    sh_data = response["data"]

    # Locate the Swedish course name
    m = re.search(
        "\<span class=\"txtbold\"\>\<b\>(.*?), \d{1,2},?\d? p \</b\>", sh_data)
    if m:
        name = utility.unescape(m.group(1))
    else:
        return "Hmm, are you sure the course code is " + code + "?"

    # Locate the English course name
    m = re.search("\<br\>/(.*?)/\</b\>\</span\>", sh_data)
    if m:
        name = name + " (" + utility.unescape(m.group(1)) + ")"
    else:
        print "I couldn't find the English name of the LiTH course " + \
         code + " O.o"

    # Locate the number of HP (ECTS) credits
    m = re.search(
        "\<span class=\"txtbold\"\>\<b\> (\d{1,2},?\d?) hp\</span\>\</font\>",
        sh_data)
    if m:
        credits = m.group(1).replace(",", ".")
    else:
        credits = "???"
        #print "I couldn't find the number of credits for the LiTH course " + \
        #	code + " O.o"

    # Locate the advancement level
    m = re.search(
        "\<span class=\"txtkursivlista\"\>Utbildningsniv&aring; \(G1,G2,A\):\<\/span\>\<i\> \<\/i\>\<span class=\"txtlista\"\>(.+?)\<\/span\>",
        sh_data)
    if m:
        level = m.group(1)
    else:
        level = "???"

    # Fetch the schedule page for the course from the study handbook
    response = utility.read_url(schedule_url(code, programme, year))
    sh_data = response["data"]

    # Match study periods
    # (Usually ([HV]t[12]) but some odd courses have other formats, e.g. Ht2a)
    period_m = re.findall(
        "\<td\>\<span class=\"txtlista\"\>\d([HV]t.*?)\</span\>\</td\>",
        sh_data)

    # Match blocks
    # (Usually ([1-4]) but some courses have other formats, e.g. "-", "" or "1+2+4")
    block_m = re.findall(
     "&-Token.ksk=\[Field:'k_kurskod'\]\"\>---\>" + "(.*?)" + \
     "\<!---\<\/a\>---\>\<\/span\>\<\/td\>",
     sh_data)

    # Build a list of schedule occasions
    schedules = []
    for i in range(len(period_m)):
        # Assemble an occasion string
        match = period_m[i] + "."
        if not block_m[i]:
            match += "?"
        else:
            match += block_m[i].replace(", ", "+")

        # Add if not already present
        if match not in schedules:
            schedules.append(match)

    # Convert it into a string
    if schedules:
        schedule_text = "Scheduled during " + ", ".join(sorted(schedules))
    else:
        schedule_text = "Not scheduled " + year + "."

    # Combine all the information and return it
    return code + ": " + name + ", " + credits + " HP on level " + level + \
      ". " + schedule_text + " | " + sh_url(code, year)
Example #27
def video(video_id, title, thumb_url, is_episode, hide_movies, video_type,
          url):
    added = False
    year = ''
    mpaa = ''
    duration = ''
    description = ''
    director = ''
    genre = ''
    rating = 0.0
    video_details = get.video_info(video_id)
    match = re.compile('<span class="title.*?>(.+?)</span',
                       re.DOTALL).findall(video_details)
    if not title:
        title = match[0].strip()
    match = re.compile('<span class="year.*?>(.+?)</span',
                       re.DOTALL).findall(video_details)
    if match:
        year = match[0].partition('-')[0]
    if not thumb_url:
        match = re.compile('src="(.+?)"', re.DOTALL).findall(video_details)
        thumb_url = match[0].replace('/webp/',
                                     '/images/').replace('.webp', '.jpg')
    match = re.compile('<span class="mpaaRating.*?>(.+?)</span',
                       re.DOTALL).findall(video_details)
    if match:
        mpaa = match[0].strip()
    match = re.compile('<span class="duration.*?>(.+?)</span',
                       re.DOTALL).findall(video_details)
    if match:
        duration = match[0].lower()
    if duration.split(' ')[-1].startswith('min'):
        type = 'movie'
        video_type_temp = type
        duration = duration.split(' ')[0]
    else:
        video_type_temp = 'tv'
        if is_episode:
            type = 'episode'
        else:
            type = 'tvshow'
        duration = ''
    if utility.get_setting('use_tmdb') == 'true':
        year_temp = year
        title_temp = title
        if ' - ' in title_temp:
            title_temp = title_temp[title_temp.index(' - '):]
        if '-' in year_temp:
            year_temp = year_temp.split('-')[0]
        filename = utility.clean_filename(video_id) + '.jpg'
        filename_none = utility.clean_filename(video_id) + '.none'
        cover_file = xbmc.translatePath(utility.cover_cache_dir() + filename)
        cover_file_none = xbmc.translatePath(utility.cover_cache_dir() +
                                             filename_none)
        if not (xbmcvfs.exists(cover_file) or xbmcvfs.exists(cover_file_none)):
            utility.log(
                'Downloading cover art. type: %s, video_id: %s, title: %s, year: %s'
                % (video_type_temp, video_id, title_temp, year_temp))
            get.cover(video_type_temp, video_id, title_temp, year_temp)
    match = re.compile('src=".+?">.*?<.*?>(.+?)<',
                       re.DOTALL).findall(video_details)
    if match:
        description_temp = match[0]
        # replace all embedded unicode in unicode (Norwegian problem)
        description_temp = description_temp.replace('u2013',
                                                    unicode('\u2013')).replace(
                                                        'u2026',
                                                        unicode('\u2026'))
        description = utility.unescape(description_temp)
    match = re.compile('Director:</dt><dd>(.+?)<',
                       re.DOTALL).findall(video_details)
    if match:
        director = match[0].strip()
    match = re.compile('<span class="genre.*?>(.+?)</span',
                       re.DOTALL).findall(video_details)
    if match:
        genre = match[0]
    match = re.compile('<span class="rating">(.+?)</span',
                       re.DOTALL).findall(video_details)
    if len(match) > 0:
        rating = float(match[0])
    title = utility.unescape(title)
    next_mode = 'play_video_main'
    if utility.get_setting('browse_tv_shows') == 'true' and type == 'tvshow':
        next_mode = 'list_seasons'
    if '/my-list' in url and video_type_temp == video_type:
        add.video(title,
                  video_id,
                  next_mode,
                  thumb_url,
                  type,
                  description,
                  duration,
                  year,
                  mpaa,
                  director,
                  genre,
                  rating,
                  remove=True)
        added = True
    elif type == 'movie' and hide_movies:
        pass
    elif video_type_temp == video_type or video_type == 'both':
        add.video(title, video_id, next_mode, thumb_url, type, description,
                  duration, year, mpaa, director, genre, rating)
        added = True
    return added