def _getDOMElements(item, name, attrs): lst = [] for key in attrs: lst2 = re.compile( '(<' + name + '[^>]*?(?:' + key + '=[\'"]' + attrs[key] + '[\'"].*?>))', re.M | re.S).findall(item) if len(lst2) == 0 and attrs[key].find( " ") == -1: # Try matching without quotation marks lst2 = re.compile( '(<' + name + '[^>]*?(?:' + key + '=' + attrs[key] + '.*?>))', re.M | re.S).findall(item) if len(lst) == 0: # log_debug("Setting main list " + repr(lst2)) lst = lst2 lst2 = [] else: # log_debug("Setting new list " + repr(lst2)) test = list(range(len(lst))) test.reverse() for i in test: # Delete anything missing from the next list. if not lst[i] in lst2: # log_debug("Purging mismatch " + str(len(lst)) + " - " + repr(lst[i])) del (lst[i]) if len(lst) == 0 and attrs == {}: # log_debug("No list found, trying to match on name only") lst = re.compile('(<' + name + '>)', re.M | re.S).findall(item) if len(lst) == 0: lst = re.compile('(<' + name + ' .*?>)', re.M | re.S).findall(item) # log_debug("Done: " + str(type(lst))) return lst
def news_episodes_listing(self, query):
    """Scrape 100 ajax pages of the news section in parallel.

    Worker threads store their html into pre-allocated slots of ``self.data``;
    the joined markup is then parsed into title/image/url dicts on ``self.list``.
    """
    threads = []
    for page in range(1, 101):
        link = self.newsgr_link_ajax.format(page=str(page), category=query)
        threads.append(workers.Thread(self.thread, page, link))
        self.data.append('')  # reserve a slot for this thread's result
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    html = '\n'.join(self.data)
    for item in client.parseDOM(html, 'div', attrs={'class': 'newsItem'}):
        label = client.replaceHTMLCodes(client.parseDOM(item, 'a')[1])
        timestamp = client.parseDOM(item, 'time')[0]
        title = u'[CR]'.join([label, timestamp])
        image = client.parseDOM(item, 'img', ret='src')[0]
        url = client.parseDOM(item, 'a', ret='href')[1]
        self.list.append({'title': title, 'image': image, 'url': url})
    return self.list
def _paper_index(link):
    """Build a page menu (title / thumb / full-size image url) for an issuu paper."""
    base_img_url = 'https://image.isu.pub/'
    html = client.request(link)
    scripts = client.parseDOM(html, 'script', attrs={'type': 'application/javascript'})
    # The document metadata lives in the second-to-last inline script,
    # as a "var x = {...};" assignment.
    payload = scripts[-2].partition(' = ')[2].rstrip(';')
    document = json.loads(payload)['document']
    doc_id = document['id']
    menu = []
    for page in range(1, int(document['pageCount']) + 1):
        number = str(page)
        menu.append({
            'title': document['title'] + ' - ' + control.lang(30003) + ' ' + number,
            'image': base_img_url + doc_id + '/jpg/page_{0}_thumb_large.jpg'.format(number),
            'url': base_img_url + doc_id + '/jpg/page_{0}.jpg'.format(number)
        })
    return menu
def radios():
    """Build the radio stations directory (ERT streams plus Zeppelin and district)."""
    image_paths = (
        '/wp-content/uploads/2016/06/proto.jpg',
        '/wp-content/uploads/2016/06/deytero.jpg',
        '/wp-content/uploads/2016/06/trito.jpg',
        '/wp-content/uploads/2016/06/kosmos.jpg',
        '/wp-content/uploads/2016/06/VoiceOgGreece.png',
        '/wp-content/uploads/2016/06/eraSport.jpg',
        '/wp-content/uploads/2016/06/958fm.jpg',
        '/wp-content/uploads/2016/06/102fm.jpg'
    )
    stream_paths = (
        '/ert-proto', '/ert-deftero', '/ert-trito', '/ert-kosmos',
        '/ert-voiceofgreece', '/ert-erasport', '/ert-958fm', '/ert-102fm'
    )
    titles = [control.lang(code) for code in range(30028, 30036)]
    stations = []
    for title, img, stream in zip(titles, image_paths, stream_paths):
        stations.append({
            'title': title,
            'url': RADIO_STREAM + stream,
            'image': RADIO_LINK + img,
            'action': 'play',
            'isFolder': 'False',
            'fanart': control.addonmedia('radio_fanart.jpg')
        })
    # Zeppelin Radio is not part of the ERT family; slot it in mid-list.
    stations.insert(4, {
        'title': 'Zeppelin Radio 106.1',
        'action': 'play',
        'isFolder': 'False',
        'url': RADIO_STREAM + '/ert-zeppelin',
        'image': 'https://i.imgur.com/ep3LptZ.jpg',
        'fanart': control.addonmedia('zeppelin_bg.jpg')
    })
    stations.append({
        'title': control.lang(30027),
        'action': 'district',
        'icon': 'district.jpg',
        'fanart': control.addonmedia('radio_fanart.jpg')
    })
    directory.add(stations)
def items_list(self, url):
    """Parse the article listing at *url* into ``self.list``.

    Multi-page listings (detected via ``contentContainer_totalpages``) are
    fetched concurrently by worker threads; each ``<article>`` yields a
    title/url/image dict.  Items that fail to parse are skipped silently,
    keeping the original best-effort behaviour.

    :param url: listing page url
    :return: self.list extended with the parsed items
    """
    page = url
    result = client.request(page)
    # BUGFIX: pre-initialise so a failure inside the try below cannot leave
    # `items` unbound and raise NameError at the loop further down.
    items = []
    try:
        if "contentContainer_totalpages" in result:
            totalPages = int(
                re.search(r'contentContainer_totalpages = (\d+);', result).group(1))
            seriesId = re.search(r'/templates/data/morevideos\?aid=(\d+)', result).group(1)
            threads = []
            for i in range(1, totalPages + 1):
                threads.append(
                    workers.Thread(
                        self.thread,
                        self.more_videos + seriesId + "&p=" + str(i), i - 1))
                self.data.append('')  # reserve the slot the thread writes into
            [i.start() for i in threads]
            [i.join() for i in threads]
            for i in self.data:
                items.extend(client.parseDOM(i, "article"))
        else:
            items = client.parseDOM(result, "article")
    except Exception:
        pass
    for item in items:
        try:
            title = client.parseDOM(item, "h2")[0]
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            link = client.parseDOM(item, "a", ret="href")[0]
            # Links of the form /.../<digits>/... resolve through the episodes api.
            if re.match(r'/.+/(\d+)/.+', link) is not None:
                episodeId = re.search(r'/.+/(\d+)/.+', link).group(1)
                episodeJSON = client.request(self.episodes_link + episodeId)
                episodeJSON = json.loads(episodeJSON)
                url = episodeJSON['url']
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
            else:
                url = self.base_link + link + '/videos'
            image = client.parseDOM(item, "img", ret="src")[0]
            image = client.replaceHTMLCodes(image)
            image = image.encode('utf-8')
            self.list.append({'title': title, 'url': url, 'image': image})
        except Exception:
            pass
    return self.list
def videos_list(self, url, lang):
    """Query the station api for a listing and resolve it into ``self.list``.

    The response carries items under one of three possible keys; each item
    becomes a title/url/image dict, and a worker per entry then fills in a
    'check' field — entries without a usable 'check' are dropped.
    """
    try:
        request = urlencode({'request': self.post_link % (url, lang)})
        response = json.loads(client.request(self.api_link, post=request))
        items = []
        # The api varies the wrapping key; probe them in priority order.
        for key in ('themedetailslist', 'programDetailsList', 'homelist'):
            if key in response:
                items = response[key]
                break
    except:
        return
    for item in items:
        try:
            title = client.replaceHTMLCodes(item['title']).encode('utf-8')
            vid = str(item['id']).encode('utf-8')
            image = (self.img2_link % (vid, vid)).encode('utf-8')
            self.list.append({'title': title, 'url': vid, 'image': image})
        except:
            pass
    workers_pool = [
        workers.Thread(self.list_worker, idx, lang)
        for idx in range(0, len(self.list))
    ]
    for worker in workers_pool:
        worker.start()
    for worker in workers_pool:
        worker.join()
    # Drop every entry whose worker failed to produce a usable 'check' value.
    self.list = [
        entry for entry in self.list
        if 'check' in entry and not (entry['check'] == '' or entry['check'] is None)
    ]
    return self.list
def prevent_failure():
    """Wait (up to ~50s) for the busy dialog to vanish, then close all dialogs."""
    for _ in range(0, 500):
        if not control.condVisibility('Window.IsActive(busydialog)'):
            control.execute('Dialog.Close(all,true)')
            break
        # Busy dialog still up; poll again shortly.
        control.sleep(100)
def switch(query):
    """Show a page-picker dialog, persist the choice, and refresh the view."""
    labels = [control.lang(30026).format(number) for number in range(1, int(query) + 1)]
    choice = control.selectDialog(labels, heading=control.lang(30028))
    if choice == -1:
        # Dialog cancelled; leave the current page untouched.
        return
    control.setSetting('page', str(choice))
    control.sleep(200)
    control.refresh()
def list_divider(list_, chunks):
    """Split *list_* into consecutive slices of at most *chunks* items.

    Useful for paginating a (possibly dict-valued) list; the final slice may
    be shorter than *chunks*.

    :param list_: the list to split (can be a list of dictionaries)
    :param chunks: maximum number of items per slice
    :return: list of lists
    """
    pages = []
    for start in range(0, len(list_), chunks):
        pages.append(list_[start:start + chunks])
    return pages
def _playlist(self, url, limit):
    """Collect up to *limit* pages of a youtube playlist into ``self.list``."""
    try:
        result = json.loads(client.request(url))
        items = result['items']
    except Exception:
        log_debug(
            'Youtube: Could not fetch items from the cdn, invalid key or no quota left'
        )
        return
    # Follow nextPageToken links for the remaining pages.
    for _ in range(1, limit):
        try:
            if 'nextPageToken' not in result:
                raise Exception
            next_page = url + '&pageToken=' + result['nextPageToken']
            result = json.loads(client.request(next_page))
            items += result['items']
        except Exception:
            pass
    for item in items:
        try:
            title = item['snippet']['title']
            try:
                title = py2_enc(title)
            except AttributeError:
                pass
            video_id = item['id']
            try:
                video_id = py2_enc(video_id)
            except AttributeError:
                pass
            image = item['snippet']['thumbnails']['high']['url']
            # A default-quality thumbnail marks a dead/unavailable video; skip it.
            if '/default.jpg' in image:
                raise Exception
            try:
                image = py2_enc(image)
            except AttributeError:
                pass
            self.list.append({'title': title, 'url': video_id, 'image': image})
        except Exception:
            pass
    return self.list
def _playlist(self, url, limit):
    """Collect up to *limit* pages of a youtube playlist into ``self.list``.

    :param url: api request url for the first page
    :param limit: maximum number of pages fetched (first page included)
    :return: self.list extended with title/url/image dicts, or None when the
             initial request fails
    """
    try:
        result = client.request(url)
        result = json.loads(result)
        items = result['items']
    except Exception:
        # BUGFIX: was "pass", which left `result`/`items` unbound and raised
        # NameError below; bail out like the sibling _playlist does.
        return
    # Follow nextPageToken links for the remaining pages.
    for i in range(1, limit):
        try:
            if 'nextPageToken' not in result:
                raise Exception
            next_page = url + '&pageToken=' + result['nextPageToken']
            result = client.request(next_page)
            result = json.loads(result)
            items += result['items']
        except Exception:
            pass
    for item in items:
        try:
            title = item['snippet']['title']
            try:
                title = title.encode('utf-8')
            except AttributeError:
                pass
            url = item['id']
            try:
                url = url.encode('utf-8')
            except AttributeError:
                pass
            image = item['snippet']['thumbnails']['high']['url']
            # A default-quality thumbnail marks a dead/unavailable video; skip it.
            if '/default.jpg' in image:
                raise Exception
            try:
                image = image.encode('utf-8')
            except AttributeError:
                pass
            self.list.append({'title': title, 'url': url, 'image': image})
        except Exception:
            pass
    return self.list
def mag_index(self, url):
    """Render a magazine's pages as a Kodi picture directory.

    The remote side exposes the page count at ``url + '/pages'``; thumbs and
    full pages live at predictable zero-padded jpg paths.
    """
    total = int(client.request(url + '/pages'))
    pages = []
    for page in range(1, total + 1):
        string = str(page)
        # Single-digit page numbers are zero-padded in the remote file names.
        if len(string) == 2:
            padded = string
        else:
            padded = '0' + string
        pages.append({
            'title': control.lang(30026) + ' ' + string,
            'image': url + '/thumbs' + '/thumb-' + padded + '.jpg',
            'url': url + '/page-' + padded + '.jpg'
        })
    handle = int(self.argv[1])
    for p in pages:
        li = control.item(label=p['title'], iconImage=p['image'],
                          thumbnailImage=p['image'])
        li.setArt({
            'poster': p['image'],
            'thumb': p['image'],
            'fanart': control.addonInfo('fanart')
        })
        li.setInfo('image', {
            'title': p['title'],
            'picturepath': p['image']
        })
        control.addItem(handle, p['url'], li, False)
    control.directory(handle)
def radios(self):
    """Append the ERT radio stations (plus the district entry) to the directory."""
    image_paths = (
        '/wp-content/uploads/2016/06/proto.jpg',
        '/wp-content/uploads/2016/06/deytero.jpg',
        '/wp-content/uploads/2016/06/trito.jpg',
        '/wp-content/uploads/2016/06/kosmos.jpg',
        '/wp-content/uploads/2016/06/VoiceOgGreece.png',
        '/wp-content/uploads/2016/06/eraSport.jpg',
        '/wp-content/uploads/2016/06/958fm.jpg',
        '/wp-content/uploads/2016/06/102fm.jpg'
    )
    stream_paths = (
        '/ert-proto', '/ert-deftero', '/ert-trito', '/ert-kosmos',
        '/ert-voiceofgreece', '/ert-erasport', '/ert-958fm', '/ert-102fm'
    )
    titles = [control.lang(code) for code in range(30028, 30036)]
    for title, img, stream in zip(titles, image_paths, stream_paths):
        self.list.append({
            'title': title,
            'url': self.radio_stream + stream,
            'image': self.radio_link + img,
            'action': 'play',
            'isFolder': 'False',
            'fanart': control.addonmedia('radio_fanart.jpg')
        })
    self.list.append({
        'title': control.lang(30027),
        'action': 'district',
        'icon': 'district.jpg',
        'fanart': control.addonmedia('radio_fanart.jpg')
    })
    directory.add(self.list)
def items_list(self, url, post=None):
    """Parse a catalogue page (movies / short films / theater) into ``self.list``.

    For year-indexed sections, one listing page per year index is fetched and
    the markup concatenated before parsing; otherwise the single page at
    *url* is parsed directly.

    :param url: catalogue page url
    :param post: optional POST payload forwarded to client.request
    :return: self.list extended with title/url/image/year/name dicts
    """
    indexer = urlparse(url).query
    # BUGFIX: the block computing `length` had been commented out, leaving it
    # undefined (NameError at the loop below).  Restored.  The commented-out
    # code used all([...]), which a single url can never satisfy; any() must
    # have been intended for the shortfilm/theater case.
    if 'movies.php' in url:
        length = 9
    elif any(['shortfilm.php' in url, 'theater.php' in url]):
        length = 6
    else:
        length = 2
    # Build the year/paging query fragments matching the current indexer.
    for year in range(1, length):
        if indexer.startswith('l='):
            p = 'y=' + str(year) + '&g=&p='
        elif indexer.startswith('g='):
            p = 'y=' + str(year) + '&l=&p='
        elif indexer.startswith('p='):
            p = 'y=' + str(year) + '&l=&g='
        elif indexer.startswith('c='):
            p = 'y=' + str(year) + '&l=&g='
        else:
            p = ''
        self.years.append(p)
    if indexer.startswith(
            ('l=', 'g=', 's=', 'p=', 'c=')
    ) and 'movies.php' in url or 'shortfilm.php' in url or 'theater.php' in url:
        # NOTE(review): `and` binds tighter than `or`, so shortfilm/theater
        # urls take this branch regardless of the indexer — confirm whether
        # parentheses around the or-chain were intended.
        for content in self.years:
            links = GM_BASE + url.rpartition('/')[2].partition(
                '&')[0] + '&' + content
            try:
                htmls = client.request(links).decode('utf-8')
            except AttributeError:
                # Already text (py3 client); use it as-is.
                htmls = client.request(links)
            self.data.append(htmls)
        result = u''.join(self.data)
        content = client.parseDOM(
            result, 'div', attrs={'class': 'col-xs-6 col-sm-4 col-md-3'})
    else:
        html = client.request(url, post=post)
        content = client.parseDOM(
            html, 'div', attrs={'class': 'col-xs-6 col-sm-4 col-md-3'})
    contents = ''.join(content)
    items = re.findall('(<a.*?href.*?div.*?</a>)', contents, re.U)
    for item in items:
        title = client.parseDOM(item, 'h4')[0]
        image = client.parseDOM(item, 'img', ret='src')[0]
        # Strip the trailing " (year)" suffix for the plain name.
        name = title.rpartition(' (')[0]
        image = urljoin(GM_BASE, image)
        link = client.parseDOM(item, 'a', ret='href')[0]
        link = urljoin(GM_BASE, link)
        year = re.findall(r'.*?\((\d{4})', title, re.U)[0]
        self.list.append({
            'title': title,
            'url': link,
            'image': image,
            'year': int(year),
            'name': name
        })
    return self.list
def _video_list(self, cid, url, pagination, limit):
    """Fetch a youtube video listing and enrich each entry with its duration.

    :param cid: api url used to compose the 'next' page link
    :param url: api url of the first page of results
    :param pagination: when True only one page is fetched and a 'next' link
        is attached; when False up to *limit* pages are accumulated
    :param limit: upper bound on the number of pages followed
    :return: self.list extended with title/url/image/date/duration dicts
    """
    try:
        result = client.request(url)
        result = json.loads(result)
        items = result['items']
    except Exception:
        # NOTE(review): on failure `result`/`items` stay unbound and the
        # loops below raise NameError — confirm this path never fires.
        pass
    # Follow nextPageToken links unless single-page pagination is requested.
    for i in list(range(1, limit)):
        try:
            if pagination is True:
                raise Exception
            if not 'nextPageToken' in result:
                raise Exception
            page = url + '&pageToken=' + result['nextPageToken']
            result = client.request(page)
            result = json.loads(result)
            items += result['items']
        except Exception:
            pass
    # In pagination mode expose the link to the next page ('' otherwise).
    try:
        if pagination is False:
            raise Exception
        next = cid + '&pageToken=' + result['nextPageToken']
    except Exception:
        next = ''
    for item in items:
        try:
            title = item['snippet']['title']
            try:
                title = title.encode('utf-8')
            except AttributeError:
                pass
            # Playlist items carry the id under resourceId; other result
            # shapes carry it under item['id']['videoId'].
            try:
                url = item['snippet']['resourceId']['videoId']
            except (KeyError, ValueError):
                url = item['id']['videoId']
            try:
                url = url.encode('utf-8')
            except AttributeError:
                pass
            image = item['snippet']['thumbnails']['high']['url']
            # A default-quality thumbnail signals an unavailable video; skip it.
            if '/default.jpg' in image:
                raise Exception
            try:
                image = image.encode('utf-8')
            except AttributeError:
                pass
            try:
                dateadded = item['snippet']['publishedAt']
                dateadded = str(iso8601.parse_date(dateadded).strftime('%Y-%m-%d %H:%M:%S'))
            except Exception:
                # Fall back to "now" when the publish date is missing/bad.
                dateadded = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            date = '.'.join(dateadded.split()[0].split('-')[::-1])
            data = {
                'title': title,
                'url': url,
                'image': image,
                'dateadded': dateadded,
                'date': date,
                'premiered': dateadded.split()[0],
                'aired': dateadded.split()[0],
                'year': int(dateadded[:4])
            }
            if next != '':
                data['next'] = next
            self.list.append(data)
        except Exception:
            pass
    # Second pass: ask the content api for durations, 50 ids per request,
    # fetched concurrently by worker threads writing into self.data slots.
    try:
        u = [list(range(0, len(self.list)))[i:i+50] for i in list(range(len(list(range(0, len(self.list))))))[::50]]
        u = [','.join([self.list[x]['url'] for x in i]) for i in u]
        u = [self.content_link % i + self.key_link for i in u]
        threads = []
        for i in list(range(0, len(u))):
            threads.append(workers.Thread(self.thread, u[i], i))
            self.data.append('')
        [i.start() for i in threads]
        [i.join() for i in threads]
        items = []
        for i in self.data:
            items += json.loads(i)['items']
    except Exception:
        pass
    # Attach the playable link and a duration (in seconds, as a string).
    for item in list(range(0, len(self.list))):
        try:
            vid = self.list[item]['url']
            self.list[item]['url'] = self.play_link.format(vid)
            d = [(i['id'], i['contentDetails']) for i in items]
            d = [i for i in d if i[0] == vid]
            d = d[0][1]['duration']
            duration = 0
            # The api reports an ISO-8601 duration, e.g. PT1H2M3S.
            try:
                duration += 60 * 60 * int(re.findall('(\d*)H', d)[0])
            except Exception:
                pass
            try:
                duration += 60 * int(re.findall('(\d*)M', d)[0])
            except Exception:
                pass
            try:
                duration += int(re.findall('(\d*)S', d)[0])
            except Exception:
                pass
            duration = str(duration)
            self.list[item]['duration'] = duration
        except Exception:
            pass
    return self.list
def welcome_message():
    """Concatenate the localized welcome strings (30131-30142) and display them."""
    parts = [control.lang(string_id) for string_id in range(30131, 30143)]
    text_box(u''.join(parts))
def run(self, query=None):
    """Entry point of the subtitle search service.

    Queries the three providers (xsubstv, subzxyz, subtitlesgr) in worker
    threads, building the search string from player/list-item metadata when
    *query* is not supplied, then renders the results as Kodi directory items.

    :param query: optional explicit search string; when omitted the query is
        derived from the currently playing/focused item
    """
    # Bail out early when Greek is not among the configured subtitle languages.
    if 'Greek' not in str(langs).split(','):
        control.directory(syshandle)
        control.infoDialog(control.lang(32002))
        return
    # Kodi 18+ needs vfs.libarchive to unpack downloaded subtitle archives.
    if not control.conditional_visibility(
            'System.HasAddon(vfs.libarchive)') and float(
                control.addon('xbmc.addon').getAddonInfo('version')
                [:4]) >= 18.0:
        control.execute('InstallAddon(vfs.libarchive)')
    threads = [
        workers.Thread(self.xsubstv),
        workers.Thread(self.subzxyz),
        workers.Thread(self.subtitlesgr)
    ]
    dup_removal = False
    if not query:
        # Pull metadata from the active player, falling back to the focused
        # list item.
        if control.condVisibility('Player.HasVideo'):
            infolabel_prefix = 'VideoPlayer'
        else:
            infolabel_prefix = 'ListItem'
        title = control.infoLabel('{0}.Title'.format(infolabel_prefix))
        # Non-ascii titles are swapped for the original (latin) title.
        if re.search(r'[^\x00-\x7F]+', title) is not None:
            title = control.infoLabel(
                '{0}.OriginalTitle'.format(infolabel_prefix))
        year = control.infoLabel('{0}.Year'.format(infolabel_prefix))
        tvshowtitle = control.infoLabel(
            '{0}.TVshowtitle'.format(infolabel_prefix))
        season = control.infoLabel('{0}.Season'.format(infolabel_prefix))
        if len(season) == 1:
            season = '0' + season
        episode = control.infoLabel('{0}.Episode'.format(infolabel_prefix))
        if len(episode) == 1:
            episode = '0' + episode
        # NOTE(review): appears to normalise special "Sx"-style episode
        # labels — confirm which infolabel values actually hit this branch.
        if 's' in episode.lower():
            season, episode = '0', episode[-1:]
        if tvshowtitle != '':  # episode
            # Episodes are searched twice: by episode title and by SxxExx.
            title_query = '{0} {1}'.format(tvshowtitle, title)
            season_episode_query = '{0} S{1} E{2}'.format(
                tvshowtitle, season, episode)
            threads = [
                workers.Thread(self.xsubstv, title_query),
                workers.Thread(self.subzxyz, title_query),
                workers.Thread(self.subtitlesgr, title_query),
                workers.Thread(self.xsubstv, season_episode_query),
                workers.Thread(self.subzxyz, season_episode_query),
                workers.Thread(self.subtitlesgr, season_episode_query)
            ]
            dup_removal = True
            log.log('Dual query used for subtitles search: ' + title_query +
                    ' / ' + season_episode_query)
        elif year != '':  # movie
            query = '{0} ({1})'.format(title, year)
        else:  # file
            query, year = getCleanMovieTitle(title)
            if year != '':
                query = '{0} ({1})'.format(query, year)
    if not dup_removal:
        log.log('Query used for subtitles search: ' + query)
    self.query = query
    [i.start() for i in threads]
    # Poll the workers for up to ~30 seconds (40 iterations of 750ms).
    for c, i in list(enumerate(range(0, 40))):
        is_alive = [x.is_alive() for x in threads]
        if all(x is False for x in is_alive):
            log.log('Reached count : ' + str(c))
            break
        if control.aborted is True:
            log.log('Aborted, reached count : ' + str(c))
            break
        control.sleep(750)
    if len(self.list) == 0:
        control.directory(syshandle)
        return
    # Fixed presentation order: xsubstv, then subzxyz, then subtitlesgr.
    f = []
    # noinspection PyUnresolvedReferences
    f += [i for i in self.list if i['source'] == 'xsubstv']
    f += [i for i in self.list if i['source'] == 'subzxyz']
    f += [i for i in self.list if i['source'] == 'subtitlesgr']
    self.list = f
    if dup_removal:
        # The dual episode query can return identical hits; de-duplicate.
        self.list = [
            dict(t) for t in {tuple(d.items()) for d in self.list}
        ]
    # Tag each result name with its provider.
    for i in self.list:
        try:
            if i['source'] == 'subzxyz':
                i['name'] = '[subzxyz] {0}'.format(i['name'])
            elif i['source'] == 'xsubstv':
                i['name'] = '[xsubstv] {0}'.format(i['name'])
        except Exception:
            pass
    # Emit one directory item per subtitle hit.
    for i in self.list:
        u = {'action': 'download', 'url': i['url'], 'source': i['source']}
        u = '{0}?{1}'.format(sysaddon, urlencode(u))
        item = control.item(label='Greek', label2=i['name'],
                            iconImage=str(i['rating']), thumbnailImage='el')
        item.setProperty('sync', 'false')
        item.setProperty('hearing_imp', 'false')
        control.addItem(handle=syshandle, url=u, listitem=item,
                        isFolder=False)
    control.directory(syshandle)
def _listing(self, url):
    """Scrape a listing page (direct, ajax-paginated, or search) into ``self.list``.

    Three cases are handled:
      * first visit to an ajax-driven section: all pages are fetched (or just
        the first when pagination mode is on) and parsed,
      * an ajax url carrying an explicit page: that page is parsed and a
        'next' link is produced while full pages (>= 20 items) keep coming,
      * any other page: items are parsed directly from the markup.
    """
    if self.ajax_url in url:
        result = client.request(url.partition('?')[0], post=url.partition('?')[2])
    else:
        result = client.request(url)
    try:
        header = parseDOM(result, 'h2')[0]
    except IndexError:
        header = None
    next_url = None
    override = False
    # Search results, or an explicit user preference, force page-at-a-time mode.
    if self.base_link + '/?s=' in url or control.setting('pagination') == 'true':
        override = True
    threads_1 = []
    threads_2 = []

    # Nest the function to work on either of the two cases
    def _exec(_items, _next_url=None):
        if control.setting('threading') == 'true':
            for count, _item in enumerate(_items, start=1):
                # BUGFIX: was workers.Thread(self.loop(...)), which ran
                # self.loop synchronously and handed its *return value* to
                # Thread; pass the callable plus its args instead, as the
                # rest of the file does.
                threads_2.append(
                    workers.Thread(self.loop, _item, header, count, _next_url))
            [i.start() for i in threads_2]
            [i.join() for i in threads_2]
        else:
            for count, _item in enumerate(_items, start=1):
                self.loop(_item, header, count, _next_url)

    if 'enimerosi-24' not in url and self.ajax_url not in url:
        # The page embeds two js config objects with the ajax parameters.
        ajaxes = [i for i in parseDOM(result, 'script', attrs={'type': 'text/javascript'}) if 'ajaxurl' in i]
        ajax1 = json.loads(re.search(r'var loadmore_params = ({.+})', ajaxes[-1]).group(1))
        ajax2 = json.loads(re.search(r'var cactus = ({.+})', ajaxes[0]).group(1))
        ajax = self._ajax_merge(ajax1, ajax2)
        pages = int(ajax['max_page'])
        posts = ajax['posts']
        try:
            posts = posts.encode('utf-8')
        except Exception:
            pass
        if control.setting('threading') == 'true' and not override:
            for i in range(0, pages + 1):
                # BUGFIX: was workers.Thread(self.thread(...)) — the request
                # ran on the spot; defer it to the thread via a bound-default
                # lambda so the keyword `post` argument is preserved.
                payload = self.load_more.format(query=quote(posts), page=str(i))
                threads_1.append(
                    workers.Thread(lambda p=payload: self.thread(self.ajax_url, post=p)))
            [i.start() for i in threads_1]
            [i.join() for i in threads_1]
        else:
            for i in range(0, pages + 1):
                a = client.request(self.ajax_url, post=self.load_more.format(query=quote(posts), page=str(i)))
                self.data.append(a)
                if i == 0 and override:
                    # Page-at-a-time mode: keep only page 0, link to page 1.
                    next_url = '?'.join([self.ajax_url, self.load_more.format(query=quote(posts), page='1')])
                    break
        html = '\n'.join(self.data)
        items = itertags_wrapper(html, 'div', attrs={'class': 'item item-\d+'})
        if len(items) < 20:
            next_url = None
        _exec(items, next_url)
    elif self.ajax_url in url:
        items = itertags_wrapper(result, 'div', attrs={'class': 'item item-\d+'})
        parsed = dict(parse_qsl(url.partition('?')[2]))
        next_page = int(parsed['page']) + 1
        parsed['page'] = next_page
        # Fewer than 20 items means the final page: no 'next' link.
        if len(items) >= 20:
            next_url = '?'.join([url.partition('?')[0], urlencode(parsed)])
        _exec(items, next_url)
    else:
        items = itertags_wrapper(result, 'div', attrs={'class': 'item item-\d+'})
        for item in items:
            text = item.text
            img = item.attributes['style']
            image = re.search(r'url\((.+)\)', img).group(1)
            title = client.replaceHTMLCodes(parseDOM(text, 'a')[0].strip())
            url = parseDOM(text, 'a', ret='href')[0]
            self.list.append({'title': title, 'image': image, 'url': url})
    return self.list
def _video_list(self, cid, url, pagination, limit):
    """Fetch a youtube listing, then attach playable links and durations.

    :param cid: api url used to compose the 'next' page link
    :param url: api url of the first page of results
    :param pagination: True fetches a single page and exposes a 'next'
        link; False follows nextPageToken for up to *limit* pages
    :param limit: upper bound on the number of pages followed
    :return: self.list, or None when the first request fails
    """
    try:
        result = json.loads(client.request(url))
        items = result['items']
    except Exception:
        log_debug(
            'Youtube: Could not fetch items from the cdn, invalid key or no quota left'
        )
        return
    # Accumulate extra pages unless single-page pagination was requested.
    for _ in range(1, limit):
        try:
            if pagination is True:
                raise Exception
            if 'nextPageToken' not in result:
                raise Exception
            page = url + '&pageToken=' + result['nextPageToken']
            result = json.loads(client.request(page))
            items += result['items']
        except Exception:
            pass
    # In pagination mode expose the link to the next page ('' otherwise).
    try:
        if pagination is False:
            raise Exception
        next_link = cid + '&pageToken=' + result['nextPageToken']
    except Exception:
        next_link = ''
    for item in items:
        try:
            title = item['snippet']['title']
            try:
                title = py2_enc(title)
            except AttributeError:
                pass
            # Playlist items carry the id under resourceId; other result
            # shapes carry it under item['id']['videoId'].
            try:
                video_id = item['snippet']['resourceId']['videoId']
            except (KeyError, ValueError):
                video_id = item['id']['videoId']
            try:
                video_id = py2_enc(video_id)
            except AttributeError:
                pass
            image = item['snippet']['thumbnails']['high']['url']
            # A default-quality thumbnail signals an unavailable video; skip it.
            if '/default.jpg' in image:
                raise Exception
            try:
                image = py2_enc(image)
            except AttributeError:
                pass
            try:
                dateadded = item['snippet']['publishedAt']
                dateadded = str(
                    iso8601.parse_date(dateadded).strftime(
                        '%Y-%m-%d %H:%M:%S'))
            except Exception:
                # Fall back to "now" when the publish date is missing/bad.
                dateadded = str(
                    datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            day = dateadded.split()[0]
            data = {
                'title': title,
                'url': video_id,
                'image': image,
                'dateadded': dateadded,
                'date': '.'.join(day.split('-')[::-1]),
                'premiered': day,
                'aired': day,
                'year': int(dateadded[:4])
            }
            if next_link != '':
                data['next'] = next_link
            self.list.append(data)
        except Exception:
            pass
    # Second pass: query the content api for durations, 50 ids per request
    # (the api maximum), fanned out over a thread pool.
    try:
        indices = list(range(0, len(self.list)))
        batches = [indices[start:start + 50] for start in range(0, len(indices), 50)]
        requests = [
            self.content_link.format(
                ','.join([self.list[x]['url'] for x in batch]) + self.key_link)
            for batch in batches
        ]
        with concurrent_futures.ThreadPoolExecutor(
                max_workers=self.max_workers) as executor:
            futures = [executor.submit(self.thread, u) for u in requests]
            for future in concurrent_futures.as_completed(futures):
                item = future.result()
                if not item:
                    continue
                self.data.append(item)
        items = []
        for chunk in self.data:
            items += json.loads(chunk)['items']
    except Exception:
        pass
    # Attach the playable link and the duration in seconds to every entry.
    for index in range(0, len(self.list)):
        try:
            vid = self.list[index]['url']
            self.list[index]['url'] = self.play_link.format(py3_dec(vid))
            details = [entry['contentDetails'] for entry in items if entry['id'] == vid]
            iso_duration = details[0]['duration']
            duration = 0
            # The api reports an ISO-8601 duration, e.g. PT1H2M3S.
            try:
                duration += 60 * 60 * int(re.search(r'(\d*)H', iso_duration).group(1))
            except Exception:
                pass
            try:
                duration += 60 * int(re.search(r'(\d*)M', iso_duration).group(1))
            except Exception:
                pass
            try:
                duration += int(re.search(r'(\d*)S', iso_duration).group(1))
            except Exception:
                pass
            self.list[index]['duration'] = duration
        except Exception:
            pass
    return self.list