Example #1
    def cookie(self):
        try:
            login = '******'

            token = client.request(login)
            token = client.parseDOM(token,
                                    'input',
                                    ret='value',
                                    attrs={'name': 'csrfmiddlewaretoken'})[0]

            headers = {'Cookie': 'csrftoken=%s' % token}

            post = {
                'username': self.user,
                'password': self.password,
                'csrfmiddlewaretoken': token,
                'next': ''
            }
            post = urllib.urlencode(post)

            c = client.request(login,
                               post=post,
                               headers=headers,
                               output='cookie')
            return c
        except:
            pass
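The same two-step flow (fetch the page to obtain the CSRF token, then POST the credentials with the matching cookie) can be sketched with only the Python 2 standard library. This is an illustration rather than the project's client helper: the login URL, the hidden-input regex and the field names are assumptions.

    import re, urllib, urllib2, cookielib

    def login_with_csrf(login_url, username, password):
        jar = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))

        # First GET: the server sets the csrftoken cookie and embeds the token
        # in a hidden input; the regex below is a placeholder for parseDOM.
        page = opener.open(login_url).read()
        token = re.findall('csrfmiddlewaretoken.+?value="(.+?)"', page)[0]

        # Second request: POST the credentials together with the token; the
        # cookie jar sends the csrftoken cookie back automatically.
        post = urllib.urlencode({'username': username,
                                 'password': password,
                                 'csrfmiddlewaretoken': token,
                                 'next': ''})
        opener.open(login_url, post)

        # Return the session cookies as a single header string, roughly what
        # output='cookie' produces above.
        return '; '.join('%s=%s' % (c.name, c.value) for c in jar)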
Example #2
    def resolve(self, url):
        result = client.request(url)

        if result == None: return

        result = result.replace('\n', '')

        try:
            url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
            url = re.findall('"(.+?)"', url)
            url = [i for i in url if '.m3u8' in i][0]
            if "EXTM3U" in client.request(url): return url
        except:
            pass

        try:
            url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
            url = re.findall('"(.+?)"', url)
            url = [i for i in url if i.startswith('rtmp')][0]

            p = re.findall('/([a-zA-Z0-9]{3,}\:)', url)
            if len(p) > 0: url = url.replace(p[0], ' playpath=%s' % p[0])
            url += ' timeout=10'
        except:
            pass

        try:
            url = re.findall(
                '(?:youtube.com|youtu.be)/(?:embed/|.+?\?v=|.+?\&v=|v/)([0-9A-Za-z_\-]+)',
                result)[0]
            url = 'plugin://plugin.video.youtube/play/?video_id=%s' % url
            return url
        except:
            pass
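A small worked example makes the rtmp playpath rewrite in the second branch easier to follow; the URL below is made up for illustration.

    import re

    url = 'rtmp://example.com/vod/mp4:movie.mp4'
    p = re.findall('/([a-zA-Z0-9]{3,}\:)', url)
    if len(p) > 0: url = url.replace(p[0], ' playpath=%s' % p[0])
    url += ' timeout=10'
    # url is now: 'rtmp://example.com/vod/ playpath=mp4:movie.mp4 timeout=10'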
Example #3
    def play_list(self, url):
        try:
            result = client.request(url)
            result = json.loads(result)
            items = result['items']
        except:
            return

        for i in range(1, 5):
            try:
                if not 'nextPageToken' in result: raise Exception()
                next = url + '&pageToken=' + result['nextPageToken']
                result = client.request(next)
                result = json.loads(result)
                items += result['items']
            except:
                pass

        for item in items:
            try:
                title = item['snippet']['title']
                title = title.encode('utf-8')

                url = item['id']
                url = url.encode('utf-8')

                image = item['snippet']['thumbnails']['high']['url']
                if '/default.jpg' in image: raise Exception()
                image = image.encode('utf-8')

                self.list.append({'title': title, 'url': url, 'image': image})
            except:
                pass

        return self.list
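play_list expects url to be a YouTube Data API v3 request whose JSON response carries items and an optional nextPageToken. A minimal standalone sketch of the same pagination against the public playlistItems endpoint, with a placeholder playlist id and API key, could look like this:

    import json
    import urllib2

    API = ('https://www.googleapis.com/youtube/v3/playlistItems'
           '?part=snippet&maxResults=50&playlistId=%s&key=%s')

    def fetch_playlist(playlist_id, api_key, max_pages=5):
        url = API % (playlist_id, api_key)
        result = json.loads(urllib2.urlopen(url).read())
        items = result['items']

        # Follow nextPageToken up to max_pages times, mirroring the loop above.
        for _ in range(max_pages - 1):
            if 'nextPageToken' not in result: break
            next_url = url + '&pageToken=' + result['nextPageToken']
            result = json.loads(urllib2.urlopen(next_url).read())
            items += result['items']

        return items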
Example #4
    def download(self, path, url):
        try:
            cookie = None

            anonymous = (self.user == '' or self.password == '')

            code, result = client.request(url, output='response', error=True)

            if code == '429' and anonymous == True:
                control.dialog.ok(str('xsubs.tv'), str(result), str(''))
                return
            elif anonymous == False:
                cookie = cache.get(self.cookie, 6)

            result, headers, content, cookie = client.request(
                url, cookie=cookie, output='extended')

            subtitle = content['Content-Disposition']
            subtitle = re.findall('"(.+?)"', subtitle)[0]

            subtitle = os.path.join(path, subtitle.decode('utf-8'))

            if not subtitle.endswith('.srt'): raise Exception()

            with open(subtitle, 'wb') as subFile:
                subFile.write(result)

            return subtitle
        except:
            pass
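The Content-Disposition handling above simply pulls the quoted filename out of the response header; with a fabricated header value the extraction looks like this:

    import re

    content_disposition = 'attachment; filename="Some.Show.S01E01.srt"'
    subtitle = re.findall('"(.+?)"', content_disposition)[0]
    # subtitle == 'Some.Show.S01E01.srt'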
    def get(self, query):
        try:
            title, season, episode = re.findall('(.+?) S(\d+)E(\d+)$', query)[0]

            season, episode = '%01d' % int(season), '%02d' % int(episode)

            title = re.sub('^THE\s+|^A\s+', '', title.strip().upper())
            title = cleantitle.get(title)

            url = 'http://www.xsubs.tv/series/all.xml'

            srsid = cache.get(self.cache, 48, url)
            srsid = [i[0] for i in srsid if title == i[1]][0]

            url = 'http://www.xsubs.tv/series/%s/main.xml' % srsid

            result = client.request(url)
            ssnid = client.parseDOM(result, 'series_group', ret='ssnid', attrs = {'ssnnum': season})[0]

            url = 'http://www.xsubs.tv/series/%s/%s.xml' % (srsid, ssnid)

            result = client.request(url)

            items = client.parseDOM(result, 'subg')
            items = [(client.parseDOM(i, 'etitle', ret='number'), i) for i in items]
            items = [i[1] for i in items if len(i[0]) > 0 and i[0][0] == episode][0]
            items = re.findall('(<sr .+?</sr>)', items)
        except:
            return

        for item in items:
            try:
                p = client.parseDOM(item, 'sr', ret='published_on')[0]
                if p == '': raise Exception()

                name = client.parseDOM(item, 'sr')[0]
                name = name.rsplit('<hits>', 1)[0]
                name = re.sub('</.+?><.+?>|<.+?>', ' ', name).strip()
                name = '%s %s' % (query, name)
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'sr', ret='rlsid')[0]
                url = 'http://www.xsubs.tv/xthru/getsub/%s' % url
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({'name': name, 'url': url, 'source': 'xsubstv', 'rating': 5})
            except:
                pass

        return self.list
Example #6
    def item_list_11(self):
        try:
            result = client.request(self.tvshows_link)

            filter = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'panel-row row-.+?'})[0]
            filter = client.parseDOM(filter,
                                     'div',
                                     attrs={'class': 'views.+?limit-'})
            filter = client.parseDOM(filter, 'a', ret='href')
            filter = [x for y, x in enumerate(filter) if x not in filter[:y]]

            threads = []
            for i in range(0, 7):
                threads.append(
                    workers.Thread(self.thread, i, self.tvshows_link_2,
                                   self.tvshows_link_3 % str(i)))
                self.data.append('')
            [i.start() for i in threads]
            [i.join() for i in threads]

            items = ''
            for i in self.data:
                items += json.loads(i)[1]['data']
            items = client.parseDOM(items, 'li')
        except:
            return

        for item in items:
            try:
                title = client.parseDOM(item,
                                        'div',
                                        attrs={'class':
                                               'views-field-title'})[0]
                title = client.parseDOM(title, 'a')[0]
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                flt = True if any(url == i for i in filter) else False
                url = urlparse.urljoin(self.base_link, url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                image = client.parseDOM(item, "img", ret="src")[0]
                image = urlparse.urljoin(self.base_link, image)
                image = client.replaceHTMLCodes(image)
                image = image.encode('utf-8')

                self.list.append({
                    'title': title,
                    'url': url,
                    'image': image,
                    'filter': flt
                })
            except:
                pass

        return self.list
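workers.Thread is a project-specific wrapper; the start/join fan-out used here (and in several examples below) is the ordinary threading pattern. A rough standard-library equivalent, shown only as a sketch, would be:

    import threading

    def fan_out(worker, count):
        # Spawn one thread per index, then wait for all of them, just like the
        # [i.start() for i in threads] / [i.join() for i in threads] pairs above.
        threads = [threading.Thread(target=worker, args=(i,)) for i in range(count)]
        [t.start() for t in threads]
        [t.join() for t in threads]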
    def item_list_worker_2(self, i):
        try:
            image = re.sub('/\d+x\d+/', '/405x227/', self.list[i]['image'])
            image = client.request(image, output='geturl')
            if image == None: raise Exception()
            self.list[i].update({'image': image})
        except:
            pass
    def resolve(self, url):
        try:
            if url.startswith('plugin://'): return url

            try: u = re.findall('(.+?)(\d+p)(.+)', url)[0]
            except: return url

            r = u[0] + '480p' + u[2]
            r = client.request(r, output='geturl')
            if not r == None: return r

            r = u[0] + '360p' + u[2]
            r = client.request(r, output='geturl')
            if not r == None: return r

            return url
        except:
            pass
Example #9
    def resolve_live(self):
        links = []

        try:
            result = client.request(self.live_link)

            url = re.findall(
                '(?:\"|\')(http(?:s|)://.+?\.m3u8(?:.+?|))(?:\"|\')',
                result)[-1]
            url = client.request(url, output='geturl')

            links.append(url)
        except:
            pass

        try:
            result = client.request(self.live_link_2)

            url = re.findall(
                '(?:youtube.com|youtu.be)/(?:embed/|.+?\?v=|.+?\&v=|v/)([0-9A-Za-z_\-]+)',
                result)[0]
            url = 'http://www.youtube.com/watch?v=%s' % url

            url = client.request(url)
            url = re.findall('"hlsvp"\s*:\s*"(.+?)"', url)[0]
            url = urllib.unquote(url).replace('\\/', '/')

            url = client.request(url)
            url = url.replace('\n', '')
            url = re.findall('RESOLUTION\s*=\s*(\d*)x\d{1}.+?(http.+?\.m3u8)',
                             url)

            url = [(int(i[0]), i[1]) for i in url]
            url.sort()
            url = url[-1][1]

            links.append(url)
        except:
            pass

        if links == []: return

        return links[0]
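The second branch picks the widest variant from an HLS master playlist; the fabricated playlist below shows what the RESOLUTION regex and the final sort actually select.

    import re

    master = (
        '#EXTM3U\n'
        '#EXT-X-STREAM-INF:BANDWIDTH=800000,RESOLUTION=640x360\n'
        'http://example.com/low.m3u8\n'
        '#EXT-X-STREAM-INF:BANDWIDTH=2500000,RESOLUTION=1280x720\n'
        'http://example.com/high.m3u8\n'
    ).replace('\n', '')

    variants = re.findall('RESOLUTION\s*=\s*(\d*)x\d{1}.+?(http.+?\.m3u8)', master)
    variants = [(int(w), u) for w, u in variants]
    variants.sort()
    # variants[-1][1] == 'http://example.com/high.m3u8'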
    def cache(self, url):
        try:
            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            result = zip(client.parseDOM(result, 'series', ret='srsid'), client.parseDOM(result, 'series'))
            result = [(i[0], cleantitle.get(i[1])) for i in result]

            return result
        except:
            pass
Example #11
    def resolve(self, url):
        try:
            item = cache.get(self.item_list, 24, self.player_link)
            item = [i for i in item if url == i['url']][0]

            title, url, image = item['title'], item['uid'], item['image']

            url = client.request(url, output='geturl')

            return (title, url, image)
        except:
            pass
    def cache(self, i):
        try:
            self.r = client.request(i)
            self.r = re.sub(r'[^\x00-\x7F]+', ' ', self.r)
            t = re.findall(
                '(?:\"|\')original_title(?:\"|\')\s*:\s*(?:\"|\')(.+?)(?:\"|\')',
                self.r)[0]
            y = re.findall('(?:\"|\')year(?:\"|\')\s*:\s*(?:\"|\'|)(\d{4})',
                           self.r)[0]
            return (t, y)
        except:
            pass
    def item_list_worker(self, i):
        try:
            if self.list[i]['check'] == False: raise Exception()

            url = urlparse.urljoin(self.base_link, self.list[i]['link'])
            result = client.request(url)

            url = re.findall('(?:youtube.com|youtu.be)/(?:embed/|.+?\?v=|.+?\&v=|v/)([0-9A-Za-z_\-]+)', result)[0]
            url = 'plugin://plugin.video.youtube/play/?video_id=%s' % url

            self.list[i].update({'url': url, 'check': False})
        except:
            pass
Example #14
    def resolve(self, url):
        try:
            headers = {'User-Agent': 'NSPlayer/12.00.14393.0693 WMFSDK/12.00.14393.0693'}
            item = cache.get(self.item_list, 24, self.player_link)
            item = [i for i in item if url == i['url']][0]

            title, url, image = item['title'], item['uid'], item['image']

            url = client.request(url, headers=headers, output='geturl')
            url += '|%s' % urllib.urlencode(headers)

            return (title, url, image)
        except:
            pass
    def item_list(self, url):
        try:
            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url)

            items = client.parseDOM(result, 'item')
        except:
            return

        for item in items:
            try:
                title = client.parseDOM(item, 'title')[0]
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                image = client.parseDOM(item, 'media:thumbnail', ret='url')[0]
                image = image.split('?')[0]
                image = client.replaceHTMLCodes(image)
                image = image.encode('utf-8')

                url = client.parseDOM(item, 'enclosure', ret='url')
                check = False if len(url) > 0 else True
                url = url[0] if len(url) > 0 else '0'
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                link = client.parseDOM(item, 'link')[0]
                link = re.sub('.+?//.+?/','/', link)
                link = client.replaceHTMLCodes(link)
                link = link.encode('utf-8')

                self.list.append({'title': title, 'url': url, 'image': image, 'link': link, 'check': check})
            except:
                pass

        threads = []
        for i in range(0, len(self.list)): threads.append(workers.Thread(self.item_list_worker, i))
        [i.start() for i in threads]
        [i.join() for i in threads]

        self.list = [i for i in self.list if i['check'] == False]

        threads = []
        for i in range(0, len(self.list)): threads.append(workers.Thread(self.item_list_worker_2, i))
        [i.start() for i in threads]
        [i.join() for i in threads]

        return self.list
Example #16
    def get(self, query):
        try:
            filter = ['freeprojectx', 'subs4series', 'Εργαστήρι Υποτίτλων'.decode('iso-8859-7')]

            query = ' '.join(urllib.unquote_plus(re.sub('%\w\w', ' ', urllib.quote_plus(query))).split())

            url = 'http://www.subtitles.gr/search.php?name=%s&sort=downloads+desc' % urllib.quote_plus(query)
            result = client.request(url)
            #result = result.decode('iso-8859-7').encode('utf-8')

            items = client.parseDOM(result, 'tr', attrs = {'on.+?': '.+?'})
        except:
            return

        for item in items:
            try:
                if not 'flags/el.gif' in item: raise Exception()

                try: uploader = client.parseDOM(item, 'a', attrs = {'class': 'link_from'})[0].strip()
                except: uploader = 'other'

                if uploader in filter: raise Exception()

                if uploader == '': uploader = 'other'

                try: downloads = client.parseDOM(item, 'td', attrs = {'class': 'latest_downloads'})[0]
                except: downloads = '0'
                downloads = re.sub('[^0-9]', '', downloads)

                name = client.parseDOM(item, 'a', attrs = {'onclick': 'runme.+?'})[0]
                name = ' '.join(re.sub('<.+?>', '', name).split())
                name = '[%s] %s [%s DLs]' % (uploader, name, downloads)
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href', attrs = {'onclick': 'runme.+?'})[0]
                url = url.split('"')[0].split('\'')[0].split(' ')[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                rating = self._rating(downloads)

                self.list.append({'name': name, 'url': url, 'source': 'subtitlesgr', 'rating': rating})
            except:
                pass

        return self.list
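The query normalisation one-liner near the top of get strips every character that would need percent-encoding and collapses the whitespace left behind; a made-up query shows the effect:

    import re, urllib

    query = 'Game of Thrones: S01E01!'
    query = ' '.join(urllib.unquote_plus(re.sub('%\w\w', ' ', urllib.quote_plus(query))).split())
    # query is now 'Game of Thrones S01E01'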
    def download(self, path, url):
        try:
            result = client.request(url)

            f = os.path.splitext(urlparse.urlparse(url).path)[1][1:]
            f = os.path.join(path, 'file.%s' % f)

            with open(f, 'wb') as subFile:
                subFile.write(result)

            dirs, files = control.listDir(path)

            if len(files) == 0: return

            control.execute('Extract("%s","%s")' % (f, path))

            for i in range(0, 10):
                try:
                    dirs, files = control.listDir(path)
                    if len(files) > 1: break
                    control.sleep(1000)
                except:
                    pass

            control.deleteFile(f)

            subtitle = [
                i for i in files if any(
                    i.endswith(x) for x in ['.srt', '.sub'])
            ][0]

            subtitle = os.path.join(path, subtitle.decode('utf-8'))

            return subtitle
        except:
            pass
Example #18
    def item_list(self, url):
        try:
            result = client.request(url, mobile=True)
            result = json.loads(result)

            items = result['webradios']
        except:
            return

        for item in items:
            try:
                title = item['name'].strip()
                title = title.encode('utf-8')

                url = item['web_url'].strip()
                url = [i for i in url.split('/') if not i == ''][-1]
                url = url.encode('utf-8')

                uid = []
                uid.append(item['url_triton_64k_aac'])
                uid.append(item['url_64k_aac'])
                uid.append(item['url_triton_128k_mp3'])
                uid.append(item['url_128k_mp3'])
                uid.append(item['url_hls'])
                uid.append(item['url_hd_aac'])
                uid = [i for i in uid if '//' in i and not '.flv' in i][0]
                uid = uid.encode('utf-8')

                image = item['logo'].strip()
                image = self.image_link % image
                image = image.encode('utf-8')

                self.list.append({'title': title, 'url': url, 'uid': uid, 'image': image})
            except:
                pass

        return self.list
Example #19
    def download(self, path, url):
        try:
            url = re.findall('/(\d+)/', url + '/', re.I)[-1]
            url = 'http://www.findsubtitles.eu/getp.php?id=%s' % url
            url = client.request(url, output='geturl')

            data = urllib2.urlopen(url, timeout=10).read()
            zip = zipfile.ZipFile(StringIO.StringIO(data))
            files = zip.namelist()
            files = [i for i in files if i.startswith('subs/')]
            srt = [
                i for i in files if any(
                    i.endswith(x) for x in ['.srt', '.sub'])
            ]
            rar = [
                i for i in files if any(
                    i.endswith(x) for x in ['.rar', '.zip'])
            ]

            if len(srt) > 0:
                result = zip.open(srt[0]).read()

                subtitle = os.path.basename(srt[0])

                subtitle = os.path.join(path, subtitle.decode('utf-8'))

                with open(subtitle, 'wb') as subFile:
                    subFile.write(result)

                return subtitle

            elif len(rar) > 0:
                result = zip.open(rar[0]).read()

                f = os.path.splitext(urlparse.urlparse(rar[0]).path)[1][1:]
                f = os.path.join(path, 'file.%s' % f)

                with open(f, 'wb') as subFile:
                    subFile.write(result)

                dirs, files = control.listDir(path)

                if len(files) == 0: return

                control.execute('Extract("%s","%s")' % (f, path))

                for i in range(0, 10):
                    try:
                        dirs, files = control.listDir(path)
                        if len(files) > 1: break
                        control.sleep(1000)
                    except:
                        pass

                control.deleteFile(f)

                subtitle = [
                    i for i in files if any(
                        i.endswith(x) for x in ['.srt', '.sub'])
                ][0]

                subtitle = os.path.join(path, subtitle.decode('utf-8'))

                return subtitle

        except:
            pass
    def get(self, query):
        try:
            match = re.findall('(.+?) \((\d{4})\)$', query)

            if len(match) > 0:

                title, year = match[0][0], match[0][1]

                query = ' '.join(
                    urllib.unquote_plus(
                        re.sub('%\w\w', ' ',
                               urllib.quote_plus(title))).split())

                url = 'http://subztv.gr/search?q=%s' % urllib.quote_plus(query)

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                url = client.parseDOM(result, 'div', attrs={'id': 'movies'})[0]
                url = re.findall('(/movies/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urlparse.urljoin('http://subztv.gr', i) for i in url]
                url = url[:3]

                for i in url:
                    c = cache.get(self.cache, 2200, i)

                    if not c == None:
                        if cleantitle.get(c[0]) == cleantitle.get(
                                title) and c[1] == year:
                            try:
                                item = self.r
                            except:
                                item = client.request(i)
                            break

            else:

                title, season, episode = re.findall('(.+?) S(\d+)E(\d+)$',
                                                    query)[0]

                season, episode = '%01d' % int(season), '%01d' % int(episode)

                query = ' '.join(
                    urllib.unquote_plus(
                        re.sub('%\w\w', ' ',
                               urllib.quote_plus(title))).split())

                url = 'http://subztv.gr/search?q=%s' % urllib.quote_plus(query)

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                url = client.parseDOM(result, 'div', attrs={'id': 'series'})[0]
                url = re.findall('(/series/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urlparse.urljoin('http://subztv.gr', i) for i in url]
                url = url[:3]

                for i in url:
                    c = cache.get(self.cache, 2200, i)

                    if not c == None:
                        if cleantitle.get(c[0]) == cleantitle.get(title):
                            item = i
                            break

                item = '%s/seasons/%s/episodes/%s' % (item, season, episode)
                item = client.request(item)

            item = re.sub(r'[^\x00-\x7F]+', ' ', item)
            items = client.parseDOM(item, 'tr', attrs={'data-id': '\d+'})
        except:
            return

        for item in items:
            try:
                if not 'img/el.png' in item: raise Exception()

                name = client.parseDOM(item, 'td', attrs={'class': '.+?'})[-1]
                name = name.split('>')[-1].strip()
                name = re.sub('\s\s+', ' ', name)
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = re.findall('\'(http(?:s|)\://.+?)\'', item)[-1]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({
                    'name': name,
                    'url': url,
                    'source': 'subztvgr',
                    'rating': 5
                })
            except:
                pass

        return self.list
Example #21
    def item_list_2(self, url):
        try:
            base_link = re.findall('(http(?:s|)://.+?)/', url)

            if base_link:
                base_link = base_link[0]
            else:
                base_link = self.base_link

            if not '/webtv/' in url:
                result = client.request(url + '/webtv/')
                result = re.findall('"actual_args"\s*:\s*\["(.+?)"', result)[0]
            else:
                url, result = url.split('/webtv/')

            url = '%s/webtv/%s?page=%s' % (url, result.lower(), '%s')

            self.data.append('')
            self.thread(0, url % '0', None)

            try:
                result = client.parseDOM(self.data[0],
                                         'div',
                                         attrs={'role': 'main'})[0]
                result = client.parseDOM(result,
                                         'div',
                                         attrs={'class': 'view.+?'})[0]

                num = client.parseDOM(
                    result,
                    'li',
                    attrs={'class': 'pager__item pager__item--last'})[0]
                num = int(re.findall('page=(\d+)', num)[0])
                if num > 9: num = 9
                num = num + 1

                threads = []
                for i in range(1, num):
                    self.data.append('')
                    threads.append(
                        workers.Thread(self.thread, i, url % str(i), None))
                [i.start() for i in threads]
                [i.join() for i in threads]
            except:
                pass

            items = ''
            for i in self.data:
                items += i

            items = client.parseDOM(items, 'div', attrs={'role': 'main'})
            items = [
                client.parseDOM(i, 'div', attrs={'class': 'view.+?'})
                for i in items
            ]
            items = [i[0] for i in items if len(i) > 0]
            items = client.parseDOM(items, 'article')
        except:
            return

        for item in items:
            try:
                t = client.parseDOM(item, 'div', attrs={'class':
                                                        'itemtitle'})[0]
                title = client.parseDOM(t, 'span')
                if title: title = title[0]
                else: title = t
                if title == '' or 'sneak preview' in title.lower():
                    raise Exception()
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                tvshowtitle = client.parseDOM(item,
                                              'div',
                                              attrs={'class': 'showtitle'})
                if tvshowtitle: tvshowtitle = tvshowtitle[0]
                else: tvshowtitle = title
                tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
                tvshowtitle = tvshowtitle.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                url = urlparse.urljoin(base_link, url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                if url in [i['url'] for i in self.list]: raise Exception()

                image = client.parseDOM(item, 'img', ret='src')[0]
                image = urlparse.urljoin(base_link, image)
                image = client.replaceHTMLCodes(image)
                image = image.encode('utf-8')

                self.list.append({
                    'title': title,
                    'url': url,
                    'image': image,
                    'tvshowtitle': tvshowtitle
                })
            except:
                pass

        return self.list
Example #22
    def thread(self, i, url, post):
        try:
            result = client.request(url, post=post)
            self.data[i] = result
        except:
            return
Example #23
    def thread(self, url, i):
        try:
            result = client.request(url)
            self.data[i] = result
        except:
            return
Example #24
    def video_list(self, cid, url, pagination):
        try:
            result = client.request(url)
            result = json.loads(result)
            items = result['items']
        except:
            return

        for i in range(1, 5):
            try:
                if pagination == True: raise Exception()
                if not 'nextPageToken' in result: raise Exception()
                page = url + '&pageToken=' + result['nextPageToken']
                result = client.request(page)
                result = json.loads(result)
                items += result['items']
            except:
                pass

        try:
            if pagination == False: raise Exception()
            next = cid + '&pageToken=' + result['nextPageToken']
        except:
            next = ''

        for item in items:
            try:
                title = item['snippet']['title']
                title = title.encode('utf-8')

                try:
                    url = item['snippet']['resourceId']['videoId']
                except:
                    url = item['id']['videoId']
                url = url.encode('utf-8')

                image = item['snippet']['thumbnails']['high']['url']
                if '/default.jpg' in image: raise Exception()
                image = image.encode('utf-8')

                append = {'title': title, 'url': url, 'image': image}
                if not next == '': append['next'] = next
                self.list.append(append)
            except:
                pass

        try:
            u = [
                range(0, len(self.list))[i:i + 50]
                for i in range(0, len(self.list), 50)
            ]
            u = [','.join([self.list[x]['url'] for x in i]) for i in u]
            u = [self.content_link % i + self.key_link for i in u]

            threads = []
            for i in range(0, len(u)):
                threads.append(workers.Thread(self.thread, u[i], i))
                self.data.append('')
            [i.start() for i in threads]
            [i.join() for i in threads]

            items = []
            for i in self.data:
                items += json.loads(i)['items']
        except:
            pass

        for item in range(0, len(self.list)):
            try:
                vid = self.list[item]['url']

                self.list[item]['url'] = self.play_link % vid

                d = [(i['id'], i['contentDetails']) for i in items]
                d = [i for i in d if i[0] == vid]
                d = d[0][1]['duration']

                duration = 0
                try:
                    duration += 60 * 60 * int(re.findall('(\d*)H', d)[0])
                except:
                    pass
                try:
                    duration += 60 * int(re.findall('(\d*)M', d)[0])
                except:
                    pass
                try:
                    duration += int(re.findall('(\d*)S', d)[0])
                except:
                    pass
                duration = str(duration)

                self.list[item]['duration'] = duration
            except:
                pass

        return self.list
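The duration handling at the end converts YouTube's ISO-8601 contentDetails.duration strings into seconds; on a sample value the three regexes add up like this:

    import re

    d = 'PT1H23M45S'  # a sample contentDetails.duration value
    duration = 0
    try: duration += 60 * 60 * int(re.findall('(\d*)H', d)[0])
    except: pass
    try: duration += 60 * int(re.findall('(\d*)M', d)[0])
    except: pass
    try: duration += int(re.findall('(\d*)S', d)[0])
    except: pass
    # duration == 5025  (1 hour, 23 minutes, 45 seconds)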