Example No. 1
def kickinradio():
    try:
        url = 'https://www.internet-radio.com/stations/'
        result = client.request(url)
        items = client.parseDOM(result,
                                'dt',
                                attrs={'style': 'font-size: 22px;'})
    except:
        return

    for item in items:
        try:
            url = client.parseDOM(item, 'a', ret="href")[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            addCategoryItem('[UPPERCASE]' + url[10:-1] + '[/UPPERCASE]',
                            'kickinradiocats',
                            radiocasticon,
                            radiocastfanart,
                            url=url)
        except:
            pass

    endDirectory()
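
Example No. 1 scrapes the genre index at internet-radio.com and turns each styled <dt> block into a Kodi category item. Below is a rough standalone sketch of the same scrape using only the Python 3 standard library; the function name and the regex-based parsing are illustrative assumptions, not part of the addon.

import re
import urllib.request

def list_station_categories():
    # Fetch the genre index page (assumes the markup still matches the example above).
    html = urllib.request.urlopen(
        'https://www.internet-radio.com/stations/').read().decode('utf-8', 'ignore')
    # Each genre link sits inside a <dt style="font-size: 22px;"> block.
    for block in re.findall(r'<dt style="font-size: 22px;">(.*?)</dt>', html, re.S):
        m = re.search(r'href="([^"]+)"', block)
        if m:
            href = m.group(1)
            # '/stations/jazz/' -> 'JAZZ', mirroring url[10:-1] in the example.
            print(href[10:-1].upper(), href)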
Example No. 2
def kickinradiocats(url):
    try:
        url = urlparse.urljoin('https://www.internet-radio.com', url)

        result = client.request(url)
        result = client.parseDOM(result, 'div', attrs={'class': 'col-md-7'})

        a = client.parseDOM(result, 'h4', attrs={'class': 'text-danger'})
        b = client.parseDOM(result, 'samp')
        items = zip(a, b)
    except:
        return

    for item in items:
        try:
            try:
                a = client.parseDOM(item[0], 'a')[0]
            except:
                a = ''
            try:
                b = [i for i in client.parseDOM(item[0], 'a', ret='href')[0].split('/')
                     if i != ''][-1]
            except:
                b = ''
            if not a == '': name = a
            elif not b == '': name = b
            else: name = item[0]
            name = name.capitalize()
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item[1].split()
            url = [i for i in url if i.startswith('http')][0]
            url = re.sub('[0-9a-zA-Z]+\.pls(?:.+|)|\.m3u(?:.+|)', '', url)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            addDirectoryItem(name, url, '0', radiocasticon, radiocastfanart)
        except:
            pass

    try:
        next = client.parseDOM(result, 'ul', attrs={'class': 'pagination'})
        next = client.parseDOM(next, 'li', attrs={'class': 'next'})
        next = client.parseDOM(next, 'a', ret='href')[0]
        next = client.replaceHTMLCodes(next)
        next = next.encode('utf-8')

        addCategoryItem('[B][I]NEXT[/I][/B]',
                        'kickinradiocats',
                        radiocasticon,
                        radiocastfanart,
                        url=next)
    except:
        pass

    endDirectory()
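
Example No. 2 opens a genre page, pairs each station heading with its <samp> block, and keeps the stream's base URL by dropping any trailing .pls/.m3u playlist file name; a NEXT item is added when the pagination block offers a further page. The URL cleanup step in isolation, runnable without the addon (the sample input is made up):

import re

def clean_stream_url(samp_text):
    # Keep the first http token, then strip a trailing playlist file name.
    url = [i for i in samp_text.split() if i.startswith('http')][0]
    return re.sub(r'[0-9a-zA-Z]+\.pls(?:.+|)|\.m3u(?:.+|)', '', url)

print(clean_stream_url('Listen: http://uk3.internet-radio.com:8405/listen.pls 128kbps'))
# -> http://uk3.internet-radio.com:8405/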
Example No. 3
    def resolve(self, url):
        try:
            id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split(
                '&')[0]
            result = client.request('http://www.youtube.com/watch?v=%s' % id)

            message = client.parseDOM(result,
                                      'div',
                                      attrs={'id': 'unavailable-submessage'})
            message = ''.join(message)

            alert = client.parseDOM(result,
                                    'div',
                                    attrs={'id': 'watch7-notification-area'})

            if len(alert) > 0: raise Exception()
            if re.search('[a-zA-Z]', message): raise Exception()

            url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id
            return url
        except:
            return
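
Example No. 3 turns assorted YouTube URL shapes into a plugin.video.youtube play URL, bailing out when the watch page reports the video as unavailable. The id extraction step on its own, with made-up sample URLs:

def extract_youtube_id(url):
    # Chained splits tolerate watch?v=, youtu.be/ and extra query parameters.
    return url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]

assert extract_youtube_id('https://www.youtube.com/watch?v=abc123') == 'abc123'
assert extract_youtube_id('https://youtu.be/abc123?t=10') == 'abc123'
assert extract_youtube_id('https://www.youtube.com/watch?v=abc123&list=PL9') == 'abc123'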
Example No. 4
    def episodeAbsoluteNumber(self, thetvdb, season, episode):
        try:
            url = 'http://thetvdb.com/api/%s/series/%s/default/%01d/%01d' % (
                'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64'), thetvdb,
                int(season), int(episode))
            r = client.request(url)
            episode = client.parseDOM(r, 'absolute_number')[0]

            return int(episode)
        except:
            pass

        return episode
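
Example No. 4 asks TheTVDB's XML API for an episode's absolute_number, keeping its API key base64-encoded in the source and decoding it with the Python 2 'base64' string codec. Under Python 3 the same light obfuscation would go through the base64 module; the key and series id below are made-up placeholders:

import base64

encoded_key = base64.b64encode(b'NOT-A-REAL-KEY').decode('ascii')
api_key = base64.b64decode(encoded_key).decode('ascii')  # recovers 'NOT-A-REAL-KEY'
url = 'http://thetvdb.com/api/%s/series/%s/default/%01d/%01d' % (api_key, '12345', 1, 1)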
Example No. 5
    def searchMovie(self, title, year):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(
                self.base_link, self.search_link %
                (cleantitle.geturl(title.replace('\'', '-'))))
            r = client.request(url, timeout='10')
            t = cleantitle.get(title)
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            return
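
Example No. 5 reduces the search results to (href, title) pairs, splits each title into name and year with a regex, and keeps the link whose normalized title and year both match. A standalone sketch of that pipeline on made-up data, with a simple lower() standing in for cleantitle.get:

import re

def match_result(pairs, wanted_title, wanted_year):
    rows = [(href, re.findall(r'(.+?) \((\d{4})', title)) for href, title in pairs]
    rows = [(href, m[0][0], m[0][1]) for href, m in rows if m]
    return [href for href, title, year in rows
            if title.lower() == wanted_title.lower() and year == wanted_year][0]

pairs = [('/movie/heat-1995', 'Heat (1995)'), ('/movie/heat-1986', 'Heat (1986)')]
print(match_result(pairs, 'Heat', '1995'))  # -> /movie/heat-1995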
Example No. 6
    def searchShow(self, title, season, year):
        try:
            title = cleantitle.normalize(title)
            t = cleantitle.get(title)

            url = urlparse.urljoin(
                self.base_link, self.search_link % urllib.quote_plus(
                    cleantitle.query('%s Season %01d' %
                                     (title.replace('\'', '-'), int(season)))))
            r = client.request(url, timeout='10')
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            if r:
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][0]) for i in r
                     if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1]))
                     for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                     if len(i[1]) > 0]
                r = [
                    i[0] for i in r
                    if t == cleantitle.get(i[1]) and int(season) == int(i[2])
                ][0]
            else:
                url = urlparse.urljoin(
                    self.base_link, self.search_link % urllib.quote_plus(
                        cleantitle.query(
                            '%s %01d' %
                            (title.replace('\'', '-'), int(year)))))
                r = client.request(url, timeout='10')
                r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][0]) for i in r
                     if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                     if len(i[1]) > 0]
                r = [
                    i[0] for i in r
                    if t == cleantitle.get(i[1]) and year == i[2]
                ][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            return
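
Example No. 6 first searches for "Title Season N" and parses season numbers out of the result headings; if that search returns nothing it retries with a "Title (Year)" query like the movie search above. The season regex in isolation, on made-up titles:

import re

for title in ['Fargo - Season 3', 'Fargo Season 3', 'Fargo (2014)']:
    print(title, re.findall(r'(.+?)\s+-\s+Season\s+(\d+)', title))
# Only the first title yields [('Fargo', '3')]; the others give [], so results
# not written in the "... - Season N" form are filtered out.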
Example No. 7
def radio181fm():
    try:
        url = 'http://www.181.fm/index.php?p=mp3links'

        result = client.request(url)

        index = []
        items = client.parseDOM(result, 'td', attrs={'id': 'rightlinks'})
    except:
        # Bail out if the page could not be fetched; items would be undefined below.
        return

    for item in items:
        try:
            if not item.startswith('http://'): raise Exception()

            name = items[:items.index(item)]
            name = [i for i in name if not 'http://' in i][-1]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item.split('<')[0].replace('///', '://')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            index.append({
                'name': name,
                'url': url,
                'thumb': '0',
                'image': radio181fmicon,
                'fanart': radio181fmfanart
            })
        except:
            pass

    index = [i for x, i in enumerate(index) if i not in index[x + 1:]]
    index = sorted(index, key=lambda k: k['name'])
    for i in index:
        addDirectoryItem(i['name'], i['url'], i['thumb'], i['image'],
                         i['fanart'])

    endDirectory()
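
Example No. 7 reads the 181.fm MP3 link table, takes the last non-link cell before each URL as the station name, then removes duplicate entries and sorts them by name. The de-duplication comprehension keeps an entry only if it does not reappear later in the list; the same idiom on made-up data:

index = [{'name': 'Rock'}, {'name': 'Jazz'}, {'name': 'Rock'}]
index = [i for x, i in enumerate(index) if i not in index[x + 1:]]
index = sorted(index, key=lambda k: k['name'])
print(index)  # -> [{'name': 'Jazz'}, {'name': 'Rock'}]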
Example No. 8
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            url = data['url']
            episode = int(data['episode'])

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            p = client.request(url, timeout='10')

            if episode > 0:
                r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
                r = zip(client.parseDOM(r, 'a', ret='href'),
                        client.parseDOM(r, 'a'))
                r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1][0]) for i in r]
                r = [i[0] for i in r if int(i[1]) == episode][0]
                p = client.request(r, timeout='10')

            p = re.findall('load_player\((\d+)\)', p)
            p = urllib.urlencode({'id': p[0]})
            headers = {'Referer': url}
            r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3')
            r = client.request(r,
                               post=p,
                               headers=headers,
                               XHR=True,
                               timeout='10')
            url = json.loads(r)['value']
            url = client.request(url,
                                 headers=headers,
                                 XHR=True,
                                 output='geturl',
                                 timeout='10')

            if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
                sources.append({
                    'source': 'openload.co',
                    'quality': 'HD',
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
                raise Exception()

            r = client.request(url, headers=headers, XHR=True, timeout='10')
            try:
                src = json.loads(r)['playlist'][0]['sources']
                links = [i['file'] for i in src if 'file' in i]
                for i in links:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
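
Example No. 8 expects its url argument to arrive as a urlencoded query string, which parse_qs unpacks into the data dict used at the top of sources(); the scraped player id is then posted to the site's load_player_v3 endpoint, and the returned location is either recorded as a non-direct openload source or read as a JSON playlist of gvideo files. A Python 3 round trip of just the packing convention, with made-up values:

from urllib.parse import urlencode, parse_qs

packed = urlencode({'url': '/film/some-show', 'episode': '3'})
data = parse_qs(packed)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
print(data)  # -> {'url': '/film/some-show', 'episode': '3'}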