Example #1
def kickinradiocats(url):
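    """List the stations on an internet-radio.com category page and add a NEXT item when more pages exist."""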
    try:
        url = urlparse.urljoin('https://www.internet-radio.com', url)

        result = client.request(url)
        result = client.parseDOM(result, 'div', attrs={'class': 'col-md-7'})

        a = client.parseDOM(result, 'h4', attrs={'class': 'text-danger'})
        b = client.parseDOM(result, 'samp')
        items = zip(a, b)
    except:
        return

    for item in items:
        try:
            try:
                a = client.parseDOM(item[0], 'a')[0]
            except:
                a = ''
            try:
                b = [
                    i for i in client.parseDOM(item[0], 'a', ret='href')
                    [0].split('/') if not i == ''
                ][-1]
            except:
                b = ''
            if not a == '': name = a
            elif not b == '': name = b
            else: name = item[0]
            name = name.capitalize()
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item[1].split()
            url = [i for i in url if i.startswith('http')][0]
            url = re.sub(r'[0-9a-zA-Z]+\.pls(?:.+|)|\.m3u(?:.+|)', '', url)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            addDirectoryItem(name, url, '0', radiocasticon, radiocastfanart)
        except:
            pass

    try:
        next = client.parseDOM(result, 'ul', attrs={'class': 'pagination'})
        next = client.parseDOM(next, 'li', attrs={'class': 'next'})
        next = client.parseDOM(next, 'a', ret='href')[0]
        next = client.replaceHTMLCodes(next)
        next = next.encode('utf-8')

        addCategoryItem('[B][I]NEXT[/I][/B]',
                        'kickinradiocats',
                        radiocasticon,
                        radiocastfanart,
                        url=next)
    except:
        pass

    endDirectory()
Example #2
def kickinradio():
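    """List the station categories scraped from internet-radio.com/stations/."""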
    try:
        url = 'https://www.internet-radio.com/stations/'
        result = client.request(url)
        items = client.parseDOM(result,
                                'dt',
                                attrs={'style': 'font-size: 22px;'})
    except:
        return

    for item in items:
        try:
            url = client.parseDOM(item, 'a', ret="href")[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            addCategoryItem('[UPPERCASE]' + url[10:-1] + '[/UPPERCASE]',
                            'kickinradiocats',
                            radiocasticon,
                            radiocastfanart,
                            url=url)
        except:
            pass

    endDirectory()
Example #3
def parse(url):
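    """Unwrap a redirect URL: decode HTML entities, then pull the target from a 'u' or 'q' query parameter if present."""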
    try: url = client.replaceHTMLCodes(url)
    except: pass
    try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
    except: pass
    try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
    except: pass
    return url
Example #4
def strip_domain(url):
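    """Return only the path portion of a URL, with HTML entities decoded and the scheme/host stripped."""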
    try:
        if url.lower().startswith('http') or url.startswith('/'):
            url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
Example #5
def radio181fm():
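    """Scrape the 181.fm MP3 links page and list the streams, de-duplicated and sorted by name."""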
    try:
        url = 'http://www.181.fm/index.php?p=mp3links'

        result = client.request(url)

        index = []
        items = client.parseDOM(result, 'td', attrs={'id': 'rightlinks'})
    except:
        return

    for item in items:
        try:
            if not item.startswith('http://'): raise Exception()

            name = items[:items.index(item)]
            name = [i for i in name if not 'http://' in i][-1]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item.split('<')[0].replace('///', '://')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            index.append({
                'name': name,
                'url': url,
                'thumb': '0',
                'image': radio181fmicon,
                'fanart': radio181fmfanart
            })
        except:
            pass

    index = [i for x, i in enumerate(index) if i not in index[x + 1:]]
    index = sorted(index, key=lambda k: k['name'])
    for i in index:
        addDirectoryItem(i['name'], i['url'], i['thumb'], i['image'],
                         i['fanart'])

    endDirectory()
Example #6
    def searchShow(self, title, season, year):
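        """Search for '<title> Season <n>', falling back to a title/year search when nothing matches, and return the matching result's URL path."""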
        try:
            title = cleantitle.normalize(title)
            t = cleantitle.get(title)

            url = urlparse.urljoin(
                self.base_link, self.search_link % urllib.quote_plus(
                    cleantitle.query('%s Season %01d' %
                                     (title.replace('\'', '-'), int(season)))))
            r = client.request(url, timeout='10')
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            if r:
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][0]) for i in r
                     if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(i[0], re.findall(r'(.+?)\s+-\s+Season\s+(\d+)', i[1]))
                     for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                     if len(i[1]) > 0]
                r = [
                    i[0] for i in r
                    if t == cleantitle.get(i[1]) and int(season) == int(i[2])
                ][0]
            else:
                url = urlparse.urljoin(
                    self.base_link, self.search_link % urllib.quote_plus(
                        cleantitle.query(
                            '%s %01d' %
                            (title.replace('\'', '-'), int(year)))))
                r = client.request(url, timeout='10')
                r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][0]) for i in r
                     if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(i[0], re.findall(r'(.+?) \((\d{4})', i[1])) for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                     if len(i[1]) > 0]
                r = [
                    i[0] for i in r
                    if t == cleantitle.get(i[1]) and year == i[2]
                ][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            return
Example #7
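    # Search the site for the movie by title and return the URL path of the result whose title and year both match.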
    def searchMovie(self, title, year):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(
                self.base_link, self.search_link %
                (cleantitle.geturl(title.replace('\'', '-'))))
            r = client.request(url, timeout='10')
            t = cleantitle.get(title)
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.findall(r'(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [
                i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]
            ][0]
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            return
Example #8
def resolve(regex):
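    """Parse the <regex> blocks of a regex-resolver definition, evaluate them via getRegexParsed, and return either expanded <item> entries or the resolved URL."""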
    try:
        vanilla = re.compile('(<regex>.+)',
                             re.MULTILINE | re.DOTALL).findall(regex)[0]
        cddata = re.compile(r'<!\[CDATA\[(.+?)\]\]>',
                            re.MULTILINE | re.DOTALL).findall(regex)
        for i in cddata:
            regex = regex.replace('<![CDATA[' + i + ']]>',
                                  urllib.quote_plus(i))

        regexs = re.compile('(<regex>.+)',
                            re.MULTILINE | re.DOTALL).findall(regex)[0]
        regexs = re.compile('<regex>(.+?)</regex>',
                            re.MULTILINE | re.DOTALL).findall(regexs)
        regexs = [
            re.compile('<(.+?)>(.*?)</.+?>',
                       re.MULTILINE | re.DOTALL).findall(i) for i in regexs
        ]

        regexs = [
            dict([(client.replaceHTMLCodes(x[0]),
                   client.replaceHTMLCodes(urllib.unquote_plus(x[1])))
                  for x in i]) for i in regexs
        ]
        regexs = [(i['name'], i) for i in regexs]
        regexs = dict(regexs)

        url = regex.split('<regex>', 1)[0].strip()
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        r = getRegexParsed(regexs, url)

        try:
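            # When the parser returns a match list plus a 'listrepeat' template,
            # expand each match into <item> entries by filling the [name.paramN] placeholders.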
            ln = ''
            ret = r[1]
            listrepeat = r[2]['listrepeat']
            regexname = r[2]['name']

            for obj in ret:
                try:
                    item = listrepeat
                    for i in range(len(obj) + 1):
                        item = item.replace(
                            '[%s.param%s]' % (regexname, str(i)), obj[i - 1])

                    item2 = vanilla
                    for i in range(len(obj) + 1):
                        item2 = item2.replace(
                            '[%s.param%s]' % (regexname, str(i)), obj[i - 1])

                    item2 = re.compile('(<regex>.+?</regex>)',
                                       re.MULTILINE | re.DOTALL).findall(item2)
                    item2 = [
                        x for x in item2
                        if not '<name>%s</name>' % regexname in x
                    ]
                    item2 = ''.join(item2)

                    ln += '\n<item>%s\n%s</item>\n' % (item, item2)
                except:
                    pass

            return ln
        except:
            pass

        if r[1] == True:
            return r[0]
    except:
        return