# Example #1
def show_movies():
    """Fetch a movie's detail JSON and register it as a playable video item."""
    xbmc.log("Function : Show_Movies")

    movie_path = h.extract_var(args, 'url')
    name = h.extract_var(args, 'name')

    # Pull the movie detail document and decode it.
    raw = h.make_request(MOVIE_SHOW_URL + movie_path, cookie_file, cookie_jar)
    data = json.loads(raw)
    details = data["details"]

    h.add_dir_video(
        addon_handle,
        details["title"],
        data["playback_url"],
        details["listing_image_small"],
        details["seo_description"],
    )
def movie():
    """Resolve the movie page's embedded iframe and add it as a video item."""
    page_url = h.extract_var(args, 'url')
    title = h.extract_var(args, 'name')
    thumb = ''

    html = h.make_request(page_url, cookie_file, cookie_jar)
    page = BeautifulSoup(html)

    # The playable stream is embedded as an iframe inside the entry-content div.
    content_div = h.bs_find_with_class(page, 'div', 'entry-content')
    iframe_src = dict(content_div.find('iframe').attrs)['src']
    resolved = urlresolver.resolve(iframe_src)

    if resolved:
        h.add_dir_video(addon_handle, title, resolved, thumb, '')
# Example #3
def show():
    """Fetch a channel show's JSON metadata and add it as a playable video."""
    xbmc.log("Function : Show")

    show_path = h.extract_var(args, 'url')
    xbmc.log("URL : " + CHANNEL_SHOW_URL + show_path)
    name = h.extract_var(args, 'name')

    data = json.loads(
        h.make_request(CHANNEL_SHOW_URL + show_path, cookie_file, cookie_jar))

    h.add_dir_video(
        addon_handle,
        data["title"],
        data["playback_url"],
        data["listing_image_small"],
        data["description"],
    )
# Example #4
def links():
    # Resolve each candidate link carried in the 'elem_id' JSON blob and add
    # every playable one as a video item.  `url` is rewritten in place as the
    # redirect/iframe chain is followed, then handed to urlresolver.
    import urlresolver

    links_info = json.loads(h.extract_var(args, 'elem_id'))
    for link in links_info:
        url = link['url']
        print url
        resp = None
        try:
            resp = h.make_request(url, cookie_file, cookie_jar)
            soup = BeautifulSoup(resp)
            if len(soup.findChildren()) == 1:
                # Bare page containing only a meta-refresh: follow the
                # redirect target, then take the iframe src from the table
                # inside the #content div of the destination page.
                meta = soup.find('meta', attrs={'http-equiv': 'refresh'})
                if meta:
                    c = dict(meta.attrs)['content']
                    # content attribute looks like "0;URL=http://..."
                    idx4 = c.find('URL=')
                    if idx4 != -1:
                        _url = c[idx4 + 4:]
                        soup = BeautifulSoup(h.make_request(_url, cookie_file, cookie_jar))
                        div = soup.find('div', {'id': 'content'})
                        url = dict(div.find('table').find('iframe').attrs)['src']
            else:
                # Normal page: the iframe lives in the second direct child
                # div of the entry-content block.
                div = h.bs_find_with_class(soup, 'div', 'entry-content')
                if div:
                    divs = div.findAll('div', recursive=False)
                    url = dict(divs[1].find('iframe').attrs)['src']
        except urllib2.HTTPError as e:
            # Hack. Avast blocks first url. Only for WatchVideo currently
            if e.code == 403 and e.msg == 'Malicious content':
                up = urlparse.urlparse(url)
                id = urlparse.parse_qs(up.query)['id'][0]
                # f flags that `url` was rewritten to a known embed mirror
                # and the request should be retried.
                f = 0
                if up.path == '/idowatch.php':
                    url = 'http://vidfile.xyz/embed-%s-1280x720.html' % id
                    f = 1
                elif up.path == '/watchvideo.php':
                    url = 'http://watchvideo2.us/embed-%s-1280x720.html' % id
                    f = 1
                elif up.path == '/playu.php':
                    url = 'http://playu.me/embed-%s-1280x720.html' % id
                    f = 1

                if f:
                    resp = h.make_request(url, cookie_file, cookie_jar)
        if resp:
            # Only attempt resolution if some request in the chain succeeded.
            video_url = urlresolver.resolve(url)
            if video_url:
                h.add_dir_video(addon_handle, link['name'], video_url, '', '')
def todays_episode():
    """Scrape today's episode page for its HLS master playlist and add it."""
    page_url = h.extract_var(args, 'url')
    name = h.extract_var(args, 'name')

    page = BeautifulSoup(h.make_request(page_url, cookie_file, cookie_jar))
    container = page.find('div', {'id': 'block-gec-videos-videopage-videos'})

    # Locate the inline script that assigns the stream URL to `babyenjoying`.
    stream_script = None
    for candidate in container.findAll('script'):
        if 'babyenjoying' in candidate.text:
            stream_script = candidate
            break

    # Take the assigned value up to the ';' and strip the surrounding quotes.
    master_m3u8 = stream_script.text.split('babyenjoying = ', 2)[2].split(';')[0][1:-1]

    plot = page.find('p', {'itemprop': 'description'}).text
    thumbnail = page.find('meta', {'itemprop': 'thumbnailUrl'})['content']

    h.add_dir_video(addon_handle, name, master_m3u8, thumbnail, plot)
# Example #6
def show_episodes():
    """List one page of a show's episodes/videos and add a pager when full.

    Reads the show name from the plugin args ('url' slot) and the current
    page number from the module-level `param1`.  Each asset is added as a
    playable video; a 'Next >>' directory entry is appended when the first
    search set returned a full page (50 items), suggesting more results.
    """
    xbmc.log("Show Episodes Menu")
    showname = h.extract_var(args, 'url')
    page_number = int(param1)

    def _search_set(set_id, type_name):
        # One paged, date-sorted search bucket for the given asset type.
        return {
            "pageSize": 50,
            "pageNumber": page_number,
            "sortOrder": "START_DATE:DESC",
            "type": "search",
            "id": set_id,
            "data": "exact=true&all=type:%s&all=showname:%s" % (type_name, showname),
        }

    # Build the payload with json.dumps so the show name is properly escaped;
    # the previous string concatenation produced invalid JSON for names
    # containing quotes or backslashes.
    v_data = json.dumps({
        "detailsType": "basic",
        "searchSet": [_search_set("Episodes", "Episodes"),
                      _search_set("video", "video")],
        "deviceDetails": {"mfg": "Google Chrome", "os": "others",
                          "osVer": "XXX", "model": "Google Chrome"},
    })

    JSONObjs = json.loads(h.make_request_post(SHOW_EPISODE_URL, v_data, cookie_file, cookie_jar, TOKEN))

    for search_set in JSONObjs:
        for row in search_set["assets"]:
            title = row["title"]
            if row["releaseDate"] != "":
                title = title + " (" + str(row["releaseDate"]) + ")"

            h.add_dir_video(addon_handle, title, row["hlsUrl"], img_src,
                            row["shortDesc"], int(row["duration"]) / 1000)

    # A full first page implies there may be more results; add a pager entry
    # pointing at the next page number.  (The old code also computed a -1
    # counter on the short-page branch, but never used it.)
    if len(JSONObjs[0]["assets"]) >= 50:
        h.add_dir(addon_handle, base_url, 'Next >>', showname,
                  "episodemenu~" + str(page_number + 1))
# Example #7
def episode():
    """Scrape the episode page for its HLS master playlist and add it."""
    page_url = h.extract_var(args, 'url')
    name = h.extract_var(args, 'name')

    page = BeautifulSoup(h.make_request(page_url, cookie_file, cookie_jar))
    block = page.find('div', {'id': 'block-gec-videos-videopage-videos'})

    # The stream URL is assigned to a JS variable called `babyenjoying`
    # inside one of the block's inline scripts.
    script = next(
        (s for s in block.findAll('script') if 'babyenjoying' in s.text),
        None)

    # Grab the assigned value (up to ';') and drop the surrounding quotes.
    master_m3u8 = script.text.split('babyenjoying = ', 2)[2].split(';')[0][1:-1]

    plot = page.find('p', {'itemprop': 'description'}).text
    thumbnail = page.find('meta', {'itemprop': 'thumbnailUrl'})['content']

    h.add_dir_video(addon_handle, name, master_m3u8, thumbnail, plot)
# Example #8
def topic1():
    # Walk the forum post (module-level `content`) through a prioritized
    # chain of hosting providers, stopping at the first section that yields
    # playable videos:
    #   Flash Player HD -> Letwatch 720p -> Vidto -> Watchvideo -> Letwatch
    #   -> "Watch Online - Flash".
    # Each section parses BBCode-style "[URL=...]name[..." links out of the
    # post body, scrapes the linked host page for the embedded stream, and
    # registers it via h.add_dir_video.  Every section is best-effort: any
    # exception clears `videos` and falls through to the next provider.
    videos = []
    try:
        # --- 1) Playwire "Flash Player 720p HD" links ----------------------
        idx = content.index('Flash Player 720p HD Quality Online Links')

        if idx != -1:
            links_content = content[idx + 1].split('\n')
            print links_content
            for link_content in links_content:
                # Pull the target URL and display name out of the BBCode tag.
                idx1 = link_content.find('[URL=')
                idx2 = link_content.find(']')
                url = link_content[idx1 + 5: idx2]

                idx3 = link_content.find('[', idx2)
                name = link_content[idx2 + 1: idx3]

                print name, url

                # The Playwire embed script carries a JSON config URL in its
                # data-config attribute; that config points at a manifest
                # with a <baseurl> and a <media url=...> node.
                soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                json_url = dict(soup.find('script', {'src': '//cdn.playwire.com/bolt/js/embed.min.js'}).attrs)['data-config']

                json_data = json.loads(h.make_request(json_url, cookie_file, cookie_jar))
                poster = json_data['poster']
                src = json_data['src']
                soup = BeautifulSoup(h.make_request(src, cookie_file, cookie_jar))
                base_url = soup.find('baseurl').text
                media_node = soup.find('media')
                media_url = dict(media_node.attrs)['url']
                video_url = '%s/%s' % (base_url, media_url)

                videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                h.add_dir_video(addon_handle, name, video_url, poster, '')
    except Exception as e:
        videos = []
        links_content = ''
        print 'Flash Player', e

    print videos
    if not videos:
        try:
            # --- 2) "Letwatch 720p HD" links -------------------------------
            idx = content.index('Letwatch 720p HD Quality Online Links')

            if idx != -1:
                links_content = content[idx + 1].split('\n')
                print links_content
                for link_content in links_content:
                    idx1 = link_content.find('[URL=')
                    idx2 = link_content.find(']')
                    url = link_content[idx1 + 5: idx2]

                    idx3 = link_content.find('[', idx2)
                    name = link_content[idx2 + 1: idx3]

                    print name, url

                    # The stream iframe is the second direct child div of the
                    # entry-content block.
                    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    div = h.bs_find_with_class(soup, 'div', 'entry-content')
                    divs = div.findAll('div', recursive=False)
                    src = dict(divs[1].find('iframe').attrs)['src']

                    print src
                    soup = BeautifulSoup(h.make_request(src, cookie_file, cookie_jar))
                    scripts = soup.findAll('script')
                    # Matches the standard "p,a,c,k,e,d" JS packer footer:
                    # captures payload, radix, token count, keyword list.
                    rgx = re.compile(ur'.*}\(\'(.*)\',([0-9]+),([0-9]+),\'(.*)\'\.split.*')
                    for script in scripts:
                        if script.text.startswith('eval'):
                            groups = re.search(rgx, script.text).groups()
                            p = groups[0]
                            base = int(groups[1])
                            c = int(groups[2])
                            k = groups[3].split('|')

                            # Substitute each base-N token back with its word
                            # to reconstruct the original player setup JS.
                            for x in reversed(xrange(0, c)):
                                if k[x]:
                                    p = re.sub(r'\b%s\b' % h.int2base(x, base), k[x], p)

                            arr = p.split(';')
                            data_str = arr[0][26:-1]
                            data = demjson.decode(data_str.replace("\\", ""))
                            # Prefer the source labelled 'HD', else the first.
                            video_url = ''
                            video_type = ''
                            for source in data['sources']:
                                if not video_url:
                                    video_url = source['file']
                                    video_type = source['label']
                                else:
                                    if source['label'] == 'HD':
                                        video_url = source['file']
                                        video_type = source['label']

                            print video_type, video_url

                            poster = ''
                            videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                            h.add_dir_video(addon_handle, name, video_url, poster, '')
        except Exception as e:
            videos = []
            print 'LetWatch 720', e

    print videos
    if not videos:
        try:
            # --- 3) Vidto links --------------------------------------------
            idx = content.index('Vidto Link')

            if idx != -1:
                links_content = content[idx + 1].split('\n')
                print links_content
                for link_content in links_content:
                    idx1 = link_content.find('[URL=')
                    idx2 = link_content.find(']')
                    url = link_content[idx1 + 5: idx2]

                    idx3 = link_content.find('[', idx2)
                    name = link_content[idx2 + 1: idx3]

                    print name, url

                    # Follow a bare meta-refresh redirect if the fetched page
                    # has no real body (content like "0;URL=http://...").
                    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    if len(soup.findChildren()) == 1:
                        meta = soup.find('meta', attrs={'http-equiv': 'refresh'})
                        if meta:
                            c = dict(meta.attrs)['content']
                            idx4 = c.find('URL=')
                            if idx4 != -1:
                                url = c[idx4 + 4:]
                                print url
                                soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))

                    div = soup.find('div', {'id': 'content'})
                    src = dict(div.find('table').find('iframe').attrs)['src']
                    print src

                    soup = BeautifulSoup(h.make_request(src, cookie_file, cookie_jar))
                    div = soup.find('body').find("div", {"id": "player_code"})
                    script = None
                    scripts = div.findAll('script')
                    # Same "p,a,c,k,e,d" packer pattern as the Letwatch path.
                    rgx = re.compile(ur'.*}\(\'(.*)\',([0-9]+),([0-9]+),\'(.*)\'\.split.*')
                    for s in scripts:
                        if s.text.startswith('eval'):
                            script = s
                            break

                    groups = re.search(rgx, script.text).groups()
                    p = groups[0]
                    base = int(groups[1])
                    c = int(groups[2])
                    k = groups[3].split('|')

                    for x in reversed(xrange(0, c)):
                        if k[x]:
                            p = re.sub(r'\b%s\b' % h.int2base(x, base), k[x], p)

                    # Extract the "hd:[...]" JSON array of quality variants.
                    idx5 = p.find('hd:[')
                    idx6 = p.find(']')
                    q = p[idx5 + 3:idx6 + 1]
                    j = demjson.decode(q)
                    print j

                    video_url = ''
                    size = 0
                    for _j in j:
                        s = _j['label'][:-1]
                        # NOTE(review): `s` is still a string here, so the
                        # comparison below is Python 2's str-vs-int ordering,
                        # not numeric — likely meant int(_j['label'][:-1]).
                        # Confirm intent before changing.
                        if s > size:
                            size = s
                            video_url = _j['file']

                    print video_url
                    if video_url:
                        poster = ''
                        videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                        h.add_dir_video(addon_handle, name, video_url, poster, '')
                # Vidto is terminal: once its section parsed, stop here even
                # if no video passed the filter.
                return
        except Exception as e:
            videos = []
            print 'Vidto', e

    print videos
    if not videos:
        try:
            # --- 4) Watchvideo links ---------------------------------------
            idx = content.index('Watchvideo Link')

            if idx != -1:
                links_content = content[idx + 1].split('\n')
                print links_content
                for link_content in links_content:
                    idx1 = link_content.find('[URL=')
                    idx2 = link_content.find(']')
                    url = link_content[idx1 + 5: idx2]

                    idx3 = link_content.find('[', idx2)
                    name = link_content[idx2 + 1: idx3]

                    print name, url

                    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    if len(soup.findChildren()) == 1:
                        meta = soup.find('meta', attrs={'http-equiv': 'refresh'})
                        if meta:
                            c = dict(meta.attrs)['content']
                            idx4 = c.find('URL=')
                            if idx4 != -1:
                                url = c[idx4 + 4:]
                                print url
                                soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))

                    div = soup.find('div', {'id': 'content'})
                    src = dict(div.find('table').find('iframe').attrs)['src']
                    print src

                    soup = BeautifulSoup(h.make_request(src, cookie_file, cookie_jar))
                    script = soup.find('body').find('script', recursive=False)

                    # Decode the "sources: [...]" array from the player setup.
                    idx5 = script.text.find('sources: ')
                    idx6 = script.text.find(']')
                    j = demjson.decode(script.text[idx5 + 9:idx6 + 1])

                    # Pick the unlabelled source entry.
                    video_url = ''
                    for _j in j:
                        if 'label' not in _j:
                            video_url = _j['file']

                    if video_url:
                        poster = ''
                        videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                        h.add_dir_video(addon_handle, name, video_url, poster, '')
        except Exception as e:
            videos = []
            print 'Watchvideo', e

    print videos
    if not videos:
        try:
            # --- 5) Plain Letwatch links (same decode as section 2, but the
            #        iframe comes from the #content table) -------------------
            idx = content.index('Letwatch Link')

            if idx != -1:
                links_content = content[idx + 1].split('\n')
                print links_content
                for link_content in links_content:
                    idx1 = link_content.find('[URL=')
                    idx2 = link_content.find(']')
                    url = link_content[idx1 + 5: idx2]

                    idx3 = link_content.find('[', idx2)
                    name = link_content[idx2 + 1: idx3]

                    print name, url

                    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    if len(soup.findChildren()) == 1:
                        meta = soup.find('meta', attrs={'http-equiv': 'refresh'})
                        if meta:
                            c = dict(meta.attrs)['content']
                            idx4 = c.find('URL=')
                            if idx4 != -1:
                                url = c[idx4 + 4:]
                                print url
                                soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))

                    div = soup.find('div', {'id': 'content'})
                    src = dict(div.find('table').find('iframe').attrs)['src']
                    print src

                    soup = BeautifulSoup(h.make_request(src, cookie_file, cookie_jar))
                    scripts = soup.findAll('script')
                    rgx = re.compile(ur'.*}\(\'(.*)\',([0-9]+),([0-9]+),\'(.*)\'\.split.*')
                    for script in scripts:
                        if script.text.startswith('eval'):
                            groups = re.search(rgx, script.text).groups()
                            p = groups[0]
                            base = int(groups[1])
                            c = int(groups[2])
                            k = groups[3].split('|')

                            for x in reversed(xrange(0, c)):
                                if k[x]:
                                    p = re.sub(r'\b%s\b' % h.int2base(x, base), k[x], p)

                            arr = p.split(';')
                            data_str = arr[0][26:-1]
                            data = demjson.decode(data_str.replace("\\", ""))
                            # Prefer the source labelled 'HD', else the first.
                            video_url = ''
                            video_type = ''
                            for source in data['sources']:
                                if not video_url:
                                    video_url = source['file']
                                    video_type = source['label']
                                else:
                                    if source['label'] == 'HD':
                                        video_url = source['file']
                                        video_type = source['label']

                            print video_type, video_url

                            poster = ''
                            videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                            h.add_dir_video(addon_handle, name, video_url, poster, '')
        except Exception as e:
            videos = []
            print 'LetWatch', e

    print videos
    if not videos:
        try:
            # --- 6) Playwire "zeus" embeds ("Watch Online - Flash") --------
            idx = content.index('Watch Online - Flash')

            if idx != -1:
                links_content = content[idx + 1].split('\n')
                print links_content

                for link_content in links_content:
                    idx1 = link_content.find('[URL=')
                    idx2 = link_content.find(']')
                    url = link_content[idx1 + 5: idx2]

                    idx3 = link_content.find('[', idx2)
                    name = link_content[idx2 + 1: idx3]

                    print name, url

                    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    script = soup.find('script', {'src': '//cdn.playwire.com/bolt/js/zeus/embed.js'})

                    if script:
                        data_config = dict(script.attrs)['data-config']
                        json_info = json.loads(h.make_request(data_config, cookie_file, cookie_jar))

                        poster = json_info['content']['poster']

                        f4m = json_info['content']['media']['f4m']

                        soup = BeautifulSoup(h.make_request(f4m, cookie_file, cookie_jar))
                        base_url = soup.find('baseurl').text

                        # Choose the <media> variant with the highest bitrate.
                        media_url = None
                        bitrate = 0
                        medias = soup.findAll('media')
                        for m in medias:
                            attrs = dict(m.attrs)
                            br = int(attrs['bitrate'])
                            if br > bitrate:
                                media_url = attrs['url']
                                bitrate = br

                        video_url = '%s/%s' % (base_url, media_url)
                        videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                        h.add_dir_video(addon_handle, name, video_url, poster, '')
        except Exception as e:
            videos = []
            print 'Watch Online - Flash', e

    print videos
    if not videos:
        # Nothing matched any provider: dump the raw post for debugging.
        import pprint
        pprint.pprint(content)